Compare commits


30 Commits

Author SHA1 Message Date
binary-husky  491174095a  Update the docker-compose documentation  2023-10-07 11:59:06 +08:00
binary-husky  49cea97822  Enable automatic theme switching  2023-10-06 10:36:30 +08:00
binary-husky  6310b65d70  Rebuild Gradio to improve the user experience  2023-10-06 10:32:03 +08:00
binary-husky  93c76e1809  Update the bundled Gradio version  2023-10-06 09:54:07 +08:00
binary-husky  f64cf7a3d1  update translation matrix  2023-10-02 14:24:01 +08:00
binary-husky  fdffbee1b0  Update toolbox.py  2023-09-30 09:56:30 +08:00
binary-husky  87ccd1a89a  Update crazy_functional.py  2023-09-27 18:35:06 +08:00
binary-husky  87b9734986  Fix the duplicate definition of 'copiedIcon'  2023-09-27 16:35:58 +08:00
binary-husky  d2d5665c37  Allow proxy use during module warm-up  2023-09-27 15:53:45 +08:00
binary-husky  0844b6e9cf  Support proxy access to the GROBID service  2023-09-27 15:40:55 +08:00
binary-husky  9cb05e5724  Adjust layout  2023-09-27 15:20:28 +08:00
binary-husky  80b209fa0c  Merge branch 'frontier'  2023-09-27 15:19:07 +08:00
binary-husky  8d4cb05738  Shortcut for the Matlab project analysis plugin  2023-09-26 10:16:38 +08:00
binary-husky  31f4069563  Improve the polishing and proofreading prompts  2023-09-25 17:46:28 +08:00
binary-husky  8ba6fc062e  Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier  2023-09-23 23:59:30 +08:00
binary-husky  c0c2d14e3d  better scrollbar  2023-09-23 23:58:32 +08:00
binary-husky  f0a5c49a9c  Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier  2023-09-23 23:47:42 +08:00
binary-husky  9333570ab7  Reduce the minimum size of basic buttons such as Reset  2023-09-23 23:47:25 +08:00
binary-husky  d6eaaad962  Stop Gradio from printing the misleading share=True hint  2023-09-23 23:23:23 +08:00
binary-husky  e24f077b68  Explicitly add the azure-gpt-4 option  2023-09-23 23:06:58 +08:00
binary-husky  dc5bb9741a  Version update  2023-09-23 22:45:07 +08:00
binary-husky  b383b45191  version 3.54 beta  2023-09-23 22:44:18 +08:00
binary-husky  2d8f37baba  Refine proxy usage scenarios  2023-09-23 22:43:15 +08:00
binary-husky  409927ef8e  Unify the transformers version  2023-09-23 22:26:28 +08:00
binary-husky  5b231e0170  Add a copy-entire-message button  2023-09-23 22:11:29 +08:00
binary-husky  87f629bb37  Add gpt-4-32k  2023-09-23 20:24:13 +08:00
binary-husky  3672c97a06  Dynamic code interpreter  2023-09-23 01:51:05 +08:00
binary-husky  b6ee3e9807  Merge pull request #1121 from binary-husky/frontier (add a disable-cache option to the arXiv translation plugin)  2023-09-21 09:33:19 +08:00
binary-husky  d56bc280e9  Add a disable-cache option  2023-09-20 22:04:15 +08:00
qingxu fu  d5fd00c15d  Fine-tune the Dockerfile  2023-09-20 10:02:10 +08:00
39 changed files with 928 additions and 140 deletions

View File

@@ -155,11 +155,13 @@ def auto_update(raise_error=False):
def warm_up_modules():
print('正在执行一些模块的预热...')
from toolbox import ProxyNetworkActivate
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
enc.encode("模块预热", disallowed_special=())
enc = model_info["gpt-4"]['tokenizer']
enc.encode("模块预热", disallowed_special=())
with ProxyNetworkActivate("Warmup_Modules"):
enc = model_info["gpt-3.5-turbo"]['tokenizer']
enc.encode("模块预热", disallowed_special=())
enc = model_info["gpt-4"]['tokenizer']
enc.encode("模块预热", disallowed_special=())
if __name__ == '__main__':
import os
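The change above routes the tokenizer warm-up through the new ProxyNetworkActivate("Warmup_Modules") context. A minimal sketch of the resulting call pattern, assembled from the hunk above (the helper name warm_up_tokenizers is illustrative):

```python
from toolbox import ProxyNetworkActivate
from request_llm.bridge_all import model_info

def warm_up_tokenizers():
    # The proxy is only applied if "Warmup_Modules" is whitelisted in WHEN_TO_USE_PROXY (config.py).
    with ProxyNetworkActivate("Warmup_Modules"):
        for name in ("gpt-3.5-turbo", "gpt-4"):
            enc = model_info[name]["tokenizer"]
            enc.encode("warm up", disallowed_special=())
```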

View File

@@ -48,6 +48,7 @@ DEFAULT_WORKER_NUM = 3
THEME = "Default"
AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效)
CHATBOT_HEIGHT = 1115
@@ -58,7 +59,10 @@ CODE_HIGHLIGHT = True
# 窗口布局
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
DARK_MODE = True # 暗色模式 / 亮色模式
# 暗色模式 / 亮色模式
DARK_MODE = True
# 发送请求到OpenAI后等待多久判定为超时
@@ -74,13 +78,13 @@ MAX_RETRY = 2
# 插件分类默认选项
DEFAULT_FN_GROUPS = ['对话', '编程', '学术']
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
"gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
# "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
@@ -179,11 +183,20 @@ GROBID_URLS = [
# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
ALLOW_RESET_CONFIG = False
# 临时的上传文件夹位置,请勿修改
PATH_PRIVATE_UPLOAD = "private_upload"
# 日志文件夹的位置,请勿修改
PATH_LOGGING = "gpt_log"
# 除了连接OpenAI之外还有哪些场合允许使用代理请勿修改
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"]
"""
在线大模型配置关联关系示意图
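WHEN_TO_USE_PROXY whitelists the scenarios in which ProxyNetworkActivate may apply the configured proxy. The sketch below only illustrates the gating logic implied by this config entry; the actual implementation in toolbox.py may differ, and the class name here is hypothetical.

```python
import os
from toolbox import get_conf

class ProxyScenarioSketch:
    """Hypothetical illustration: enable the proxy only for whitelisted scenarios."""
    def __init__(self, scenario):
        self.scenario = scenario

    def __enter__(self):
        when_to_use_proxy, proxies = get_conf('WHEN_TO_USE_PROXY', 'proxies')
        if proxies and (self.scenario in when_to_use_proxy):
            os.environ['HTTP_PROXY'] = proxies.get('http', '')
            os.environ['HTTPS_PROXY'] = proxies.get('https', '')
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Always restore a proxy-free environment on exit.
        os.environ.pop('HTTP_PROXY', None)
        os.environ.pop('HTTPS_PROXY', None)
```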

View File

@@ -11,7 +11,8 @@ def get_core_functions():
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
"Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
r"Firstly, you should provide the polished paragraph. "
r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
"Suffix": r"",
# 按钮颜色 (默认 secondary)
@@ -27,17 +28,18 @@ def get_core_functions():
"Suffix": r"",
},
"查找语法错误": {
"Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +
r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
r"put the original text the first column, " +
r"put the corrected text in the second column and highlight the key words you fixed.""\n"
"Prefix": r"Help me ensure that the grammar and the spelling is correct. "
r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, "
r"put the original text the first column, "
r"put the corrected text in the second column and highlight the key words you fixed. "
r"Finally, please provide the proofreaded text.""\n\n"
r"Example:""\n"
r"Paragraph: How is you? Do you knows what is it?""\n"
r"| Original sentence | Corrected sentence |""\n"
r"| :--- | :--- |""\n"
r"| How **is** you? | How **are** you? |""\n"
r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n\n"
r"Below is a paragraph from an academic paper. "
r"You need to report all grammar and spelling mistakes as the example before."
+ "\n\n",
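These Prefix/Suffix strings are wrapped around whatever the user has typed when a basic-function button is pressed. A minimal sketch of that composition (the helper name is hypothetical; the real wiring happens in the Gradio callbacks):

```python
def compose_prompt(core_functions, key, user_text):
    # Prompt sent to the model = button Prefix + user input + button Suffix.
    entry = core_functions[key]
    return entry.get("Prefix", "") + user_text + entry.get("Suffix", "")

# Example: compose_prompt(get_core_functions(), "查找语法错误", paragraph)
```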

View File

@@ -6,6 +6,7 @@ def get_crazy_functions():
from crazy_functions.生成函数注释 import 批量生成函数注释
from crazy_functions.解析项目源代码 import 解析项目本身
from crazy_functions.解析项目源代码 import 解析一个Python项目
from crazy_functions.解析项目源代码 import 解析一个Matlab项目
from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
from crazy_functions.解析项目源代码 import 解析一个C项目
from crazy_functions.解析项目源代码 import 解析一个Golang项目
@@ -38,7 +39,7 @@ def get_crazy_functions():
function_plugins = {
"虚空终端": {
"Group": "对话|编程|学术",
"Group": "对话|编程|学术|智能体",
"Color": "stop",
"AsButton": True,
"Function": HotReload(虚空终端)
@@ -77,6 +78,13 @@ def get_crazy_functions():
"Info": "批量总结word文档 | 输入参数为路径",
"Function": HotReload(总结word文档)
},
"解析整个Matlab项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
"Function": HotReload(解析一个Matlab项目)
},
"解析整个C++项目头文件": {
"Group": "编程",
"Color": "stop",
@@ -243,20 +251,23 @@ def get_crazy_functions():
"Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex中文润色)
},
"Latex项目全文中译英输入路径或上传压缩包": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex中译英)
},
"Latex项目全文英译中输入路径或上传压缩包": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英译中)
},
# 被新插件取代
# "Latex项目全文中译英输入路径或上传压缩包": {
# "Group": "学术",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex中译英)
# },
# "Latex项目全文英译中输入路径或上传压缩包": {
# "Group": "学术",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex英译中)
# },
"批量Markdown中译英输入路径或上传压缩包": {
"Group": "编程",
"Color": "stop",
@@ -513,6 +524,18 @@ def get_crazy_functions():
except:
print('Load function plugin failed')
try:
from crazy_functions.函数动态生成 import 函数动态生成
function_plugins.update({
"动态代码解释器CodeInterpreter": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(函数动态生成)
}
})
except:
print('Load function plugin failed')
# try:
# from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter
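Each entry registered above follows the same schema: menu group(s), button color, whether a dedicated button is shown, an optional Info tooltip, and the hot-reloaded callable. A condensed sketch of the pattern for the new agent plugin, mirroring the hunk above (HotReload is the wrapper used throughout crazy_functional.py):

```python
from toolbox import HotReload
from crazy_functions.函数动态生成 import 函数动态生成

function_plugins = {
    "动态代码解释器CodeInterpreter": {
        "Group": "智能体",          # the new "agent" group added in this change
        "Color": "stop",
        "AsButton": False,          # only reachable from the plugin dropdown
        "Function": HotReload(函数动态生成),
    },
}
```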

View File

@@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
# < -------------------构建知识库--------------- >
chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
print('Establishing knowledge archive ...')
with ProxyNetworkActivate(): # 临时地激活代理网络
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
kai = knowledge_archive_interface()
kai.feed_archive(file_manifest=file_manifest, id=kai_id)
kai_files = kai.get_loaded_file()

View File

@@ -79,7 +79,7 @@ def move_project(project_folder, arxiv_id=None):
shutil.copytree(src=project_folder, dst=new_workfolder)
return new_workfolder
def arxiv_download(chatbot, history, txt):
def arxiv_download(chatbot, history, txt, allow_cache=True):
def check_cached_translation_pdf(arxiv_id):
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
if not os.path.exists(translation_dir):
@@ -116,7 +116,7 @@ def arxiv_download(chatbot, history, txt):
arxiv_id = url_.split('/abs/')[-1]
if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
if cached_translation_pdf: return cached_translation_pdf, arxiv_id
if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id
url_tar = url_.replace('/abs/', '/e-print/')
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
@@ -228,6 +228,9 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
# <-------------- more requirements ------------->
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
more_req = plugin_kwargs.get("advanced_arg", "")
no_cache = more_req.startswith("--no-cache")
if no_cache: more_req.lstrip("--no-cache")
allow_cache = not no_cache
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
# <-------------- check deps ------------->
@@ -244,7 +247,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
# <-------------- clear history and read input ------------->
history = []
txt, arxiv_id = yield from arxiv_download(chatbot, history, txt)
txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
if txt.endswith('.pdf'):
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
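With this change the plugin accepts a leading `--no-cache` flag in the advanced-argument box, and `allow_cache` is threaded into arxiv_download so a previously translated PDF is only reused when caching is allowed. A standalone sketch of the flag handling (function name hypothetical):

```python
def parse_advanced_arg(plugin_kwargs):
    # Sketch: strip an optional leading --no-cache flag from the advanced argument
    # and return (allow_cache, remaining_requirements).
    more_req = plugin_kwargs.get("advanced_arg", "") or ""
    no_cache = more_req.startswith("--no-cache")
    if no_cache:
        more_req = more_req[len("--no-cache"):].lstrip()
    return (not no_cache), more_req

# allow_cache, more_req = parse_advanced_arg({"advanced_arg": "--no-cache 翻译时保留公式"})
```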

View File

@@ -651,7 +651,7 @@ class knowledge_archive_interface():
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
@@ -807,3 +807,10 @@ class construct_html():
with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
f.write(self.html_string.encode('utf-8', 'ignore').decode())
return os.path.join(get_log_folder(), file_name)
def get_plugin_arg(plugin_kwargs, key, default):
# 如果参数是空的
if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
# 正常情况
return plugin_kwargs.get(key, default)
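get_plugin_arg treats an empty string like a missing key, so callers receive their default in both cases, for example:

```python
# get_plugin_arg treats an empty string the same as a missing key:
assert get_plugin_arg({"file_path_arg": ""}, key="file_path_arg", default=None) is None
assert get_plugin_arg({}, key="file_path_arg", default=None) is None
assert get_plugin_arg({"file_path_arg": "./build/ants.jpg"},
                      key="file_path_arg", default=None) == "./build/ants.jpg"
```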

View File

@@ -0,0 +1,70 @@
import time
import importlib
from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
import multiprocessing
def get_class_name(class_string):
import re
# Use regex to extract the class name
class_name = re.search(r'class (\w+)\(', class_string).group(1)
return class_name
def try_make_module(code, chatbot):
module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py'
with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
promote_file_to_downloadzone(fn_path, chatbot=chatbot)
class_name = get_class_name(code)
manager = multiprocessing.Manager()
return_dict = manager.dict()
p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict))
# only has 10 seconds to run
p.start(); p.join(timeout=10)
if p.is_alive(): p.terminate(); p.join()
p.close()
return return_dict["success"], return_dict['traceback']
# check is_function_successfully_generated
def is_function_successfully_generated(fn_path, class_name, return_dict):
return_dict['success'] = False
return_dict['traceback'] = ""
try:
# Create a spec for the module
module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
# Load the module
example_module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(example_module)
# Now you can use the module
some_class = getattr(example_module, class_name)
# Now you can create an instance of the class
instance = some_class()
return_dict['success'] = True
return
except:
return_dict['traceback'] = trimmed_format_exc()
return
def subprocess_worker(code, file_path, return_dict):
return_dict['result'] = None
return_dict['success'] = False
return_dict['traceback'] = ""
try:
module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py'
with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
class_name = get_class_name(code)
# Create a spec for the module
module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
# Load the module
example_module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(example_module)
# Now you can use the module
some_class = getattr(example_module, class_name)
# Now you can create an instance of the class
instance = some_class()
return_dict['result'] = instance.run(file_path)
return_dict['success'] = True
except:
return_dict['traceback'] = trimmed_format_exc()
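Both helpers isolate the GPT-generated module in a child process so an import or run that hangs or crashes cannot take down the main app: try_make_module gives the import-and-instantiate check 10 seconds, while the caller of subprocess_worker picks its own limit. A generic sketch of that pattern (helper name hypothetical):

```python
import multiprocessing

def run_sandboxed(target, args, timeout):
    # Run `target` in a child process, share results through a manager dict,
    # and kill it if it exceeds `timeout` seconds (mirrors try_make_module / subprocess_worker).
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=target, args=(*args, return_dict))
    p.start(); p.join(timeout=timeout)
    if p.is_alive():
        p.terminate(); p.join()
    p.close()
    return dict(return_dict)
```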

View File

@@ -2,6 +2,8 @@ from functools import lru_cache
from toolbox import gen_time_str
from toolbox import promote_file_to_downloadzone
from toolbox import write_history_to_file, promote_file_to_downloadzone
from toolbox import get_conf
from toolbox import ProxyNetworkActivate
from colorful import *
import requests
import random
@@ -12,13 +14,13 @@ import math
class GROBID_OFFLINE_EXCEPTION(Exception): pass
def get_avail_grobid_url():
from toolbox import get_conf
GROBID_URLS, = get_conf('GROBID_URLS')
if len(GROBID_URLS) == 0: return None
try:
_grobid_url = random.choice(GROBID_URLS) # 随机负载均衡
if _grobid_url.endswith('/'): _grobid_url = _grobid_url.rstrip('/')
res = requests.get(_grobid_url+'/api/isalive')
with ProxyNetworkActivate('Connect_Grobid'):
res = requests.get(_grobid_url+'/api/isalive')
if res.text=='true': return _grobid_url
else: return None
except:
@@ -29,7 +31,8 @@ def parse_pdf(pdf_path, grobid_url):
import scipdf # pip install scipdf_parser
if grobid_url.endswith('/'): grobid_url = grobid_url.rstrip('/')
try:
article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url)
with ProxyNetworkActivate('Connect_Grobid'):
article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url)
except GROBID_OFFLINE_EXCEPTION:
raise GROBID_OFFLINE_EXCEPTION("GROBID服务不可用请修改config中的GROBID_URL可修改成本地GROBID服务。")
except:

View File

@@ -0,0 +1,252 @@
# 本源代码中, ⭐ = 关键步骤
"""
测试:
- 裁剪图像,保留下半部分
- 交换图像的蓝色通道和红色通道
- 将图像转为灰度图像
- 将csv文件转excel表格
Testing:
- Crop the image, keeping the bottom half.
- Swap the blue channel and red channel of the image.
- Convert the image to grayscale.
- Convert the CSV file to an Excel spreadsheet.
"""
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
from .crazy_utils import input_clipping, try_install_deps
from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
from crazy_functions.gen_fns.gen_fns_shared import get_class_name
from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker
from crazy_functions.gen_fns.gen_fns_shared import try_make_module
import os
import time
import glob
import multiprocessing
templete = """
```python
import ... # Put dependencies here, e.g. import numpy as np.
class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`
def run(self, path): # The name of the function must be `run`, it takes only a positional argument.
# rewrite the function you have just written here
...
return generated_file_path
```
"""
def inspect_dependency(chatbot, history):
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return True
def get_code_block(reply):
import re
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
matches = re.findall(pattern, reply) # find all code blocks in text
if len(matches) == 1:
return matches[0].strip('python') # code block
for match in matches:
if 'class TerminalFunction' in match:
return match.strip('python') # code block
raise RuntimeError("GPT is not generating proper code.")
def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
# 输入
prompt_compose = [
f'Your job:\n'
f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
f"2. You should write this function to perform following task: " + txt + "\n",
f"3. Wrap the output python function with markdown codeblock."
]
i_say = "".join(prompt_compose)
demo = []
# 第一步
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
sys_prompt= r"You are a world-class programmer."
)
history.extend([i_say, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
# 第二步
prompt_compose = [
"If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
templete
]
i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=inputs_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt= r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!"
)
code_to_return = gpt_say
history.extend([i_say, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
# # 第三步
# i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
# i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
# installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
# inputs=i_say, inputs_show_user=inputs_show_user,
# llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
# sys_prompt= r"You are a programmer."
# )
# # # 第三步
# i_say = "Show me how to use `pip` to install packages to run the code above. "
# i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
# installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
# inputs=i_say, inputs_show_user=i_say,
# llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
# sys_prompt= r"You are a programmer."
# )
installation_advance = ""
return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
def for_immediate_show_off_when_possible(file_type, fp, chatbot):
if file_type in ['png', 'jpg']:
image_path = os.path.abspath(fp)
chatbot.append(['这是一张图片, 展示如下:',
f'本地文件地址: <br/>`{image_path}`<br/>'+
f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
])
return chatbot
def have_any_recent_upload_files(chatbot):
_5min = 5 * 60
if not chatbot: return False # chatbot is None
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
if not most_recent_uploaded: return False # most_recent_uploaded is None
if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new
else: return False # most_recent_uploaded is too old
def get_recent_file_prompt_support(chatbot):
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
path = most_recent_uploaded['path']
return path
@CatchException
def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,暂时没有用武之地
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
# 清空历史
history = []
# 基本信息:功能、贡献者
chatbot.append(["正在启动: 插件动态生成插件", "插件动态生成, 执行开始, 作者Binary-Husky."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# ⭐ 文件上传区是否有东西
# 1. 如果有文件: 作为函数参数
# 2. 如果没有文件需要用GPT提取参数 (太懒了,以后再写,虚空终端已经实现了类似的代码)
file_list = []
if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
file_list.append(file_path)
yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1)
elif have_any_recent_upload_files(chatbot):
file_dir = get_recent_file_prompt_support(chatbot)
file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True)
yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
else:
chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
return # 2. 如果没有文件
if len(file_list) == 0:
chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
return # 2. 如果没有文件
# 读取文件
file_type = file_list[0].split('.')[-1]
# 粗心检查
if is_the_upload_folder(txt):
yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
return
# 开始干正事
MAX_TRY = 3
for j in range(MAX_TRY): # 最多重试5次
traceback = ""
try:
# ⭐ 开始啦
code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
chatbot.append(["代码生成阶段结束", ""])
yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
# ⭐ 分离代码块
code = get_code_block(code)
# ⭐ 检查模块
ok, traceback = try_make_module(code, chatbot)
# 搞定代码生成
if ok: break
except Exception as e:
if not traceback: traceback = trimmed_format_exc()
# 处理异常
if not traceback: traceback = trimmed_format_exc()
yield from update_ui_lastest_msg(f"{j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
# 代码生成结束, 开始执行
TIME_LIMIT = 15
yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
manager = multiprocessing.Manager()
return_dict = manager.dict()
# ⭐ 到最后一步了,开始逐个文件进行处理
for file_path in file_list:
if os.path.exists(file_path):
chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."])
chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
else:
continue
# ⭐⭐⭐ subprocess_worker ⭐⭐⭐
p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict))
# ⭐ 开始执行时间限制TIME_LIMIT
p.start(); p.join(timeout=TIME_LIMIT)
if p.is_alive(): p.terminate(); p.join()
p.close()
res = return_dict['result']
success = return_dict['success']
traceback = return_dict['traceback']
if not success:
if not traceback: traceback = trimmed_format_exc()
chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
# chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 顺利完成,收尾
res = str(res)
if os.path.exists(res):
chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
else:
chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
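For one of the test prompts listed at the top of this file ("swap the blue channel and red channel of the image"), the model is expected to produce a module matching the TerminalFunction template. A hand-written example of what such generated code could look like (illustrative only, not plugin output; assumes opencv-python is installed):

```python
import cv2  # opencv-python

class TerminalFunction(object):          # class name required by the template
    def run(self, path):                 # single positional argument: the file path
        img = cv2.imread(path)           # image is loaded in BGR channel order
        swapped = img[:, :, ::-1]        # reverse channel order: swap blue and red
        out_path = path.rsplit('.', 1)[0] + '_swapped.png'
        cv2.imwrite(out_path, swapped)
        return out_path                  # return the generated file path
```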

View File

@@ -136,6 +136,23 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
return
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
project_folder = txt
else:
if txt == "": txt = '空空如也的输入栏'
report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)]
if len(file_manifest) == 0:
report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):

View File

@@ -1,4 +1,28 @@
#【请修改完参数后删除此行】请在以下方案中选择一种然后删除其他的方案最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line
## ===================================================
# docker-compose.yml
## ===================================================
# 1. 请在以下方案中选择任意一种,然后删除其他的方案
# 2. 修改你选择的方案中的environment环境变量详情请见github wiki或者config.py
# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改:
# 【方法1: 适用于Linux很方便可惜windows不支持】与宿主的网络融合为一体这个是默认配置
# network_mode: "host"
# 【方法2: 适用于所有系统包括Windows和MacOS】端口映射把容器的端口映射到宿主的端口注意您需要先删除network_mode: "host",再追加以下内容)
# ports:
# - "12345:12345" # 注意12345必须与WEB_PORT环境变量相互对应
# 4. 最后`docker-compose up`运行
# 5. 如果希望使用显卡,请关注 LOCAL_MODEL_DEVICE 和 英伟达显卡运行时 选项
## ===================================================
# 1. Please choose one of the following options and delete the others.
# 2. Modify the environment variables in the selected option, see GitHub wiki or config.py for more details.
# 3. Choose a method to expose the server port and make the corresponding configuration changes:
# [Method 1: Suitable for Linux, convenient, but not supported for Windows] Fusion with the host network, this is the default configuration
# network_mode: "host"
# [Method 2: Suitable for all systems including Windows and MacOS] Port mapping, mapping the container port to the host port (note that you need to delete network_mode: "host" first, and then add the following content)
# ports:
# - "12345: 12345" # Note! 12345 must correspond to the WEB_PORT environment variable.
# 4. Finally, run `docker-compose up`.
# 5. If you want to use a graphics card, pay attention to the LOCAL_MODEL_DEVICE and Nvidia GPU runtime options.
## ===================================================
## ===================================================
## 【方案零】 部署项目的全部能力这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡则不推荐使用这个
@@ -39,10 +63,14 @@ services:
# count: 1
# capabilities: [gpu]
# 与宿主的网络融合
# 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合
network_mode: "host"
# 不使用代理网络拉取最新代码
# 【WEB_PORT暴露方法2: 适用于所有系统】端口映射
# ports:
# - "12345:12345" # 12345必须与WEB_PORT相互对应
# 启动容器后运行main.py主程序
command: >
bash -c "python3 -u main.py"

View File

@@ -13,21 +13,20 @@ RUN python3 -m pip install openai numpy arxiv rich
RUN python3 -m pip install colorama Markdown pygments pymupdf
RUN python3 -m pip install python-docx moviepy pdfminer
RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
RUN python3 -m pip install nougat-ocr
RUN python3 -m pip install rarfile py7zr
RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# 下载分支
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss
RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llm/requirements_moss.txt
RUN python3 -m pip install -r request_llm/requirements_qwen.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN python3 -m pip install -r request_llm/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr
# 预热Tiktoken模块

View File

@@ -322,7 +322,7 @@
"任何文件": "Any file",
"但推荐上传压缩文件": "But it is recommended to upload compressed files",
"更换模型 & SysPrompt & 交互界面布局": "Change model & SysPrompt & interactive interface layout",
"底部输入区": "Bottom input area",
"浮动输入区": "Floating input area",
"输入清除键": "Input clear key",
"插件参数区": "Plugin parameter area",
"显示/隐藏功能区": "Show/hide function area",
@@ -2513,5 +2513,141 @@
"此处待注入的知识库名称id": "The knowledge base name ID to be injected here",
"您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin",
"判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law",
"构建知识库后": "After building the knowledge base"
"构建知识库后": "After building the knowledge base",
"找不到本地项目或无法处理": "Unable to find local project or unable to process",
"再做一个小修改": "Make another small modification",
"解析整个Matlab项目": "Parse the entire Matlab project",
"需要用GPT提取参数": "Need to extract parameters using GPT",
"文件路径": "File path",
"正在排队": "In queue",
"-=-=-=-=-=-=-=-= 写出第1个文件": "-=-=-=-=-=-=-=-= Write the first file",
"仅翻译后的文本 -=-=-=-=-=-=-=-=": "Translated text only -=-=-=-=-=-=-=-=",
"对话通道": "Conversation channel",
"找不到任何": "Unable to find any",
"正在启动": "Starting",
"开始创建新进程并执行代码! 时间限制": "Start creating a new process and executing the code! Time limit",
"解析Matlab项目": "Parse Matlab project",
"更换UI主题": "Change UI theme",
"⭐ 开始啦 ": "⭐ Let's start!",
"先提取当前英文标题": "First extract the current English title",
"睡一会防止触发google反爬虫": "Sleep for a while to prevent triggering Google anti-crawler",
"测试": "Test",
"-=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out Markdown file",
"如果index是1的话": "If the index is 1",
"VoidTerminal已经实现了类似的代码": "VoidTerminal has already implemented similar code",
"等待线程锁": "Waiting for thread lock",
"那么我们默认代理生效": "Then we default to proxy",
"结果是一个有效文件": "The result is a valid file",
"⭐ 检查模块": "⭐ Check module",
"备份一份History作为记录": "Backup a copy of History as a record",
"作者Binary-Husky": "Author Binary-Husky",
"将csv文件转excel表格": "Convert CSV file to Excel table",
"获取文章摘要": "Get article summary",
"次代码生成尝试": "Attempt to generate code",
"如果参数是空的": "If the parameter is empty",
"请配置讯飞星火大模型的XFYUN_APPID": "Please configure XFYUN_APPID for the Xunfei Starfire model",
"-=-=-=-=-=-=-=-= 写出第2个文件": "Write the second file",
"代码生成阶段结束": "Code generation phase completed",
"则进行提醒": "Then remind",
"处理异常": "Handle exception",
"可能触发了google反爬虫机制": "May have triggered Google anti-crawler mechanism",
"AnalyzeAMatlabProject的所有源文件": "All source files of AnalyzeAMatlabProject",
"写入": "Write",
"我们5秒后再试一次...": "Let's try again in 5 seconds...",
"判断一下用户是否错误地通过对话通道进入": "Check if the user entered through the dialogue channel by mistake",
"结果": "Result",
"2. 如果没有文件": "2. If there is no file",
"由 test_on_sentence_end": "By test_on_sentence_end",
"则直接使用first section name": "Then directly use the first section name",
"太懒了": "Too lazy",
"记录当前的大章节标题": "Record the current chapter title",
"然后再次点击该插件! 至于您的文件": "Then click the plugin again! As for your file",
"此次我们的错误追踪是": "This time our error tracking is",
"首先在arxiv上搜索": "First search on arxiv",
"被新插件取代": "Replaced by a new plugin",
"正在处理文件": "Processing file",
"除了连接OpenAI之外": "In addition to connecting OpenAI",
"我们检查一下": "Let's check",
"进度": "Progress",
"处理少数情况下的特殊插件的锁定状态": "Handle the locked state of special plugins in a few cases",
"⭐ 开始执行": "⭐ Start execution",
"正常情况": "Normal situation",
"下个句子中已经说完的部分": "The part that has already been said in the next sentence",
"首次运行需要花费较长时间下载NOUGAT参数": "The first run takes a long time to download NOUGAT parameters",
"使用tex格式公式 测试2 给出柯西不等式": "Use the tex format formula to test 2 and give the Cauchy inequality",
"无法从bing获取信息": "Unable to retrieve information from Bing!",
"秒. 请等待任务完成": "Wait for the task to complete",
"开始干正事": "Start doing real work",
"需要花费较长时间下载NOUGAT参数": "It takes a long time to download NOUGAT parameters",
"然后再次点击该插件": "Then click the plugin again",
"受到bing限制": "Restricted by Bing",
"检索文章的历史版本的题目": "Retrieve the titles of historical versions of the article",
"收尾": "Wrap up",
"给定了task": "Given a task",
"某段话的整个句子": "The whole sentence of a paragraph",
"-=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out HTML file -=-=-=-=-=-=-=-=",
"当前文件": "Current file",
"请在输入框内填写需求": "Please fill in the requirements in the input box",
"结果是一个字符串": "The result is a string",
"用插件实现」": "Implemented with a plugin",
"⭐ 到最后一步了": "⭐ Reached the final step",
"重新修改当前part的标题": "Modify the title of the current part again",
"请勿点击“提交”按钮或者“基础功能区”按钮": "Do not click the 'Submit' button or the 'Basic Function Area' button",
"正在执行命令": "Executing command",
"检测到**滞留的缓存文档**": "Detected **stuck cache document**",
"第三步": "Step three",
"失败了~ 别担心": "Failed~ Don't worry",
"动态代码解释器": "Dynamic code interpreter",
"开始执行": "Start executing",
"不给定task": "No task given",
"正在加载NOUGAT...": "Loading NOUGAT...",
"精准翻译PDF文档": "Accurate translation of PDF documents",
"时间限制TIME_LIMIT": "Time limit TIME_LIMIT",
"翻译前后混合 -=-=-=-=-=-=-=-=": "Mixed translation before and after -=-=-=-=-=-=-=-=",
"搞定代码生成": "Code generation is done",
"插件通道": "Plugin channel",
"智能体": "Intelligent agent",
"切换界面明暗 ☀": "Switch interface brightness ☀",
"交换图像的蓝色通道和红色通道": "Swap blue channel and red channel of the image",
"作为函数参数": "As a function parameter",
"先挑选偶数序列号": "First select even serial numbers",
"仅供测试": "For testing only",
"执行成功了": "Execution succeeded",
"开始逐个文件进行处理": "Start processing files one by one",
"当前文件处理列表": "Current file processing list",
"执行失败了": "Execution failed",
"请及时处理": "Please handle it in time",
"源文件": "Source file",
"裁剪图像": "Crop image",
"插件动态生成插件": "Dynamic generation of plugins",
"正在验证上述代码的有效性": "Validating the above code",
"⭐ = 关键步骤": "⭐ = Key step",
"!= 0 代表“提交”键对话通道": "!= 0 represents the 'Submit' key dialogue channel",
"解析python源代码项目": "Parsing Python source code project",
"请检查PDF是否损坏": "Please check if the PDF is damaged",
"插件动态生成": "Dynamic generation of plugins",
"⭐ 分离代码块": "⭐ Separating code blocks",
"已经被记忆": "Already memorized",
"默认用英文的": "Default to English",
"错误追踪": "Error tracking",
"对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent",
"请检查": "Please check",
"检测到被滞留的缓存文档": "Detected cached documents being left behind",
"还有哪些场合允许使用代理": "What other occasions allow the use of proxies",
"1. 如果有文件": "1. If there is a file",
"执行开始": "Execution starts",
"代码生成结束": "Code generation ends",
"请及时点击“**保存当前对话**”获取所有滞留文档": "Please click '**Save Current Dialogue**' in time to obtain all cached documents",
"需点击“**函数插件区**”按钮进行处理": "Click the '**Function Plugin Area**' button for processing",
"此函数已经弃用": "This function has been deprecated",
"以后再写": "Write it later",
"返回给定的url解析出的arxiv_id": "Return the arxiv_id parsed from the given URL",
"⭐ 文件上传区是否有东西": "⭐ Is there anything in the file upload area",
"Nougat解析论文失败": "Nougat failed to parse the paper",
"本源代码中": "In this source code",
"或者基础功能通道": "Or the basic function channel",
"使用zip压缩格式": "Using zip compression format",
"受到google限制": "Restricted by Google",
"如果是": "If it is",
"不用担心": "don't worry"
}

View File

@@ -1007,7 +1007,6 @@
"第一部分": "第1部分",
"的分析如下": "の分析は以下の通りです",
"解决一个mdx_math的bug": "mdx_mathのバグを解決する",
"底部输入区": "下部の入力エリア",
"函数插件输入输出接驳区": "関数プラグインの入出力接続エリア",
"打开浏览器": "ブラウザを開く",
"免费用户填3": "無料ユーザーは3を入力してください",

View File

@@ -90,5 +90,7 @@
"解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID",
"虚空终端主路由": "VoidTerminalMainRoute",
"批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT",
"解析PDF_基于NOUGAT": "ParsePDF_NOUGAT"
"解析PDF_基于NOUGAT": "ParsePDF_NOUGAT",
"解析一个Matlab项目": "AnalyzeAMatlabProject",
"函数动态生成": "DynamicFunctionGeneration"
}

View File

@@ -346,7 +346,6 @@
"情况会好转": "情況會好轉",
"超过512个": "超過512個",
"多线": "多線",
"底部输入区": "底部輸入區",
"合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格",
"暗色主题": "暗色主題",
"提高限制请查询": "提高限制請查詢",

main.py (105 changed lines)
View File

@@ -2,13 +2,15 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
def main():
import gradio as gr
if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖详情信息见requirements.txt"
if gr.__version__ not in ['3.32.6']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
DARK_MODE, = get_conf('DARK_MODE')
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -17,8 +19,16 @@ def main():
initial_prompt = "Serve me as a writing and programming assistant."
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
description = "代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic)"
description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)"
description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
description += "</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), "
description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)."
description += "</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交"
description += "</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮"
description += "</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮"
description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
description += "</br></br>如何语音对话: 请阅读Wiki"
# 问询记录, python 版本建议3.9+(越新越好)
import logging, uuid
@@ -60,7 +70,7 @@ def main():
cancel_handles = []
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
gr.HTML(title_html)
secret_css, secret_font = gr.Textbox(visible=False), gr.Textbox(visible=False)
secret_css, dark_mode = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False)
cookies = gr.State(load_chat_cookies())
with gr_L1():
with gr_L2(scale=2, elem_id="gpt-chat"):
@@ -72,11 +82,11 @@ def main():
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
with gr.Row():
submitBtn = gr.Button("提交", variant="primary")
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
with gr.Row():
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
@@ -87,7 +97,7 @@ def main():
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant)
functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
functional[k]["Button"].style(size="sm")
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
@@ -100,7 +110,9 @@ def main():
if not plugin.get("AsButton", True): continue
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible).style(size="sm")
info = plugins[k].get("Info", k)
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
with gr.Row():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = []
@@ -117,15 +129,26 @@ def main():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"):
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
with gr.Row():
with gr.Tab("上传文件", elem_id="interact-panel"):
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
with gr.Tab("界面外观", elem_id="interact-panel"):
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
dark_mode_btn.click(None, None, None, _js="""() => {
if (document.querySelectorAll('.dark').length) {
@@ -135,13 +158,17 @@ def main():
}
}""",
)
with gr.Tab("帮助", elem_id="interact-panel"):
gr.Markdown(description)
with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
with gr.Row():
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
with gr.Row():
submitBtn2 = gr.Button("提交", variant="primary")
with gr.Row():
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
with gr.Row() as row:
row.style(equal_height=True)
with gr.Column(scale=10):
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", lines=8, label="输入区2").style(container=False)
with gr.Column(scale=1, min_width=40):
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
@@ -151,12 +178,12 @@ def main():
ret = {}
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
if "底部输入区" in a: ret.update({txt: gr.update(value="")})
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
return ret
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
# 整理反复出现的控件句柄组合
@@ -184,6 +211,7 @@ def main():
cancel_handles.append(click_handle)
# 文件上传区接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
# 函数插件-固定按钮区
for k in plugins:
if not plugins[k].get("AsButton", True): continue
@@ -193,7 +221,8 @@ def main():
# 函数插件-下拉菜单与随变按钮的互动
def on_dropdown_changed(k):
variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary"
ret = {switchy_bt: gr.update(value=k, variant=variant)}
info = plugins[k].get("Info", k)
ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')}
if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
else:
@@ -266,25 +295,37 @@ def main():
cookies.update({'uuid': uuid.uuid4()})
return cookies
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
darkmode_js = """(dark) => {
dark = dark == "True";
if (document.querySelectorAll('.dark').length) {
if (!dark){
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
}
} else {
if (dark){
document.querySelector('body').classList.add('dark');
}
}
}"""
demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
# gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
def auto_opentab_delay():
import threading, webbrowser, time
print(f"如果浏览器没有自动打开请复制并转到以下URL")
print(f"\t(亮色主题): http://localhost:{PORT}")
print(f"\t(暗色主题): http://localhost:{PORT}/?__theme=dark")
if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题): http://localhost:{PORT}")
else: print(f"\t「亮色主题已启用(支持动态切换主题): http://localhost:{PORT}")
def open():
time.sleep(2) # 打开浏览器
DARK_MODE, = get_conf('DARK_MODE')
if DARK_MODE: webbrowser.open_new_tab(f"http://localhost:{PORT}/?__theme=dark")
else: webbrowser.open_new_tab(f"http://localhost:{PORT}")
webbrowser.open_new_tab(f"http://localhost:{PORT}")
threading.Thread(target=open, name="open-browser", daemon=True).start()
threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
auto_opentab_delay()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
quiet=True,
server_name="0.0.0.0",
server_port=PORT,
favicon_path="docs/logo.png",

View File

@@ -126,6 +126,15 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-32k": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 32768,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
# azure openai
"azure-gpt-3.5":{
"fn_with_ui": chatgpt_ui,
@@ -136,6 +145,15 @@ model_info = {
"token_cnt": get_token_num_gpt35,
},
"azure-gpt-4":{
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": azure_endpoint,
"max_token": 8192,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# api_2d
"api2d-gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,

View File

@@ -3,7 +3,7 @@ from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
from toolbox import update_ui, get_conf
from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
load_message = "ChatGLM尚未加载加载需要一段时间。注意取决于`config.py`的配置ChatGLM消耗大量的内存CPU或显存GPU也许会导致低配计算机卡死 ……"
@@ -48,16 +48,17 @@ class GetGLMHandle(Process):
while True:
try:
if self.chatglm_model is None:
self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
if device=='cpu':
self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
with ProxyNetworkActivate('Download_LLM'):
if self.chatglm_model is None:
self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
if device=='cpu':
self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
else:
self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
self.chatglm_model = self.chatglm_model.eval()
break
else:
self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
self.chatglm_model = self.chatglm_model.eval()
break
else:
break
break
except:
retry += 1
if retry > 3:

View File

@@ -30,7 +30,7 @@ class GetONNXGLMHandle(LocalLLMHandle):
with open(os.path.expanduser('~/.cache/huggingface/token'), 'w') as f:
f.write(huggingface_token)
model_id = 'meta-llama/Llama-2-7b-chat-hf'
with ProxyNetworkActivate():
with ProxyNetworkActivate('Download_LLM'):
self._tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=huggingface_token)
# use fp16
model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=huggingface_token).eval()

View File

@@ -1,5 +1,4 @@
protobuf
transformers>=4.27.1
cpm_kernels
torch>=1.10
mdtex2html

View File

@@ -1,5 +1,4 @@
protobuf
transformers>=4.27.1
cpm_kernels
torch>=1.10
mdtex2html

View File

@@ -2,6 +2,5 @@ jittor >= 1.3.7.9
jtorch >= 0.1.3
torch
torchvision
transformers==4.26.1
pandas
jieba

View File

@@ -1,5 +1,4 @@
torch
transformers==4.25.1
sentencepiece
datasets
accelerate

View File

@@ -1,8 +1,8 @@
./docs/gradio-3.32.2-py3-none-any.whl
./docs/gradio-3.32.6-py3-none-any.whl
pydantic==1.10.11
tiktoken>=0.3.3
requests[socks]
transformers
transformers>=4.27.1
python-markdown-math
beautifulsoup4
prompt_toolkit

View File

@@ -6,11 +6,14 @@
import os, sys
def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)
validate_path() # 返回项目根路径
from tests.test_utils import plugin_test
if __name__ == "__main__":
from tests.test_utils import plugin_test
plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
# plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
# plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
# plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')

View File

@@ -74,7 +74,7 @@ def plugin_test(main_input, plugin, advanced_arg=None):
plugin_kwargs['plugin_kwargs'] = advanced_arg
my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
with Live(Markdown(""), auto_refresh=False) as live:
with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
for cookies, chat, hist, msg in my_working_plugin:
md_str = vt.chat_to_markdown_str(chat)
md = Markdown(md_str)

View File

@@ -9,6 +9,11 @@
box-shadow: none;
}
#input-plugin-group .secondary-wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
border: none;
min-width: 0;
}
/* hide selector label */
#input-plugin-group .svelte-1gfkn6j {
visibility: hidden;
@@ -23,4 +28,87 @@
/* status bar height */
.min.svelte-1yrv54 {
min-height: var(--size-12);
}
/* copy btn */
.message-btn-row {
width: 19px;
height: 19px;
position: absolute;
left: calc(100% + 3px);
top: 0;
display: flex;
justify-content: space-between;
}
/* .message-btn-row-leading, .message-btn-row-trailing {
display: inline-flex;
gap: 4px;
} */
.message-btn-row button {
font-size: 18px;
align-self: center;
align-items: center;
flex-wrap: nowrap;
white-space: nowrap;
display: inline-flex;
flex-direction: row;
gap: 4px;
padding-block: 2px !important;
}
/* Scrollbar Width */
::-webkit-scrollbar {
width: 12px;
}
/* Scrollbar Track */
::-webkit-scrollbar-track {
background: #f1f1f1;
border-radius: 12px;
}
/* Scrollbar Handle */
::-webkit-scrollbar-thumb {
background: #888;
border-radius: 12px;
}
/* Scrollbar Handle on hover */
::-webkit-scrollbar-thumb:hover {
background: #555;
}
/* input btns: clear, reset, stop */
#input-panel button {
min-width: min(80px, 100%);
}
/* input btns: clear, reset, stop */
#input-panel2 button {
min-width: min(80px, 100%);
}
#cbs {
background-color: var(--block-background-fill) !important;
}
#interact-panel .form {
border: hidden
}
.drag-area {
border: solid;
border-width: thin;
user-select: none;
padding-left: 2%;
}
.floating-component #input-panel2 {
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border: solid;
border-width: thin;
border-top-width: 0;
}

View File

@@ -1,4 +1,81 @@
function ChatBotHeight() {
function gradioApp() {
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
const elems = document.getElementsByTagName('gradio-app');
const elem = elems.length == 0 ? document : elems[0];
if (elem !== document) {
elem.getElementById = function(id) {
return document.getElementById(id);
};
}
return elem.shadowRoot ? elem.shadowRoot : elem;
}
function addCopyButton(botElement) {
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
// Copy bot button
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
if (messageBtnColumnElement) {
// Do something if .message-btn-column exists, for example, remove it
// messageBtnColumnElement.remove();
return;
}
var copyButton = document.createElement('button');
copyButton.classList.add('copy-bot-btn');
copyButton.setAttribute('aria-label', 'Copy');
copyButton.innerHTML = copyIcon;
copyButton.addEventListener('click', async () => {
const textToCopy = botElement.innerText;
try {
if ("clipboard" in navigator) {
await navigator.clipboard.writeText(textToCopy);
copyButton.innerHTML = copiedIcon;
setTimeout(() => {
copyButton.innerHTML = copyIcon;
}, 1500);
} else {
const textArea = document.createElement("textarea");
textArea.value = textToCopy;
document.body.appendChild(textArea);
textArea.select();
try {
document.execCommand('copy');
copyButton.innerHTML = copiedIcon;
setTimeout(() => {
copyButton.innerHTML = copyIcon;
}, 1500);
} catch (error) {
console.error("Copy failed: ", error);
}
document.body.removeChild(textArea);
}
} catch (error) {
console.error("Copy failed: ", error);
}
});
var messageBtnColumn = document.createElement('div');
messageBtnColumn.classList.add('message-btn-row');
messageBtnColumn.appendChild(copyButton);
botElement.appendChild(messageBtnColumn);
}
function chatbotContentChanged(attempt = 1, force = false) {
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
for (var i = 0; i < attempt; i++) {
setTimeout(() => {
gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
}, i === 0 ? 0 : 200);
}
}
function chatbotAutoHeight(){
// automatically adjust the chatbot height
function update_height(){
var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
if (panel_height_target!=chatbot_height)
@@ -28,6 +105,15 @@ function ChatBotHeight() {
}, 50); // run every 50 ms
}
function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
var chatbotObserver = new MutationObserver(() => {
chatbotContentChanged(1);
});
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
if (LAYOUT === "LEFT-RIGHT") {chatbotAutoHeight();}
}
function get_elements(consider_state_panel=false) {
var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
if (!chatbot) {
@@ -36,14 +122,14 @@ function get_elements(consider_state_panel=false) {
const panel1 = document.querySelector('#input-panel').getBoundingClientRect();
const panel2 = document.querySelector('#basic-panel').getBoundingClientRect()
const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect();
const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
// const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
const panel5 = document.querySelector('#input-panel2').getBoundingClientRect();
const panel_active = document.querySelector('#state-panel').getBoundingClientRect();
if (consider_state_panel || panel_active.height < 25){
document.state_panel_height = panel_active.height;
}
// 25 is the chatbot label height, 16 is the gap on the right
var panel_height_target = panel1.height + panel2.height + panel3.height + panel4.height + panel5.height - 25 + 16*3;
var panel_height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16*2;
// prevent the dynamic state-panel height from affecting the target height
panel_height_target = panel_height_target + (document.state_panel_height-panel_active.height)
var panel_height_target = parseInt(panel_height_target);

View File

@@ -198,7 +198,7 @@
}
/* small buttons */
.sm.svelte-1ipelgc {
.sm {
font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
--button-small-text-weight: 600;
--button-small-text-size: 16px;
@@ -208,7 +208,7 @@
border-top-left-radius: 0px;
}
#plugin-panel .sm.svelte-1ipelgc {
#plugin-panel .sm {
font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
--button-small-text-weight: 400;
--button-small-text-size: 14px;

View File

@@ -57,12 +57,9 @@ def adjust_theme():
button_cancel_text_color_dark="white",
)
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# add a cute virtual mascot (waifu)
if ADD_WAIFU:
js += """

View File

@@ -9,15 +9,15 @@
border-radius: 4px;
}
#plugin-panel .dropdown-arrow.svelte-p5edak {
width: 50px;
#plugin-panel .dropdown-arrow {
width: 25px;
}
#plugin-panel input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
padding-left: 5px;
}
/* small buttons */
.sm.svelte-1ipelgc {
#basic-panel .sm {
font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
--button-small-text-weight: 600;
--button-small-text-size: 16px;
@@ -27,7 +27,7 @@
border-top-left-radius: 6px;
}
#plugin-panel .sm.svelte-1ipelgc {
#plugin-panel .sm {
font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
--button-small-text-weight: 400;
--button-small-text-size: 14px;

View File

@@ -57,11 +57,8 @@ def adjust_theme():
button_cancel_text_color_dark="white",
)
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# add a cute virtual mascot (waifu)
if ADD_WAIFU:

View File

@@ -5,7 +5,7 @@ CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAY
def dynamic_set_theme(THEME):
set_theme = gr.themes.ThemeClass()
with ProxyNetworkActivate():
with ProxyNetworkActivate('Download_Gradio_Theme'):
logging.info('正在下载Gradio主题请稍等。')
if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
@@ -16,19 +16,16 @@ def adjust_theme():
try:
set_theme = gr.themes.ThemeClass()
with ProxyNetworkActivate():
with ProxyNetworkActivate('Download_Gradio_Theme'):
logging.info('正在下载Gradio主题请稍等。')
THEME, = get_conf('THEME')
if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
set_theme = set_theme.from_hub(THEME.lower())
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# add a cute virtual mascot (waifu)
if ADD_WAIFU:
js += """

View File

@@ -73,12 +73,8 @@ def adjust_theme():
chatbot_code_background_color_dark="*neutral_950",
)
js = ''
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# add a cute virtual mascot (waifu)
if ADD_WAIFU:

View File

@@ -472,7 +472,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted rar archive to {}".format(dest_dir))
except:
print("Rar format requires additional dependencies to install")
return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件'
return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议使用zip压缩格式。'
# third-party dependency: run pip install py7zr beforehand
elif file_extension == '.7z':
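For the rar branch above, a minimal sketch of the optional dependency path, assuming pip install rarfile plus a system unrar backend; the helper name extract_rar is illustrative and not part of the repo:

import rarfile  # optional dependency: pip install rarfile (needs an unrar backend on the system)

def extract_rar(file_path, dest_dir):
    # Mirrors the behaviour hinted at above: extract the archive, otherwise advise zip.
    try:
        with rarfile.RarFile(file_path) as rf:
            rf.extractall(path=dest_dir)
        print("Successfully extracted rar archive to {}".format(dest_dir))
    except Exception:
        print("Rar format requires additional dependencies; consider using the zip format instead")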
@@ -523,10 +523,11 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
# copy the file over
if not os.path.exists(new_path): shutil.copyfile(file, new_path)
# record the file in the chatbot cookie to avoid cross-user interference
if chatbot:
if chatbot is not None:
if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
else: current = []
chatbot._cookies.update({'files_to_promote': [new_path] + current})
return new_path
def disable_auto_promotion(chatbot):
chatbot._cookies.update({'files_to_promote': []})
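With the chatbot-is-not-None guard in place, a plugin can publish files whether or not a chatbot handle is available. A minimal sketch follows; the publish_result helper and the output file name are made up for illustration:

from toolbox import promote_file_to_downloadzone, disable_auto_promotion

def publish_result(result_path, chatbot=None):
    # Hypothetical helper: hand a finished file to the UI download zone.
    if chatbot is not None:
        disable_auto_promotion(chatbot)  # drop any files queued by earlier steps
    # rename_file is optional; when chatbot is given, the new path is also recorded
    # in chatbot._cookies['files_to_promote'] so other users' sessions stay untouched.
    return promote_file_to_downloadzone(result_path,
                                        rename_file='translated.pdf',
                                        chatbot=chatbot)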
@@ -580,7 +581,7 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo
# gather the set of uploaded files
moved_files = [fp for fp in glob.glob(f'{target_path_base}/**/*', recursive=True)]
if "底部输入区" in checkboxes:
if "浮动输入区" in checkboxes:
txt, txt2 = "", target_path_base
else:
txt, txt2 = target_path_base, ""
@@ -955,7 +956,19 @@ class ProxyNetworkActivate():
"""
A context manager used to enable the network proxy for a short block of code (originally named TempProxy).
"""
def __init__(self, task=None) -> None:
self.task = task
if not task:
# no task given: the proxy takes effect by default
self.valid = True
else:
# a task is given: check whether it is listed in WHEN_TO_USE_PROXY
from toolbox import get_conf
WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY')
self.valid = (task in WHEN_TO_USE_PROXY)
def __enter__(self):
if not self.valid: return self
from toolbox import get_conf
proxies, = get_conf('proxies')
if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
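Call sites opt into the proxy by naming a task. A minimal usage sketch, assuming a WHEN_TO_USE_PROXY list in the configuration; only 'Download_Gradio_Theme' is taken from the diff, everything else here is illustrative:

from toolbox import ProxyNetworkActivate
import requests

# Hypothetical config excerpt: only the listed tasks route through the proxy.
# WHEN_TO_USE_PROXY = ["Download_Gradio_Theme"]

def fetch_remote_theme(url):
    # Inside the with-block the configured proxy appears to be applied via the
    # process environment (note the os.environ handling in __enter__ above),
    # but only when 'Download_Gradio_Theme' is listed in WHEN_TO_USE_PROXY.
    with ProxyNetworkActivate('Download_Gradio_Theme'):
        return requests.get(url, timeout=10).text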

View File

@@ -1,5 +1,5 @@
{
"version": 3.53,
"version": 3.55,
"show_feature": true,
"new_feature": "支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) <-> 改进UI设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
"new_feature": "重新编译Gradio优化使用体验 <-> 新增动态代码解释器CodeInterpreter <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) <-> 改进UI设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
}