Compare commits


32 Commits

Author SHA1 Message Date
binary-husky
32ddcd067a Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-10-16 00:05:53 +08:00
binary-husky
98ef658307 Fix latency issue in the warmup module 2023-10-16 00:05:31 +08:00
binary-husky
a4de91d000 Fix indentation 2023-10-15 22:53:57 +08:00
binary-husky
1bb437a5d0 Fine-tune prompts 2023-10-15 21:17:00 +08:00
binary-husky
4421219c2b Merge branch 'frontier' 2023-10-15 20:56:49 +08:00
binary-husky
ea28db855d Improve the custom menu 2023-10-15 20:54:16 +08:00
binary-husky
5aea7b3d09 Fine-tune multi-threaded execution 2023-10-15 19:13:25 +08:00
binary-husky
5274117cf1 Insert a placeholder abstract when the abstract is missing 2023-10-14 23:48:37 +08:00
binary-husky
673faf8cef Grobid load balancing 2023-10-14 19:59:35 +08:00
binary-husky
130ae31d55 Merge pull request #1168 from Menghuan1918/master
fix bug #1167: the academic assistant tries a direct connection first when proxies comes back empty
2023-10-13 17:02:01 +08:00
Menghuan1918
c3abc46d4d Try a direct connection first when proxies comes back empty 2023-10-13 15:23:06 +08:00
binary-husky
4df75d49ad Compatibility with some third-party proxies 2023-10-12 23:42:45 +08:00
binary-husky
9ea0fe4de2 Update GithubAction+NoLocal+Latex 2023-10-12 21:23:15 +08:00
binary-husky
8698c5a80f Merge pull request #1159 from Skyzayre/patch-1
Update Dockerfile
2023-10-11 17:18:28 +08:00
binary-husky
383f7f4f77 add webrtcvad dependency 2023-10-11 15:51:34 +08:00
binary-husky
34d784df79 12 2023-10-11 15:48:25 +08:00
binary-husky
662bebfc02 SSL 2023-10-11 15:34:06 +08:00
binary-husky
0c3b00fc6b cookie space 2023-10-11 12:33:50 +08:00
binary-husky
b6e370e8c9 ymp 2023-10-11 11:30:34 +08:00
binary-husky
71ea8e584a Custom buttons for the basic function area 2023-10-11 11:21:41 +08:00
Skyzayre
a5491b9199 Update Dockerfile
gradio has been updated to 3.32.6, but the Dockerfile still pins 3.32.2
2023-10-11 00:26:16 +08:00
binary-husky
6f383c1dc8 Support customizing the basic function area 2023-10-11 00:14:56 +08:00
binary-husky
500a0cbd16 Major voice assistant optimization 2023-10-09 01:18:05 +08:00
binary-husky
1ef6730369 Update README.md 2023-10-08 23:14:07 +08:00
binary-husky
491174095a Update docker-compose instructions 2023-10-07 11:59:06 +08:00
binary-husky
02c270410c Reduce the size of the Latex container 2023-10-06 11:44:10 +08:00
binary-husky
89eec21f27 Random selection to get around openai rate limits 2023-10-06 10:50:41 +08:00
binary-husky
49cea97822 Enable automatic theme switching 2023-10-06 10:36:30 +08:00
binary-husky
6310b65d70 Recompile Gradio to improve the user experience 2023-10-06 10:32:03 +08:00
binary-husky
93c76e1809 Update the bundled gradio version 2023-10-06 09:54:07 +08:00
binary-husky
f64cf7a3d1 update translation matrix 2023-10-02 14:24:01 +08:00
binary-husky
fdffbee1b0 Update toolbox.py 2023-09-30 09:56:30 +08:00
34 changed files with 771 additions and 159 deletions

View File

@@ -17,7 +17,7 @@ WORKDIR /gpt
 # 安装大部分依赖利用Docker缓存加速以后的构建
 COPY requirements.txt ./
-COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
+COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
 RUN pip3 install -r requirements.txt

View File

@@ -1,6 +1,6 @@
 > **Note**
 >
-> 2023.7.8: Gradio, Pydantic依赖调整已修改 `requirements.txt`。请及时**更新代码**,安装依赖时,请严格选择`requirements.txt`中**指定的版本**
+> 2023.10.8: Gradio, Pydantic依赖调整已修改 `requirements.txt`。请及时**更新代码**,安装依赖时,请严格选择`requirements.txt`中**指定的版本**
 >
 > `pip install -r requirements.txt`
@@ -310,6 +310,8 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 ### II版本:
 - version 3.60todo: 优化虚空终端引入code interpreter和更多插件
+- version 3.55: 重构前端界面,引入悬浮窗口与菜单栏
+- version 3.54: 新增动态代码解释器Code Interpreter待完善
 - version 3.53: 支持动态选择不同界面主题,提高稳定性&解决多用户冲突问题
 - version 3.50: 使用自然语言调用本项目的所有函数插件虚空终端支持插件分类改进UI设计新主题
 - version 3.49: 支持百度千帆平台和文心一言
@@ -331,7 +333,7 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 - version 2.0: 引入模块化函数插件
 - version 1.0: 基础功能
-gpt_academic开发者QQ群-2610599535
+GPT Academic开发者QQ群`610599535`
 - 已知问题
 - 某些浏览器翻译插件干扰此软件前端的运行

View File

@@ -48,6 +48,7 @@ DEFAULT_WORKER_NUM = 3
THEME = "Default" THEME = "Default"
AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"] AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
# 对话窗的高度 仅在LAYOUT="TOP-DOWN"时生效) # 对话窗的高度 仅在LAYOUT="TOP-DOWN"时生效)
CHATBOT_HEIGHT = 1115 CHATBOT_HEIGHT = 1115
@@ -58,7 +59,10 @@ CODE_HIGHLIGHT = True
# 窗口布局 # 窗口布局
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
DARK_MODE = True # 暗色模式 / 亮色模式
# 暗色模式 / 亮色模式
DARK_MODE = True
# 发送请求到OpenAI后等待多久判定为超时 # 发送请求到OpenAI后等待多久判定为超时
@@ -81,7 +85,7 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"] "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", # P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
# "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] # "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
@@ -121,6 +125,11 @@ AUTHENTICATION = []
CUSTOM_PATH = "/" CUSTOM_PATH = "/"
# HTTPS 秘钥和证书(不需要修改)
SSL_KEYFILE = ""
SSL_CERTFILE = ""
# 极少数情况下openai的官方KEY需要伴随组织编码格式如org-xxxxxxxxxxxxxxxxxxxxxxxx使用 # 极少数情况下openai的官方KEY需要伴随组织编码格式如org-xxxxxxxxxxxxxxxxxxxxxxxx使用
API_ORG = "" API_ORG = ""
@@ -136,7 +145,7 @@ AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处
AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
# 使用Newbing # 使用Newbing (不推荐使用,未来将删除)
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"] NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """ NEWBING_COOKIES = """
put your new bing cookies here put your new bing cookies here
@@ -173,20 +182,30 @@ HUGGINGFACE_ACCESS_TOKEN = "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"
# 获取方法复制以下空间https://huggingface.co/spaces/qingxu98/grobid设为public然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space" # 获取方法复制以下空间https://huggingface.co/spaces/qingxu98/grobid设为public然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
GROBID_URLS = [ GROBID_URLS = [
"https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space", "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
"https://shaocongma-grobid.hf.space","https://FBR123-grobid.hf.space", "https://yeku-grobid.hf.space", "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
"https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
] ]
# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭 # 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
ALLOW_RESET_CONFIG = False ALLOW_RESET_CONFIG = False
# 临时的上传文件夹位置,请勿修改 # 临时的上传文件夹位置,请勿修改
PATH_PRIVATE_UPLOAD = "private_upload" PATH_PRIVATE_UPLOAD = "private_upload"
# 日志文件夹的位置,请勿修改 # 日志文件夹的位置,请勿修改
PATH_LOGGING = "gpt_log" PATH_LOGGING = "gpt_log"
# 除了连接OpenAI之外还有哪些场合允许使用代理请勿修改 # 除了连接OpenAI之外还有哪些场合允许使用代理请勿修改
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"] WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"]
# 自定义按钮的最大数量限制
NUM_CUSTOM_BASIC_BTN = 4
""" """
在线大模型配置关联关系示意图 在线大模型配置关联关系示意图

View File

@@ -91,6 +91,13 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
 import core_functional
 importlib.reload(core_functional)    # 热更新prompt
 core_functional = core_functional.get_core_functions()
+addition = chatbot._cookies['customize_fn_overwrite']
+if additional_fn in addition:
+    # 自定义功能
+    inputs = addition[additional_fn]["Prefix"] + inputs + addition[additional_fn]["Suffix"]
+    return inputs, history
+else:
+    # 预制功能
     if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
     inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
     if core_functional[additional_fn].get("AutoClearHistory", False):
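Editor's note: the new branch gives user-defined buttons stored in the session cookie priority over the predefined core_functional entries; both kinds simply wrap the raw input in a stored Prefix/Suffix. A minimal standalone sketch of that dispatch (the cookie payload here is hypothetical; the real 'customize_fn_overwrite' dict is written by the custom-menu UI added in main.py further down):

```python
# Minimal sketch of the dispatch above. The cookie payload is hypothetical;
# the real 'customize_fn_overwrite' dict comes from chatbot._cookies.
def apply_fn(additional_fn, inputs, customize_fn_overwrite, core_functional):
    if additional_fn in customize_fn_overwrite:
        fn = customize_fn_overwrite[additional_fn]   # custom button wins
    else:
        fn = core_functional[additional_fn]          # predefined button
    return fn["Prefix"] + inputs + fn["Suffix"]

cookie_payload = {"自定义按钮1": {"Title": "Polish", "Prefix": "Please polish this paragraph: ", "Suffix": ""}}
print(apply_fn("自定义按钮1", "draft text", cookie_payload, core_functional={}))
# -> 'Please polish this paragraph: draft text'
```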

View File

@@ -190,10 +190,10 @@ def get_crazy_functions():
"Info": "多线程解析并翻译此项目的源码 | 不需要输入参数", "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
"Function": HotReload(解析项目本身) "Function": HotReload(解析项目本身)
}, },
"[插件demo]历史上的今天": { "历史上的今天": {
"Group": "对话", "Group": "对话",
"AsButton": True, "AsButton": True,
"Info": "查看历史上的今天事件 | 不需要输入参数", "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
"Function": HotReload(高阶功能模板函数) "Function": HotReload(高阶功能模板函数)
}, },
"精准翻译PDF论文": { "精准翻译PDF论文": {
@@ -252,7 +252,7 @@ def get_crazy_functions():
"Function": HotReload(Latex中文润色) "Function": HotReload(Latex中文润色)
}, },
# 被新插件取代 # 已经被新插件取代
# "Latex项目全文中译英输入路径或上传压缩包": { # "Latex项目全文中译英输入路径或上传压缩包": {
# "Group": "学术", # "Group": "学术",
# "Color": "stop", # "Color": "stop",
@@ -260,6 +260,8 @@ def get_crazy_functions():
# "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包", # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex中译英) # "Function": HotReload(Latex中译英)
# }, # },
# 已经被新插件取代
# "Latex项目全文英译中输入路径或上传压缩包": { # "Latex项目全文英译中输入路径或上传压缩包": {
# "Group": "学术", # "Group": "学术",
# "Color": "stop", # "Color": "stop",
@@ -395,7 +397,7 @@ def get_crazy_functions():
try: try:
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
function_plugins.update({ function_plugins.update({
"Markdown翻译手动指定语言)": { "Markdown翻译指定翻译成何种语言)": {
"Group": "编程", "Group": "编程",
"Color": "stop", "Color": "stop",
"AsButton": False, "AsButton": False,
@@ -440,7 +442,7 @@ def get_crazy_functions():
try: try:
from crazy_functions.交互功能函数模板 import 交互功能模板函数 from crazy_functions.交互功能函数模板 import 交互功能模板函数
function_plugins.update({ function_plugins.update({
"交互功能模板函数": { "交互功能模板Demo函数查找wallhaven.cc的壁纸": {
"Group": "对话", "Group": "对话",
"Color": "stop", "Color": "stop",
"AsButton": False, "AsButton": False,
@@ -500,11 +502,11 @@ def get_crazy_functions():
if ENABLE_AUDIO: if ENABLE_AUDIO:
from crazy_functions.语音助手 import 语音助手 from crazy_functions.语音助手 import 语音助手
function_plugins.update({ function_plugins.update({
"实时音频采集": { "实时语音对话": {
"Group": "对话", "Group": "对话",
"Color": "stop", "Color": "stop",
"AsButton": True, "AsButton": True,
"Info": "开始语言对话 | 没有输入参数", "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
"Function": HotReload(语音助手) "Function": HotReload(语音助手)
} }
}) })
@@ -537,18 +539,6 @@ def get_crazy_functions():
except: except:
print('Load function plugin failed') print('Load function plugin failed')
# try:
# from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter
# function_plugins.update({
# "CodeInterpreter开发中仅供测试": {
# "Group": "编程|对话",
# "Color": "stop",
# "AsButton": False,
# "Function": HotReload(虚空终端CodeInterpreter)
# }
# })
# except:
# print('Load function plugin failed')
# try: # try:
# from crazy_functions.chatglm微调工具 import 微调数据集生成 # from crazy_functions.chatglm微调工具 import 微调数据集生成
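Editor's note: across all the renames above, the registration schema itself is stable — each plugin is a dict keyed by its display name, carrying Group/Color/AsButton/Info/Function fields, and HotReload wraps the callable so module edits take effect without restarting the app. A hypothetical registration following that schema (names made up for illustration):

```python
# Hypothetical plugin registration matching the schema visible above.
def my_plugin(*args, **kwargs):
    ...

function_plugins = {}
function_plugins.update({
    "我的插件": {
        "Group": "对话",        # which function group(s) the entry belongs to
        "Color": "stop",        # button color variant
        "AsButton": False,      # dropdown-only, no dedicated button
        "Info": "示例插件 | 不需要输入参数",
        "Function": my_plugin,  # the project wraps this in HotReload(...)
    }
})
```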

View File

@@ -69,12 +69,15 @@ def request_gpt_model_in_new_thread_with_ui_alive(
 yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
 executor = ThreadPoolExecutor(max_workers=16)
 mutable = ["", time.time(), ""]
+# 看门狗耐心
+watch_dog_patience = 5
+# 请求任务
 def _req_gpt(inputs, history, sys_prompt):
     retry_op = retry_times_at_unknown_error
     exceeded_cnt = 0
     while True:
         # watchdog error
-        if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
+        if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
             raise RuntimeError("检测到程序终止。")
         try:
             # 【第一种情况】:顺利完成
@@ -193,6 +196,9 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
 # 跨线程传递
 mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
+# 看门狗耐心
+watch_dog_patience = 5
 # 子线程任务
 def _req_gpt(index, inputs, history, sys_prompt):
     gpt_say = ""
@@ -201,7 +207,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     mutable[index][2] = "执行中"
     while True:
         # watchdog error
-        if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
+        if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience:
             raise RuntimeError("检测到程序终止。")
         try:
             # 【第一种情况】:顺利完成
@@ -275,7 +281,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
 # 在前端打印些好玩的东西
 for thread_index, _ in enumerate(worker_done):
     print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
-        replace('\n', '').replace('```', '...').replace(
+        replace('\n', '').replace('`', '.').replace(
         ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
     observe_win.append(print_something_really_funny)
 # 在前端打印些好玩的东西
@@ -301,7 +307,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
 gpt_res = f.result()
 chatbot.append([inputs_show_user, gpt_res])
 yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
-time.sleep(0.3)
+time.sleep(0.5)
 return gpt_response_collection
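Editor's note: both hunks replace a bare 5-second literal with a named watch_dog_patience. The underlying pattern is worth spelling out: the UI generator refreshes a timestamp inside the shared mutable list on every poll, and the worker raises once the heartbeat goes stale, so a closed browser page reliably kills orphaned requests. A stripped-down, runnable sketch (illustrative names, not the project's API):

```python
import threading, time

# Watchdog sketch: mutable[1] is a heartbeat timestamp the UI loop refreshes;
# the worker aborts once the heartbeat goes stale.
watch_dog_patience = 5  # seconds of silence tolerated before aborting

def worker(mutable):
    while True:
        if time.time() - mutable[1] > watch_dog_patience:
            print("watchdog fired: front end stopped polling, aborting worker")
            return
        time.sleep(0.5)  # ... do a slice of the real request work here ...

mutable = ["", time.time(), ""]   # [partial_result, heartbeat, status]
threading.Thread(target=worker, args=(mutable,), daemon=True).start()
for _ in range(3):
    mutable[1] = time.time()      # heartbeat refresh, as the UI loop does
    time.sleep(1)
time.sleep(watch_dog_patience + 1)  # stop refreshing; worker exits on its own
```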

View File

@@ -342,10 +342,33 @@ def merge_tex_files(project_foler, main_file, mode):
 pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
 match_opt1 = pattern_opt1.search(main_file)
 match_opt2 = pattern_opt2.search(main_file)
+if (match_opt1 is None) and (match_opt2 is None):
+    # "Cannot find paper abstract section!"
+    main_file = insert_abstract(main_file)
+    match_opt1 = pattern_opt1.search(main_file)
+    match_opt2 = pattern_opt2.search(main_file)
 assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
 return main_file

+insert_missing_abs_str = r"""
+\begin{abstract}
+The GPT-Academic program cannot find abstract section in this paper.
+\end{abstract}
+"""
+
+def insert_abstract(tex_content):
+    if "\\maketitle" in tex_content:
+        # find the position of "\maketitle"
+        find_index = tex_content.index("\\maketitle")
+        # find the nearest ending line
+        end_line_index = tex_content.find("\n", find_index)
+        # insert "abs_str" on the next line
+        modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
+        return modified_tex
+    else:
+        return tex_content

 """
 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 Post process
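Editor's note: the fallback only fires when neither \begin{abstract} nor \abstract{...} matches; a stub abstract is spliced in on the line after \maketitle, so the assertion that follows can still pass. A quick self-contained check of that behavior (the helper is copied from the hunk above; the toy document is made up):

```python
import re

# Self-contained check of the fallback above.
insert_missing_abs_str = r"""
\begin{abstract}
The GPT-Academic program cannot find abstract section in this paper.
\end{abstract}
"""

def insert_abstract(tex_content):
    if "\\maketitle" in tex_content:
        find_index = tex_content.index("\\maketitle")        # position of \maketitle
        end_line_index = tex_content.find("\n", find_index)  # end of that line
        return (tex_content[:end_line_index+1] + '\n\n' +
                insert_missing_abs_str + '\n\n' +
                tex_content[end_line_index+1:])
    return tex_content

doc = "\\begin{document}\n\\maketitle\n\\section{Intro}\n\\end{document}\n"
patched = insert_abstract(doc)
assert re.search(r"\\begin\{abstract\}(.*?)\\end\{abstract\}", patched, flags=re.DOTALL)
```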

View File

@@ -1,4 +1,106 @@
-import time, logging, json
+import time, logging, json, sys, struct
+import numpy as np
+from scipy.io.wavfile import WAVE_FORMAT
+
+def write_numpy_to_wave(filename, rate, data, add_header=False):
+    """
+    Write a NumPy array as a WAV file.
+    """
+    def _array_tofile(fid, data):
+        # ravel gives a c-contiguous buffer
+        fid.write(data.ravel().view('b').data)
+
+    if hasattr(filename, 'write'):
+        fid = filename
+    else:
+        fid = open(filename, 'wb')
+
+    fs = rate
+
+    try:
+        dkind = data.dtype.kind
+        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
+                                                 data.dtype.itemsize == 1)):
+            raise ValueError("Unsupported data type '%s'" % data.dtype)
+
+        header_data = b''
+        header_data += b'RIFF'
+        header_data += b'\x00\x00\x00\x00'
+        header_data += b'WAVE'
+
+        # fmt chunk
+        header_data += b'fmt '
+        if dkind == 'f':
+            format_tag = WAVE_FORMAT.IEEE_FLOAT
+        else:
+            format_tag = WAVE_FORMAT.PCM
+        if data.ndim == 1:
+            channels = 1
+        else:
+            channels = data.shape[1]
+        bit_depth = data.dtype.itemsize * 8
+        bytes_per_second = fs*(bit_depth // 8)*channels
+        block_align = channels * (bit_depth // 8)
+
+        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
+                                     bytes_per_second, block_align, bit_depth)
+        if not (dkind == 'i' or dkind == 'u'):
+            # add cbSize field for non-PCM files
+            fmt_chunk_data += b'\x00\x00'
+
+        header_data += struct.pack('<I', len(fmt_chunk_data))
+        header_data += fmt_chunk_data
+
+        # fact chunk (non-PCM files)
+        if not (dkind == 'i' or dkind == 'u'):
+            header_data += b'fact'
+            header_data += struct.pack('<II', 4, data.shape[0])
+
+        # check data size (needs to be immediately before the data chunk)
+        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
+            raise ValueError("Data exceeds wave file size limit")
+        if add_header:
+            fid.write(header_data)
+        # data chunk
+        fid.write(b'data')
+        fid.write(struct.pack('<I', data.nbytes))
+        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
+                                           sys.byteorder == 'big'):
+            data = data.byteswap()
+        _array_tofile(fid, data)
+
+        if add_header:
+            # Determine file size and place it in correct
+            # position at start of the file.
+            size = fid.tell()
+            fid.seek(4)
+            fid.write(struct.pack('<I', size-8))
+
+    finally:
+        if not hasattr(filename, 'write'):
+            fid.close()
+        else:
+            fid.seek(0)
+
+def is_speaker_speaking(vad, data, sample_rate):
+    # Function to detect if the speaker is speaking
+    # The WebRTC VAD only accepts 16-bit mono PCM audio,
+    # sampled at 8000, 16000, 32000 or 48000 Hz.
+    # A frame must be either 10, 20, or 30 ms in duration:
+    frame_duration = 30
+    n_bit_each = int(sample_rate * frame_duration / 1000)*2 # x2 because audio is 16 bit (2 bytes)
+    res_list = []
+    for t in range(len(data)):
+        if t!=0 and t % n_bit_each == 0:
+            res_list.append(vad.is_speech(data[t-n_bit_each:t], sample_rate))
+
+    info = ''.join(['^' if r else '.' for r in res_list])
+    info = info[:10]
+    if any(res_list):
+        return True, info
+    else:
+        return False, info
+
 class AliyunASR():
@@ -66,12 +168,22 @@
                 on_close=self.test_on_close,
                 callback_args=[uuid.hex]
             )
+            timeout_limit_second = 20
             r = sr.start(aformat="pcm",
+                         timeout=timeout_limit_second,
                          enable_intermediate_result=True,
                          enable_punctuation_prediction=True,
                          enable_inverse_text_normalization=True)

+            import webrtcvad
+            vad = webrtcvad.Vad()
+            vad.set_mode(1)
+            is_previous_frame_transmitted = False   # 上一帧是否有人说话
+            previous_frame_data = None
+            echo_cnt = 0        # 在没有声音之后继续向服务器发送n次音频数据
+            echo_cnt_max = 4    # 在没有声音之后继续向服务器发送n次音频数据
+            keep_alive_last_send_time = time.time()
             while not self.stop:
                 # time.sleep(self.capture_interval)
                 audio = rad.read(uuid.hex)
@@ -79,12 +191,32 @@
                     # convert to pcm file
                     temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
                     dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000
-                    io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
+                    write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata)
                     # read pcm binary
                     with open(temp_file, "rb") as f: data = f.read()
-                    # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
+                    is_speaking, info = is_speaker_speaking(vad, data, NEW_SAMPLERATE)
+
+                    if is_speaking or echo_cnt > 0:
+                        # 如果话筒激活 / 如果处于回声收尾阶段
+                        echo_cnt -= 1
+                        if not is_previous_frame_transmitted:  # 上一帧没有人声,但是我们把上一帧同样加上
+                            if previous_frame_data is not None: data = previous_frame_data + data
+                        if is_speaking:
+                            echo_cnt = echo_cnt_max
-                    slices = zip(*(iter(data),) * 640) # 640个字节为一组
-                    for i in slices: sr.send_audio(bytes(i))
+                        slices = zip(*(iter(data),) * 640) # 640个字节为一组
+                        for i in slices: sr.send_audio(bytes(i))
+                        keep_alive_last_send_time = time.time()
+                        is_previous_frame_transmitted = True
+                    else:
+                        is_previous_frame_transmitted = False
+                        echo_cnt = 0
+                        # 保持链接激活,即使没有声音,也根据时间间隔,发送一些音频片段给服务器
+                        if time.time() - keep_alive_last_send_time > timeout_limit_second/2:
+                            slices = zip(*(iter(data),) * 640) # 640个字节为一组
+                            for i in slices: sr.send_audio(bytes(i))
+                            keep_alive_last_send_time = time.time()
+                            is_previous_frame_transmitted = True
+                    self.audio_shape = info
                 else:
                     time.sleep(0.1)
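Editor's note: two mechanisms are added here. webrtcvad gates which 16 kHz PCM frames are worth streaming (with an echo tail of echo_cnt_max sends, and the previous silent frame prepended so word onsets are not clipped), and a keep-alive resends audio every timeout_limit_second/2 so the Aliyun session is not closed during long silences. A self-contained sketch of just the VAD gating, matching the frame math in is_speaker_speaking above (requires `pip install webrtcvad`; the silent buffer is synthetic):

```python
import webrtcvad

# Frame gating sketch. WebRTC VAD accepts 16-bit mono PCM at 8/16/32/48 kHz,
# in frames of exactly 10, 20 or 30 ms.
sample_rate = 16000
frame_duration = 30                                        # ms
n_bit_each = int(sample_rate * frame_duration / 1000) * 2  # bytes per frame (16-bit samples)

vad = webrtcvad.Vad()
vad.set_mode(1)  # 0..3; higher modes flag silence more aggressively

data = b"\x00\x00" * (n_bit_each // 2) * 10  # ten frames of digital silence
res_list = []
for t in range(len(data)):
    if t != 0 and t % n_bit_each == 0:
        res_list.append(vad.is_speech(data[t - n_bit_each:t], sample_rate))

print(''.join('^' if r else '.' for r in res_list))  # silence should print only dots
```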

View File

@@ -35,7 +35,7 @@ class RealtimeAudioDistribution():
 def read(self, uuid):
     if uuid in self.data:
         res = self.data.pop(uuid)
-        print('\r read-', len(res), '-', max(res), end='', flush=True)
+        # print('\r read-', len(res), '-', max(res), end='', flush=True)
     else:
         res = None
     return res

View File

@@ -6,6 +6,7 @@ import threading, time
 import numpy as np
 from .live_audio.aliyunASR import AliyunASR
 import json
+import re

 class WatchDog():
     def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
@@ -38,10 +39,22 @@
     history = []
     for c in chatbot:
         for q in c:
-            if q not in ["[请讲话]", "[等待GPT响应]", "[正在等您说完问题]"]:
+            if q in ["[ 请讲话 ]", "[ 等待GPT响应 ]", "[ 正在等您说完问题 ]"]:
+                continue
+            elif q.startswith("[ 正在等您说完问题 ]"):
+                continue
+            else:
                 history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
     return history

+def visualize_audio(chatbot, audio_shape):
+    if len(chatbot) == 0: chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
+    chatbot[-1] = list(chatbot[-1])
+    p1 = ''
+    p2 = ''
+    chatbot[-1][-1] = re.sub(p1+r'(.*)'+p2, '', chatbot[-1][-1])
+    chatbot[-1][-1] += (p1+f"`{audio_shape}`"+p2)
+
 class AsyncGptTask():
     def __init__(self) -> None:
         self.observe_future = []
@@ -83,6 +96,7 @@
         self.parsed_text = ""       # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入
         self.parsed_sentence = ""   # 某段话的整个句子, 由 test_on_sentence_end() 写入
         self.buffered_sentence = "" #
+        self.audio_shape = ""       # 音频的可视化表现, 由 audio_convertion_thread() 写入
         self.event_on_result_chg = threading.Event()
         self.event_on_entence_end = threading.Event()
         self.event_on_commit_question = threading.Event()
@@ -167,6 +181,10 @@
             chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
             yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        if not self.event_on_result_chg.is_set() and not self.event_on_entence_end.is_set() and not self.event_on_commit_question.is_set():
+            visualize_audio(chatbot, self.audio_shape)
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
         if len(self.stop_msg) != 0:
             raise RuntimeError(self.stop_msg)
@@ -183,7 +201,7 @@
         import nls
         from scipy import io
     except:
-        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
+        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return

View File

@@ -26,7 +26,13 @@ def get_meta_information(url, chatbot, history):
 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
 'Connection': 'keep-alive'
 }
-session.proxies.update(proxies)
+try:
+    session.proxies.update(proxies)
+except:
+    report_execption(chatbot, history,
+                     a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议检查USE_PROXY选项是否修改。",
+                     b=f"尝试直接连接")
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 session.headers.update(headers)
 response = session.get(url)
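Editor's note: the try/except turns a missing proxy configuration from a crash into a warning plus a direct-connection attempt (the PR #1168 fix — `session.proxies.update(None)` raises, so execution falls through to an unproxied session). The same shape reduced to plain requests, for illustration only:

```python
import requests

# Illustrative reduction of the fallback above: apply proxies when available,
# otherwise warn and continue with a direct connection.
def make_session(proxies=None):
    session = requests.Session()
    try:
        session.proxies.update(proxies)  # dict.update(None) raises TypeError
    except Exception:
        print("No usable proxy configuration; trying a direct connection")
    return session

session = make_session(proxies=None)                          # direct
# session = make_session({"https": "http://127.0.0.1:7890"})  # proxied
```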

View File

@@ -1,4 +1,28 @@
-#【请修改完参数后删除此行】请在以下方案中选择一种然后删除其他的方案最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line
+## ===================================================
+# docker-compose.yml
+## ===================================================
+# 1. 请在以下方案中选择任意一种,然后删除其他的方案
+# 2. 修改你选择的方案中的environment环境变量详情请见github wiki或者config.py
+# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改:
+#    【方法1: 适用于Linux很方便可惜windows不支持】与宿主的网络融合为一体这个是默认配置
+#    network_mode: "host"
+#    【方法2: 适用于所有系统包括Windows和MacOS】端口映射把容器的端口映射到宿主的端口注意您需要先删除network_mode: "host",再追加以下内容)
+#    ports:
+#      - "12345:12345"  # 注意12345必须与WEB_PORT环境变量相互对应
+# 4. 最后`docker-compose up`运行
+# 5. 如果希望使用显卡,请关注 LOCAL_MODEL_DEVICE 和 英伟达显卡运行时 选项
+## ===================================================
+# 1. Please choose one of the following options and delete the others.
+# 2. Modify the environment variables in the selected option, see GitHub wiki or config.py for more details.
+# 3. Choose a method to expose the server port and make the corresponding configuration changes:
+#    [Method 1: Suitable for Linux, convenient, but not supported for Windows] Fusion with the host network, this is the default configuration
+#    network_mode: "host"
+#    [Method 2: Suitable for all systems including Windows and MacOS] Port mapping, mapping the container port to the host port (note that you need to delete network_mode: "host" first, and then add the following content)
+#    ports:
+#      - "12345: 12345"  # Note! 12345 must correspond to the WEB_PORT environment variable.
+# 4. Finally, run `docker-compose up`.
+# 5. If you want to use a graphics card, pay attention to the LOCAL_MODEL_DEVICE and Nvidia GPU runtime options.
+## ===================================================

 ## ===================================================
 ## 【方案零】 部署项目的全部能力这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡则不推荐使用这个
@@ -39,10 +63,14 @@ services:
 #           count: 1
 #           capabilities: [gpu]
-    # 与宿主的网络融合
+    # 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合
     network_mode: "host"
-    # 不使用代理网络拉取最新代码
+    # 【WEB_PORT暴露方法2: 适用于所有系统】端口映射
+    # ports:
+    #   - "12345:12345"  # 12345必须与WEB_PORT相互对应
+
+    # 启动容器后运行main.py主程序
     command: >
       bash -c "python3 -u main.py"

View File

@@ -14,7 +14,7 @@ RUN python3 -m pip install colorama Markdown pygments pymupdf
 RUN python3 -m pip install python-docx moviepy pdfminer
 RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
 RUN python3 -m pip install rarfile py7zr
-RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git

 # 下载分支
 WORKDIR /gpt
 RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git

View File

@@ -5,15 +5,16 @@
 FROM fuqingxu/python311_texlive_ctex:latest

+# 删除文档文件以节约空间
+RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc

 # 指定路径
 WORKDIR /gpt

-RUN pip3 install gradio openai numpy arxiv rich
+RUN pip3 install openai numpy arxiv rich
 RUN pip3 install colorama Markdown pygments pymupdf
-RUN pip3 install python-docx moviepy pdfminer
+RUN pip3 install python-docx pdfminer
-RUN pip3 install zh_langchain==0.2.1
 RUN pip3 install nougat-ocr
-RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git

 # 装载项目文件
 COPY . .

View File

@@ -322,7 +322,7 @@
"任何文件": "Any file", "任何文件": "Any file",
"但推荐上传压缩文件": "But it is recommended to upload compressed files", "但推荐上传压缩文件": "But it is recommended to upload compressed files",
"更换模型 & SysPrompt & 交互界面布局": "Change model & SysPrompt & interactive interface layout", "更换模型 & SysPrompt & 交互界面布局": "Change model & SysPrompt & interactive interface layout",
"底部输入区": "Bottom input area", "浮动输入区": "Floating input area",
"输入清除键": "Input clear key", "输入清除键": "Input clear key",
"插件参数区": "Plugin parameter area", "插件参数区": "Plugin parameter area",
"显示/隐藏功能区": "Show/hide function area", "显示/隐藏功能区": "Show/hide function area",
@@ -2513,5 +2513,141 @@
"此处待注入的知识库名称id": "The knowledge base name ID to be injected here", "此处待注入的知识库名称id": "The knowledge base name ID to be injected here",
"您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin", "您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin",
"判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law", "判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law",
"构建知识库后": "After building the knowledge base" "构建知识库后": "After building the knowledge base",
"找不到本地项目或无法处理": "Unable to find local project or unable to process",
"再做一个小修改": "Make another small modification",
"解析整个Matlab项目": "Parse the entire Matlab project",
"需要用GPT提取参数": "Need to extract parameters using GPT",
"文件路径": "File path",
"正在排队": "In queue",
"-=-=-=-=-=-=-=-= 写出第1个文件": "-=-=-=-=-=-=-=-= Write the first file",
"仅翻译后的文本 -=-=-=-=-=-=-=-=": "Translated text only -=-=-=-=-=-=-=-=",
"对话通道": "Conversation channel",
"找不到任何": "Unable to find any",
"正在启动": "Starting",
"开始创建新进程并执行代码! 时间限制": "Start creating a new process and executing the code! Time limit",
"解析Matlab项目": "Parse Matlab project",
"更换UI主题": "Change UI theme",
"⭐ 开始啦 ": "⭐ Let's start!",
"先提取当前英文标题": "First extract the current English title",
"睡一会防止触发google反爬虫": "Sleep for a while to prevent triggering Google anti-crawler",
"测试": "Test",
"-=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out Markdown file",
"如果index是1的话": "If the index is 1",
"VoidTerminal已经实现了类似的代码": "VoidTerminal has already implemented similar code",
"等待线程锁": "Waiting for thread lock",
"那么我们默认代理生效": "Then we default to proxy",
"结果是一个有效文件": "The result is a valid file",
"⭐ 检查模块": "⭐ Check module",
"备份一份History作为记录": "Backup a copy of History as a record",
"作者Binary-Husky": "Author Binary-Husky",
"将csv文件转excel表格": "Convert CSV file to Excel table",
"获取文章摘要": "Get article summary",
"次代码生成尝试": "Attempt to generate code",
"如果参数是空的": "If the parameter is empty",
"请配置讯飞星火大模型的XFYUN_APPID": "Please configure XFYUN_APPID for the Xunfei Starfire model",
"-=-=-=-=-=-=-=-= 写出第2个文件": "Write the second file",
"代码生成阶段结束": "Code generation phase completed",
"则进行提醒": "Then remind",
"处理异常": "Handle exception",
"可能触发了google反爬虫机制": "May have triggered Google anti-crawler mechanism",
"AnalyzeAMatlabProject的所有源文件": "All source files of AnalyzeAMatlabProject",
"写入": "Write",
"我们5秒后再试一次...": "Let's try again in 5 seconds...",
"判断一下用户是否错误地通过对话通道进入": "Check if the user entered through the dialogue channel by mistake",
"结果": "Result",
"2. 如果没有文件": "2. If there is no file",
"由 test_on_sentence_end": "By test_on_sentence_end",
"则直接使用first section name": "Then directly use the first section name",
"太懒了": "Too lazy",
"记录当前的大章节标题": "Record the current chapter title",
"然后再次点击该插件! 至于您的文件": "Then click the plugin again! As for your file",
"此次我们的错误追踪是": "This time our error tracking is",
"首先在arxiv上搜索": "First search on arxiv",
"被新插件取代": "Replaced by a new plugin",
"正在处理文件": "Processing file",
"除了连接OpenAI之外": "In addition to connecting OpenAI",
"我们检查一下": "Let's check",
"进度": "Progress",
"处理少数情况下的特殊插件的锁定状态": "Handle the locked state of special plugins in a few cases",
"⭐ 开始执行": "⭐ Start execution",
"正常情况": "Normal situation",
"下个句子中已经说完的部分": "The part that has already been said in the next sentence",
"首次运行需要花费较长时间下载NOUGAT参数": "The first run takes a long time to download NOUGAT parameters",
"使用tex格式公式 测试2 给出柯西不等式": "Use the tex format formula to test 2 and give the Cauchy inequality",
"无法从bing获取信息": "Unable to retrieve information from Bing!",
"秒. 请等待任务完成": "Wait for the task to complete",
"开始干正事": "Start doing real work",
"需要花费较长时间下载NOUGAT参数": "It takes a long time to download NOUGAT parameters",
"然后再次点击该插件": "Then click the plugin again",
"受到bing限制": "Restricted by Bing",
"检索文章的历史版本的题目": "Retrieve the titles of historical versions of the article",
"收尾": "Wrap up",
"给定了task": "Given a task",
"某段话的整个句子": "The whole sentence of a paragraph",
"-=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out HTML file -=-=-=-=-=-=-=-=",
"当前文件": "Current file",
"请在输入框内填写需求": "Please fill in the requirements in the input box",
"结果是一个字符串": "The result is a string",
"用插件实现」": "Implemented with a plugin",
"⭐ 到最后一步了": "⭐ Reached the final step",
"重新修改当前part的标题": "Modify the title of the current part again",
"请勿点击“提交”按钮或者“基础功能区”按钮": "Do not click the 'Submit' button or the 'Basic Function Area' button",
"正在执行命令": "Executing command",
"检测到**滞留的缓存文档**": "Detected **stuck cache document**",
"第三步": "Step three",
"失败了~ 别担心": "Failed~ Don't worry",
"动态代码解释器": "Dynamic code interpreter",
"开始执行": "Start executing",
"不给定task": "No task given",
"正在加载NOUGAT...": "Loading NOUGAT...",
"精准翻译PDF文档": "Accurate translation of PDF documents",
"时间限制TIME_LIMIT": "Time limit TIME_LIMIT",
"翻译前后混合 -=-=-=-=-=-=-=-=": "Mixed translation before and after -=-=-=-=-=-=-=-=",
"搞定代码生成": "Code generation is done",
"插件通道": "Plugin channel",
"智能体": "Intelligent agent",
"切换界面明暗 ☀": "Switch interface brightness ☀",
"交换图像的蓝色通道和红色通道": "Swap blue channel and red channel of the image",
"作为函数参数": "As a function parameter",
"先挑选偶数序列号": "First select even serial numbers",
"仅供测试": "For testing only",
"执行成功了": "Execution succeeded",
"开始逐个文件进行处理": "Start processing files one by one",
"当前文件处理列表": "Current file processing list",
"执行失败了": "Execution failed",
"请及时处理": "Please handle it in time",
"源文件": "Source file",
"裁剪图像": "Crop image",
"插件动态生成插件": "Dynamic generation of plugins",
"正在验证上述代码的有效性": "Validating the above code",
"⭐ = 关键步骤": "⭐ = Key step",
"!= 0 代表“提交”键对话通道": "!= 0 represents the 'Submit' key dialogue channel",
"解析python源代码项目": "Parsing Python source code project",
"请检查PDF是否损坏": "Please check if the PDF is damaged",
"插件动态生成": "Dynamic generation of plugins",
"⭐ 分离代码块": "⭐ Separating code blocks",
"已经被记忆": "Already memorized",
"默认用英文的": "Default to English",
"错误追踪": "Error tracking",
"对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent",
"请检查": "Please check",
"检测到被滞留的缓存文档": "Detected cached documents being left behind",
"还有哪些场合允许使用代理": "What other occasions allow the use of proxies",
"1. 如果有文件": "1. If there is a file",
"执行开始": "Execution starts",
"代码生成结束": "Code generation ends",
"请及时点击“**保存当前对话**”获取所有滞留文档": "Please click '**Save Current Dialogue**' in time to obtain all cached documents",
"需点击“**函数插件区**”按钮进行处理": "Click the '**Function Plugin Area**' button for processing",
"此函数已经弃用": "This function has been deprecated",
"以后再写": "Write it later",
"返回给定的url解析出的arxiv_id": "Return the arxiv_id parsed from the given URL",
"⭐ 文件上传区是否有东西": "⭐ Is there anything in the file upload area",
"Nougat解析论文失败": "Nougat failed to parse the paper",
"本源代码中": "In this source code",
"或者基础功能通道": "Or the basic function channel",
"使用zip压缩格式": "Using zip compression format",
"受到google限制": "Restricted by Google",
"如果是": "If it is",
"不用担心": "don't worry"
 }

View File

@@ -1007,7 +1007,6 @@
"第一部分": "第1部分", "第一部分": "第1部分",
"的分析如下": "の分析は以下の通りです", "的分析如下": "の分析は以下の通りです",
"解决一个mdx_math的bug": "mdx_mathのバグを解決する", "解决一个mdx_math的bug": "mdx_mathのバグを解決する",
"底部输入区": "下部の入力エリア",
"函数插件输入输出接驳区": "関数プラグインの入出力接続エリア", "函数插件输入输出接驳区": "関数プラグインの入出力接続エリア",
"打开浏览器": "ブラウザを開く", "打开浏览器": "ブラウザを開く",
"免费用户填3": "無料ユーザーは3を入力してください", "免费用户填3": "無料ユーザーは3を入力してください",

View File

@@ -90,5 +90,7 @@
"解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID", "解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID",
"虚空终端主路由": "VoidTerminalMainRoute", "虚空终端主路由": "VoidTerminalMainRoute",
"批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT", "批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT",
"解析PDF_基于NOUGAT": "ParsePDF_NOUGAT" "解析PDF_基于NOUGAT": "ParsePDF_NOUGAT",
"解析一个Matlab项目": "AnalyzeAMatlabProject",
"函数动态生成": "DynamicFunctionGeneration"
} }

View File

@@ -346,7 +346,6 @@
"情况会好转": "情況會好轉", "情况会好转": "情況會好轉",
"超过512个": "超過512個", "超过512个": "超過512個",
"多线": "多線", "多线": "多線",
"底部输入区": "底部輸入區",
"合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格", "合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格",
"暗色主题": "暗色主題", "暗色主题": "暗色主題",
"提高限制请查询": "提高限制請查詢", "提高限制请查询": "提高限制請查詢",

main.py
View File

@@ -1,14 +1,19 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
+import pickle
+import codecs
+import base64

 def main():
     import gradio as gr
-    if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖详情信息见requirements.txt"
+    if gr.__version__ not in ['3.32.6']:
+        raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
     ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
+    DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')

     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -17,8 +22,16 @@ def main():
     initial_prompt = "Serve me as a writing and programming assistant."
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
-    description = "代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic)"
-    description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)"
+    description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
+    description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
+    description += "</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), "
+    description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)."
+    description += "</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交"
+    description += "</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮"
+    description += "</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮"
+    description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
+    description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
+    description += "</br></br>如何语音对话: 请阅读Wiki"

     # 问询记录, python 版本建议3.9+(越新越好)
     import logging, uuid
@@ -58,9 +71,11 @@ def main():
         CHATBOT_HEIGHT /= 2

     cancel_handles = []
+    customize_btns = {}
+    predefined_btns = {}
     with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
-        secret_css, secret_font = gr.Textbox(visible=False), gr.Textbox(visible=False)
+        secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
         cookies = gr.State(load_chat_cookies())
         with gr_L1():
             with gr_L2(scale=2, elem_id="gpt-chat"):
@@ -72,11 +87,11 @@ def main():
                 with gr.Row():
                     txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                 with gr.Row():
-                    submitBtn = gr.Button("提交", variant="primary")
+                    submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
                 with gr.Row():
-                    resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
-                    stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
-                    clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
+                    resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
+                    stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
+                    clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
                 if ENABLE_AUDIO:
                     with gr.Row():
                         audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
@@ -84,11 +99,16 @@ def main():
                 status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
                 with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
                     with gr.Row():
+                        for k in range(NUM_CUSTOM_BASIC_BTN):
+                            customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮')
+                            customize_btn.style(size="sm")
+                            customize_btns.update({"自定义按钮" + str(k+1): customize_btn})
                         for k in functional:
                             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                             variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
-                            functional[k]["Button"] = gr.Button(k, variant=variant)
+                            functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
                             functional[k]["Button"].style(size="sm")
+                            predefined_btns.update({k: functional[k]["Button"]})
                 with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
                     with gr.Row():
                         gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
@@ -100,7 +120,9 @@ def main():
                             if not plugin.get("AsButton", True): continue
                             visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
                             variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
-                            plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible).style(size="sm")
+                            info = plugins[k].get("Info", k)
+                            plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
+                                visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
                     with gr.Row():
                         with gr.Accordion("更多函数插件", open=True):
                             dropdown_fn_list = []
@@ -117,15 +139,28 @@ def main():
                             switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
                     with gr.Row():
                         with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
-                            file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
-                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"):
-                    system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
+                            file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
+        with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
+            with gr.Row():
+                with gr.Tab("上传文件", elem_id="interact-panel"):
+                    gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
+                    file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
+                with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
+                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
-                    max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
-                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
-                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
+                    max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
+                    system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
+                with gr.Tab("界面外观", elem_id="interact-panel"):
                     theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
+                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
+                                                  value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
+                    checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
+                                                    value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
                     dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
                     dark_mode_btn.click(None, None, None, _js="""() => {
                         if (document.querySelectorAll('.dark').length) {
@@ -135,30 +170,113 @@ def main():
                         }
                     }""",
                     )
+                with gr.Tab("帮助", elem_id="interact-panel"):
                     gr.Markdown(description)

-        with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
-            with gr.Row():
-                txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
-            with gr.Row():
-                submitBtn2 = gr.Button("提交", variant="primary")
-            with gr.Row():
-                resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
-                stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
-                clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
+        with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
+            with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
+                with gr.Row() as row:
+                    row.style(equal_height=True)
+                    with gr.Column(scale=10):
+                        txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", lines=8, label="输入区2").style(container=False)
+                    with gr.Column(scale=1, min_width=40):
+                        submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
+                        resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
+                        stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
+                        clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
+        def to_cookie_str(d):
+            # Pickle the dictionary and encode it as a string
+            pickled_dict = pickle.dumps(d)
+            cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
+            return cookie_value
+
+        def from_cookie_str(c):
+            # Decode the base64-encoded string and unpickle it into a dictionary
+            pickled_dict = base64.b64decode(c.encode('utf-8'))
+            return pickle.loads(pickled_dict)
+
+        with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
+            with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
+                with gr.Row() as row:
+                    with gr.Column(scale=10):
+                        AVAIL_BTN = [btn for btn in customize_btns.keys()] + [k for k in functional]
+                        basic_btn_dropdown = gr.Dropdown(AVAIL_BTN, value="自定义按钮1", label="选择一个需要自定义基础功能区按钮").style(container=False)
+                        basic_fn_title = gr.Textbox(show_label=False, placeholder="输入新按钮名称", lines=1).style(container=False)
+                        basic_fn_prefix = gr.Textbox(show_label=False, placeholder="输入新提示前缀", lines=4).style(container=False)
+                        basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
+                    with gr.Column(scale=1, min_width=70):
+                        basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
+                        basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm")
+
+                def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
+                    ret = {}
+                    customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
+                    customize_fn_overwrite_.update({
+                        basic_btn_dropdown_:
+                        {
+                            "Title":  basic_fn_title,
+                            "Prefix": basic_fn_prefix,
+                            "Suffix": basic_fn_suffix,
+                        }
+                    })
+                    cookies_.update(customize_fn_overwrite_)
+                    if basic_btn_dropdown_ in customize_btns:
+                        ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    else:
+                        ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    ret.update({cookies: cookies_})
+                    try: persistent_cookie_ = from_cookie_str(persistent_cookie_)   # persistent cookie to dict
+                    except: persistent_cookie_ = {}
+                    persistent_cookie_["custom_bnt"] = customize_fn_overwrite_      # dict update new value
+                    persistent_cookie_ = to_cookie_str(persistent_cookie_)          # persistent cookie to dict
+                    ret.update({persistent_cookie: persistent_cookie_})             # write persistent cookie
+                    return ret
+
+                def reflesh_btn(persistent_cookie_, cookies_):
+                    ret = {}
+                    for k in customize_btns:
+                        ret.update({customize_btns[k]: gr.update(visible=False, value="")})
+                    try: persistent_cookie_ = from_cookie_str(persistent_cookie_)   # persistent cookie to dict
+                    except: return ret
+                    customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
+                    cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
+                    ret.update({cookies: cookies_})
+                    for k, v in persistent_cookie_["custom_bnt"].items():
+                        if v['Title'] == "": continue
+                        if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
+                        else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
+                    return ret
+
+                basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
+                h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
+                                           [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
+                h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")   # save persistent cookie
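Editor's note: persistence works by pickling the custom-button dict, base64-encoding it, and handing the string to the browser as a cookie (the setCookie call above); reflesh_btn reverses the trip on page load. A standalone round-trip check:

```python
import pickle, base64

# Round-trip used by the custom-menu persistence above:
# dict -> pickle -> base64 text (cookie-safe) -> dict.
def to_cookie_str(d):
    return base64.b64encode(pickle.dumps(d)).decode('utf-8')

def from_cookie_str(c):
    return pickle.loads(base64.b64decode(c.encode('utf-8')))

custom_bnt = {"自定义按钮1": {"Title": "Polish", "Prefix": "Please polish: ", "Suffix": ""}}
cookie = to_cookie_str({"custom_bnt": custom_bnt})
assert from_cookie_str(cookie)["custom_bnt"] == custom_bnt
```

One design caveat: unpickling a value that round-trips through the client is only acceptable while the app treats the cookie as data it wrote itself; a JSON encoding would avoid the pickle deserialization risk entirely.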
         # 功能区显示开关与功能区的互动
         def fn_area_visibility(a):
             ret = {}
             ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
             ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
-            ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
-            ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
+            ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
+            ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
             ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
             ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
             ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
-            if "底部输入区" in a: ret.update({txt: gr.update(value="")})
+            if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
             return ret
         checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )

+        # 功能区显示开关与功能区的互动
+        def fn_area_visibility_2(a):
+            ret = {}
+            ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
+            return ret
+        checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
    # Collect the widget-handle combinations that recur below
    input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
    output_combo = [cookies, chatbot, history, status]
@@ -182,8 +300,12 @@ def main():
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle) cancel_handles.append(click_handle)
for btn in customize_btns.values():
click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
cancel_handles.append(click_handle)
    # Interaction between the file-upload area and the chatbot after files arrive
    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
+   file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
    # Function plugins - fixed-button area
    for k in plugins:
        if not plugins[k].get("AsButton", True): continue
@@ -193,7 +315,8 @@ def main():
    # Function plugins - interaction between the dropdown menu and the adaptive button
    def on_dropdown_changed(k):
        variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary"
-       ret = {switchy_bt: gr.update(value=k, variant=variant)}
+       info = plugins[k].get("Info", k)
+       ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')}
        if plugins[k].get("AdvancedArgs", False): # whether to reveal the advanced plugin-argument area
            ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
        else:
@@ -266,27 +389,47 @@ def main():
        cookies.update({'uuid': uuid.uuid4()})
        return cookies
    demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
-   demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{GptAcademicJavaScriptInit();}')
+   darkmode_js = """(dark) => {
+       dark = dark == "True";
+       if (document.querySelectorAll('.dark').length) {
+           if (!dark){
+               document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
+           }
+       } else {
+           if (dark){
+               document.querySelector('body').classList.add('dark');
+           }
+       }
+   }"""
+   load_cookie_js = """(persistent_cookie) => {
+       return getCookie("persistent_cookie");
+   }"""
+   demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
+   demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js)  # apply the dark or light theme
+   demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
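These `demo.load` calls use gradio's `_js` hook to run client-side code at page load, fed by (possibly hidden) components. The same pattern in isolation (minimal sketch; the flag component is hypothetical):

```python
import gradio as gr

with gr.Blocks() as demo:
    dark_mode = gr.Textbox(value="True", visible=False)   # hypothetical hidden flag
    # run JS once on load; the Python side only supplies the input value and returns nothing
    demo.load(None, inputs=[dark_mode], outputs=None,
              _js='(dark) => { if (dark === "True") document.body.classList.add("dark"); }')

demo.launch()
```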
    # gradio's inbrowser trigger is unreliable; roll back to the original browser-opening routine
-   def auto_opentab_delay():
+   def run_delayed_tasks():
        import threading, webbrowser, time
        print(f"如果浏览器没有自动打开请复制并转到以下URL")
-       print(f"\t(亮色主题): http://localhost:{PORT}")
-       print(f"\t(暗色主题): http://localhost:{PORT}/?__theme=dark")
+       if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题): http://localhost:{PORT}")
+       else: print(f"\t「亮色主题已启用(支持动态切换主题): http://localhost:{PORT}")
-       def open():
-           time.sleep(2)  # open the browser
-           DARK_MODE, = get_conf('DARK_MODE')
-           if DARK_MODE: webbrowser.open_new_tab(f"http://localhost:{PORT}/?__theme=dark")
-           else: webbrowser.open_new_tab(f"http://localhost:{PORT}")
-       threading.Thread(target=open, name="open-browser", daemon=True).start()
-       threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
-       threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
+       def auto_updates(): time.sleep(0); auto_update()
+       def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
+       def warm_up_mods(): time.sleep(4); warm_up_modules()
+       threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start()  # check for automatic updates
+       threading.Thread(target=open_browser, name="open-browser", daemon=True).start()  # open the browser page
+       threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start()       # warm up the tiktoken module
-   auto_opentab_delay()
+   run_delayed_tasks()
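The rewrite stages three background jobs with increasing delays so the web server is up before the heavier tasks run. The pattern in isolation, as a self-contained sketch:

```python
import threading
import time

def delayed(delay_s, fn, name):
    def runner():
        time.sleep(delay_s)          # stagger the task relative to server startup
        fn()
    # daemon threads never block interpreter shutdown
    threading.Thread(target=runner, name=name, daemon=True).start()

delayed(0, lambda: print("check for updates"), "self-upgrade")
delayed(2, lambda: print("open browser tab"),  "open-browser")
delayed(4, lambda: print("warm up tokenizer"), "warm-up")
time.sleep(5)  # keep the sketch alive long enough to observe all three jobs
```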
    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
        quiet=True,
        server_name="0.0.0.0",
+       ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
+       ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
+       ssl_verify=False,
        server_port=PORT,
        favicon_path="docs/logo.png",
        auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
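With the new `ssl_keyfile`/`ssl_certfile` arguments, HTTPS is enabled by pointing the two config values at a key/certificate pair; both default to an empty string, which keeps SSL off. Hypothetical `config_private.py` values:

```python
# config_private.py -- hypothetical paths; "" (the default) disables SSL
SSL_KEYFILE  = "/etc/ssl/private/gpt_academic.key"
SSL_CERTFILE = "/etc/ssl/certs/gpt_academic.pem"
# A self-signed pair for local testing could be generated with, for example:
#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
#       -keyout gpt_academic.key -out gpt_academic.pem
```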
View File
@@ -135,6 +135,15 @@ model_info = {
"token_cnt": get_token_num_gpt4, "token_cnt": get_token_num_gpt4,
}, },
"gpt-3.5-random": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
    # azure openai
    "azure-gpt-3.5":{
        "fn_with_ui": chatgpt_ui,
View File
@@ -18,6 +18,7 @@ import logging
import traceback
import requests
import importlib
+import random
# config_private.py holds private secrets such as API keys and proxy URLs
# On load, a private config_private.py (not tracked by git) is checked first; if present, it overrides the original config file
@@ -39,6 +40,21 @@ def get_full_error(chunk, stream_response):
        break
    return chunk
def decode_chunk(chunk):
    # read a few fields ahead of time (used for exception detection)
chunk_decoded = chunk.decode()
chunkjson = None
has_choices = False
has_content = False
has_role = False
try:
chunkjson = json.loads(chunk_decoded[6:])
has_choices = 'choices' in chunkjson
if has_choices: has_content = "content" in chunkjson['choices'][0]["delta"]
if has_choices: has_role = "role" in chunkjson['choices'][0]["delta"]
except:
pass
return chunk_decoded, chunkjson, has_choices, has_content, has_role
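A quick illustration of what `decode_chunk` returns for the frame shapes the streaming loop has to handle (sample payloads are illustrative):

```python
# content-bearing frame: all three flags come back True
frame = b'data: {"choices":[{"delta":{"role":"assistant","content":"Hi"}}]}'
decoded, parsed, has_choices, has_content, has_role = decode_chunk(frame)
assert has_choices and has_content and has_role

# role-only frame (seen from some third-party endpoints): content flag stays False
frame = b'data: {"choices":[{"delta":{"role":"assistant"}}]}'
decoded, parsed, has_choices, has_content, has_role = decode_chunk(frame)
assert has_choices and has_role and not has_content

# terminator / non-JSON frame: json.loads fails inside decode_chunk, so every flag is False
decoded, parsed, has_choices, has_content, has_role = decode_chunk(b'data: [DONE]')
assert parsed is None and not has_choices
```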
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
@@ -191,7 +207,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
            yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode())  # refresh the UI
            return
-       chunk_decoded = chunk.decode()
+       # read a few fields ahead of time (used for exception detection)
+       chunk_decoded, chunkjson, has_choices, has_content, has_role = decode_chunk(chunk)
        if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded):
            # the first frame of the stream carries no content
            is_head_of_the_stream = False; continue
@@ -199,15 +217,23 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
        if chunk:
            try:
                # the former is API2D's termination condition; the latter is OpenAI's
-               if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
+               if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                    # the data stream is finished, and gpt_replying_buffer is complete as well
                    logging.info(f'[response] {gpt_replying_buffer}')
                    break
                # process the body of the data stream
-               chunkjson = json.loads(chunk_decoded[6:])
                status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
                # an exception here usually means the text is too long; see the output of get_full_error for details
+               if has_content:
+                   # normal case
+                   gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+               elif has_role:
+                   # some third-party endpoints emit role-only frames; tolerate them for compatibility
+                   continue
+               else:
+                   # some low-quality third-party endpoints emit frames like this
+                   gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
                history[-1] = gpt_replying_buffer
                chatbot[-1] = (history[-2], history[-1])
                yield from update_ui(chatbot=chatbot, history=history, msg=status_text)  # refresh the UI
@@ -288,9 +314,19 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
what_i_ask_now["role"] = "user" what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now) messages.append(what_i_ask_now)
model = llm_kwargs['llm_model'].strip('api2d-')
if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
model = random.choice([
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
])
logging.info("Random select model:" + model)
payload = { payload = {
"model": llm_kwargs['llm_model'].strip('api2d-'), "model": model,
"messages": messages, "messages": messages,
"temperature": llm_kwargs['temperature'], # 1.0, "temperature": llm_kwargs['temperature'], # 1.0,
"top_p": llm_kwargs['top_p'], # 1.0, "top_p": llm_kwargs['top_p'], # 1.0,
View File
@@ -1,4 +1,4 @@
-./docs/gradio-3.32.2-py3-none-any.whl
+./docs/gradio-3.32.6-py3-none-any.whl
pydantic==1.10.11
tiktoken>=0.3.3
requests[socks]
View File
@@ -9,7 +9,9 @@ validate_path()   # returns the project root path
if __name__ == "__main__":
    from tests.test_utils import plugin_test
-   plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
+   # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
+   plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
    # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
View File
@@ -9,6 +9,11 @@
    box-shadow: none;
}
#input-plugin-group .secondary-wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
border: none;
min-width: 0;
}
/* hide selector label */
#input-plugin-group .svelte-1gfkn6j {
    visibility: hidden;
@@ -83,3 +88,27 @@
#input-panel2 button {
    min-width: min(80px, 100%);
}
#cbs {
background-color: var(--block-background-fill) !important;
}
#interact-panel .form {
    border: hidden;
}
.drag-area {
border: solid;
border-width: thin;
user-select: none;
padding-left: 2%;
}
.floating-component #input-panel2 {
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border: solid;
border-width: thin;
border-top-width: 0;
}
View File
@@ -10,8 +10,32 @@ function gradioApp() {
    return elem.shadowRoot ? elem.shadowRoot : elem;
}
function setCookie(name, value, days) {
var expires = "";
if (days) {
var date = new Date();
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
expires = "; expires=" + date.toUTCString();
}
document.cookie = name + "=" + value + expires + "; path=/";
}
function getCookie(name) {
var decodedCookie = decodeURIComponent(document.cookie);
var cookies = decodedCookie.split(';');
for (var i = 0; i < cookies.length; i++) {
var cookie = cookies[i].trim();
if (cookie.indexOf(name + "=") === 0) {
return cookie.substring(name.length + 1, cookie.length);
}
}
return null;
}
function addCopyButton(botElement) {
    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
@@ -74,13 +98,8 @@ function chatbotContentChanged(attempt = 1, force = false) {
    }
}

-function GptAcademicJavaScriptInit() {
-    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
-    var chatbotObserver = new MutationObserver(() => {
-        chatbotContentChanged(1);
-    });
-    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
+function chatbotAutoHeight(){
+    // automatically adjust the chatbot height
    function update_height(){
        var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
        if (panel_height_target!=chatbot_height)
@@ -110,6 +129,15 @@ function GptAcademicJavaScriptInit() {
    }, 50);  // run every 50 milliseconds
}
function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
var chatbotObserver = new MutationObserver(() => {
chatbotContentChanged(1);
});
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
if (LAYOUT === "LEFT-RIGHT") {chatbotAutoHeight();}
}
function get_elements(consider_state_panel=false) {
    var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
    if (!chatbot) {
@@ -118,14 +146,14 @@ function get_elements(consider_state_panel=false) {
    const panel1 = document.querySelector('#input-panel').getBoundingClientRect();
    const panel2 = document.querySelector('#basic-panel').getBoundingClientRect()
    const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect();
-   const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
+   // const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
    const panel5 = document.querySelector('#input-panel2').getBoundingClientRect();
    const panel_active = document.querySelector('#state-panel').getBoundingClientRect();
    if (consider_state_panel || panel_active.height < 25){
        document.state_panel_height = panel_active.height;
    }
    // 25 is the chatbot label height; 16 is the right-hand gap
-   var panel_height_target = panel1.height + panel2.height + panel3.height + panel4.height + panel5.height - 25 + 16*3;
+   var panel_height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16*2;
    // keep the dynamic state-panel height from affecting the result
    panel_height_target = panel_height_target + (document.state_panel_height-panel_active.height)
    var panel_height_target = parseInt(panel_height_target);
View File
@@ -198,7 +198,7 @@
}
/* small buttons */
-.sm.svelte-1ipelgc {
+.sm {
    font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
    --button-small-text-weight: 600;
    --button-small-text-size: 16px;
@@ -208,7 +208,7 @@
    border-top-left-radius: 0px;
}
-#plugin-panel .sm.svelte-1ipelgc {
+#plugin-panel .sm {
    font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
    --button-small-text-weight: 400;
    --button-small-text-size: 14px;
View File
@@ -57,9 +57,6 @@ def adjust_theme():
        button_cancel_text_color_dark="white",
    )
-   if LAYOUT=="TOP-DOWN":
-       js = ""
-   else:
-       with open('themes/common.js', 'r', encoding='utf8') as f:
-           js = f"<script>{f.read()}</script>"
+   with open('themes/common.js', 'r', encoding='utf8') as f:
+       js = f"<script>{f.read()}</script>"
View File
@@ -9,15 +9,15 @@
    border-radius: 4px;
}
-#plugin-panel .dropdown-arrow.svelte-p5edak {
-    width: 50px;
+#plugin-panel .dropdown-arrow {
+    width: 25px;
}
#plugin-panel input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
    padding-left: 5px;
}
/* small buttons */
-.sm.svelte-1ipelgc {
+#basic-panel .sm {
    font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
    --button-small-text-weight: 600;
    --button-small-text-size: 16px;
@@ -27,7 +27,7 @@
    border-top-left-radius: 6px;
}
-#plugin-panel .sm.svelte-1ipelgc {
+#plugin-panel .sm {
    font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
    --button-small-text-weight: 400;
    --button-small-text-size: 14px;
View File
@@ -57,9 +57,6 @@ def adjust_theme():
        button_cancel_text_color_dark="white",
    )
-   if LAYOUT=="TOP-DOWN":
-       js = ""
-   else:
-       with open('themes/common.js', 'r', encoding='utf8') as f:
-           js = f"<script>{f.read()}</script>"
+   with open('themes/common.js', 'r', encoding='utf8') as f:
+       js = f"<script>{f.read()}</script>"
View File
@@ -23,9 +23,6 @@ def adjust_theme():
    if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
    set_theme = set_theme.from_hub(THEME.lower())
-   if LAYOUT=="TOP-DOWN":
-       js = ""
-   else:
-       with open('themes/common.js', 'r', encoding='utf8') as f:
-           js = f"<script>{f.read()}</script>"
+   with open('themes/common.js', 'r', encoding='utf8') as f:
+       js = f"<script>{f.read()}</script>"
View File
@@ -73,10 +73,6 @@ def adjust_theme():
        chatbot_code_background_color_dark="*neutral_950",
    )
-   js = ''
-   if LAYOUT=="TOP-DOWN":
-       js = ""
-   else:
-       with open('themes/common.js', 'r', encoding='utf8') as f:
-           js = f"<script>{f.read()}</script>"
+   with open('themes/common.js', 'r', encoding='utf8') as f:
+       js = f"<script>{f.read()}</script>"
View File
@@ -472,7 +472,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted rar archive to {}".format(dest_dir)) print("Successfully extracted rar archive to {}".format(dest_dir))
except: except:
print("Rar format requires additional dependencies to install") print("Rar format requires additional dependencies to install")
return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件' return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议使用zip压缩格式。'
# 第三方库需要预先pip install py7zr # 第三方库需要预先pip install py7zr
elif file_extension == '.7z': elif file_extension == '.7z':
@@ -523,7 +523,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
    # copy the file over
    if not os.path.exists(new_path): shutil.copyfile(file, new_path)
    # record the file in the chatbot cookie to avoid interference between users
-   if chatbot:
+   if chatbot is not None:
        if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
        else: current = []
        chatbot._cookies.update({'files_to_promote': [new_path] + current})
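The `is not None` change matters because the chatbot argument behaves like a list: a freshly created, empty chatbot is falsy, so `if chatbot:` silently skipped the cookie update. A sketch with a hypothetical stand-in class:

```python
# hypothetical minimal stand-in for the real chatbot-with-cookies container
class ChatBotWithCookies(list):
    def __init__(self, cookies):
        super().__init__()
        self._cookies = cookies

cb = ChatBotWithCookies({'files_to_promote': []})
assert not cb            # an empty list-like object is falsy, so "if chatbot:" skips it
assert cb is not None    # yet a real object was passed, which "is not None" detects
```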
@@ -581,7 +581,7 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo
    # collect the set of files
    moved_files = [fp for fp in glob.glob(f'{target_path_base}/**/*', recursive=True)]
-   if "底部输入区" in checkboxes:
+   if "浮动输入区" in checkboxes:
        txt, txt2 = "", target_path_base
    else:
        txt, txt2 = target_path_base, ""
@@ -621,10 +621,20 @@ def on_report_generated(cookies, files, chatbot):
def load_chat_cookies():
    API_KEY, LLM_MODEL, AZURE_API_KEY = get_conf('API_KEY', 'LLM_MODEL', 'AZURE_API_KEY')
+   DARK_MODE, NUM_CUSTOM_BASIC_BTN = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN')
    if is_any_api_key(AZURE_API_KEY):
        if is_any_api_key(API_KEY): API_KEY = API_KEY + ',' + AZURE_API_KEY
        else: API_KEY = AZURE_API_KEY
-   return {'api_key': API_KEY, 'llm_model': LLM_MODEL}
+   customize_fn_overwrite_ = {}
for k in range(NUM_CUSTOM_BASIC_BTN):
customize_fn_overwrite_.update({
"自定义按钮" + str(k+1):{
"Title": r"",
"Prefix": r"请在自定义菜单中定义提示词前缀.",
"Suffix": r"请在自定义菜单中定义提示词后缀",
}
})
return {'api_key': API_KEY, 'llm_model': LLM_MODEL, 'customize_fn_overwrite': customize_fn_overwrite_}
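With `NUM_CUSTOM_BASIC_BTN = 2`, the returned cookie dictionary takes roughly this shape (placeholder values):

```python
# Illustrative result of load_chat_cookies(); the api_key value is a placeholder
cookies = {
    'api_key': 'sk-xxxx',
    'llm_model': 'gpt-3.5-turbo',
    'customize_fn_overwrite': {
        '自定义按钮1': {'Title': '', 'Prefix': '请在自定义菜单中定义提示词前缀.', 'Suffix': '请在自定义菜单中定义提示词后缀'},
        '自定义按钮2': {'Title': '', 'Prefix': '请在自定义菜单中定义提示词前缀.', 'Suffix': '请在自定义菜单中定义提示词后缀'},
    },
}
```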
def is_openai_api_key(key):
    CUSTOM_API_KEY_PATTERN, = get_conf('CUSTOM_API_KEY_PATTERN')
View File
@@ -1,5 +1,5 @@
{
-    "version": 3.54,
+    "version": 3.55,
     "show_feature": true,
-    "new_feature": "新增动态代码解释器CodeInterpreter <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) <-> 改进UI设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+    "new_feature": "重新编译Gradio优化使用体验 <-> 新增动态代码解释器CodeInterpreter <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) <-> 改进UI设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
}