Compare commits

...

53 Commits

Author SHA1 Message Date
binary-husky
f35f6633e0 fix: welcome card flip bug 2024-08-02 11:20:41 +00:00
hongyi-zhao
573dc4d184 Add claude-3-5-sonnet-20240620 (#1907)
See https://docs.anthropic.com/en/docs/about-claude/models#model-names for model names.
2024-08-02 18:04:42 +08:00
binary-husky
da8b2d69ce update version 3.8 2024-08-02 10:02:04 +00:00
binary-husky
58e732c26f Merge branch 'frontier' 2024-08-02 09:50:40 +00:00
Menghuan1918
ca238daa8c 改进联网搜索插件-新增搜索模式,搜索增强 (#1874)
* Change default to Mixed option

* Add option optimizer

* Add search optimizer prompts

* Enhanced Processing

* Finish search_optimizer part

* prompts bug fix

* Bug fix
2024-07-23 00:55:48 +08:00
jiangfy-ihep
60b3491513 add gpt-4o-mini (#1904)
Co-authored-by: Fayu Jiang <jiangfayu@hotmail.com>
2024-07-23 00:55:34 +08:00
binary-husky
c1175bfb7d add flip card animation 2024-07-22 04:53:59 +00:00
binary-husky
b705afd5ff welcome menu bug fix 2024-07-22 04:35:52 +00:00
binary-husky
dfcd28abce add width_to_hide_welcome 2024-07-22 03:34:35 +00:00
binary-husky
1edaa9e234 hide when too narrow 2024-07-21 15:04:38 +00:00
binary-husky
f0cd617ec2 minor css improve 2024-07-20 10:29:47 +00:00
binary-husky
0b08bb2cea update svg 2024-07-20 07:15:08 +00:00
Keldos
d1f8607ac8 Update submit button dropdown style (#1900) 2024-07-20 14:50:56 +08:00
binary-husky
7eb68a2086 tune 2024-07-17 17:16:34 +00:00
binary-husky
ee9e99036a Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-07-17 17:14:49 +00:00
binary-husky
55e255220b update 2024-07-17 17:12:32 +00:00
lbykkkk
019cd26ae8 Merge branch 'frontier' of https://github.com/binary-husky/gpt_academic into frontier 2024-07-18 00:35:51 +08:00
lbykkkk
a5b21d5cc0 修改content并统一logo颜色 2024-07-18 00:35:40 +08:00
binary-husky
ce940ff70f roll welcome msg 2024-07-17 16:34:24 +00:00
binary-husky
fc6a83c29f update 2024-07-17 15:44:08 +00:00
binary-husky
1d3212e367 reverse welcome msg 2024-07-17 15:43:41 +00:00
lbykkkk
8a835352a3 更新欢迎界面的用语和logo 2024-07-17 19:49:07 +08:00
binary-husky
5456c9fa43 improve welcome UI 2024-07-16 16:23:07 +00:00
binary-husky
ea67054c30 update chuanhu theme 2024-07-16 16:07:46 +00:00
binary-husky
1084108df6 adding welcome page 2024-07-16 10:41:25 +00:00
binary-husky
40c9700a8d add welcome page 2024-07-15 15:47:24 +00:00
binary-husky
6da5623813 多用途复用提交按钮 2024-07-15 04:23:43 +00:00
binary-husky
778c9cd9ec roll version 2024-07-15 03:29:56 +00:00
binary-husky
e290317146 proxy submit btn 2024-07-15 03:28:59 +00:00
binary-husky
85b92b7f07 move python comment agent to dropdown 2024-07-13 16:26:36 +00:00
binary-husky
ff899777ce improve source code comment plugin functionality 2024-07-13 16:20:17 +00:00
binary-husky
c1b8c773c3 stage compare source code comment 2024-07-13 15:28:53 +00:00
binary-husky
8747c48175 mt improvement 2024-07-12 08:26:40 +00:00
binary-husky
c0010c88bc implement auto comment 2024-07-12 07:36:40 +00:00
binary-husky
68838da8ad finish test 2024-07-12 04:19:07 +00:00
binary-husky
ca7de8fcdd version up 2024-07-10 02:00:36 +00:00
binary-husky
7ebc2d00e7 Merge branch 'master' into frontier 2024-07-09 03:19:35 +00:00
binary-husky
47fb81cfde Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2024-07-09 03:18:19 +00:00
binary-husky
83961c1002 optimize image generation fn 2024-07-09 03:18:14 +00:00
binary-husky
a8621333af js impl bug fix 2024-07-08 15:50:12 +00:00
binary-husky
f402ef8134 hide ask btn 2024-07-08 15:15:30 +00:00
binary-husky
65d0f486f1 change cache to lru_cache for lower python version 2024-07-07 16:02:05 +00:00
binary-husky
41f25a6a9b Merge branch 'bold_frontier' into frontier 2024-07-04 14:16:08 +00:00
binary-husky
4a6a032334 ignore 2024-07-04 14:14:49 +00:00
Menghuan1918
114192e025 Bug fix: can not chat with deepseek (#1879) 2024-07-04 20:28:53 +08:00
binary-husky
9d11b17f25 Merge branch 'master' into frontier 2024-07-02 08:06:34 +00:00
binary-husky
1d9e9fa6a1 new page btn 2024-07-01 16:27:23 +00:00
binary-husky
6babcb4a9c Merge branch 'master' into frontier 2024-06-27 06:52:03 +00:00
binary-husky
b7b4e201cb fix latex auto correct 2024-06-27 06:49:10 +00:00
binary-husky
26e7677dc3 fix new api for taichu 2024-06-26 15:18:11 +00:00
binary-husky
5e64a50898 Merge branch 'master' into frontier 2024-06-25 11:43:40 +00:00
binary-husky
60a42fb070 Merge branch 'master' into frontier 2024-06-25 11:14:32 +00:00
binary-husky
c94d5054a2 move fn 2024-06-25 08:53:28 +00:00
56 changed files with 2430 additions and 549 deletions

8
.gitignore vendored
View File

@@ -131,6 +131,9 @@ dmypy.json
# Pyre type checker
.pyre/
# macOS files
.DS_Store
.vscode
.idea
@@ -153,6 +156,7 @@ media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
test.html
test.*
temp.*
objdump*
*.min.*.js
*.min.*.js

View File

@@ -33,7 +33,7 @@ else:
# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
"gpt-4o", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
"gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
"gemini-pro", "chatglm3"

View File

@@ -5,21 +5,21 @@ from toolbox import trimmed_format_exc
def get_crazy_functions():
from crazy_functions.读文章写摘要 import 读文章写摘要
from crazy_functions.生成函数注释 import 批量生成函数注释
from crazy_functions.解析项目源代码 import 解析项目本身
from crazy_functions.解析项目源代码 import 解析一个Python项目
from crazy_functions.解析项目源代码 import 解析一个Matlab项目
from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
from crazy_functions.解析项目源代码 import 解析一个C项目
from crazy_functions.解析项目源代码 import 解析一个Golang项目
from crazy_functions.解析项目源代码 import 解析一个Rust项目
from crazy_functions.解析项目源代码 import 解析一个Java项目
from crazy_functions.解析项目源代码 import 解析一个前端项目
from crazy_functions.SourceCode_Analyse import 解析项目本身
from crazy_functions.SourceCode_Analyse import 解析一个Python项目
from crazy_functions.SourceCode_Analyse import 解析一个Matlab项目
from crazy_functions.SourceCode_Analyse import 解析一个C项目的头文件
from crazy_functions.SourceCode_Analyse import 解析一个C项目
from crazy_functions.SourceCode_Analyse import 解析一个Golang项目
from crazy_functions.SourceCode_Analyse import 解析一个Rust项目
from crazy_functions.SourceCode_Analyse import 解析一个Java项目
from crazy_functions.SourceCode_Analyse import 解析一个前端项目
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
from crazy_functions.高级功能函数模板 import Demo_Wrap
from crazy_functions.Latex全文润色 import Latex英文润色
from crazy_functions.询问多个大语言模型 import 同时问询
from crazy_functions.解析项目源代码 import 解析一个Lua项目
from crazy_functions.解析项目源代码 import 解析一个CSharp项目
from crazy_functions.SourceCode_Analyse import 解析一个Lua项目
from crazy_functions.SourceCode_Analyse import 解析一个CSharp项目
from crazy_functions.总结word文档 import 总结word文档
from crazy_functions.解析JupyterNotebook import 解析ipynb文件
from crazy_functions.Conversation_To_File import 载入对话历史存档
@@ -45,6 +45,9 @@ def get_crazy_functions():
from crazy_functions.Latex_Function_Wrap import PDF_Localize
from crazy_functions.Internet_GPT import 连接网络回答问题
from crazy_functions.Internet_GPT_Wrap import NetworkGPT_Wrap
from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
from crazy_functions.Image_Generate_Wrap import ImageGen_Wrap
from crazy_functions.SourceCode_Comment import 注释Python项目
function_plugins = {
"虚空终端": {
@@ -61,6 +64,13 @@ def get_crazy_functions():
"Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
"Function": HotReload(解析一个Python项目),
},
"注释Python项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "上传一系列python源文件(或者压缩包), 为这些代码添加docstring | 输入参数为路径",
"Function": HotReload(注释Python项目),
},
"载入对话历史存档(先上传存档或输入路径)": {
"Group": "对话",
"Color": "stop",
@@ -324,7 +334,7 @@ def get_crazy_functions():
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比),
},
"Arxiv论文精细翻译输入arxivID[需Latex]": {
"📚Arxiv论文精细翻译输入arxivID[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
@@ -336,7 +346,7 @@ def get_crazy_functions():
"Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后Function旧接口仅会在“虚空终端”中起作用
"Class": Arxiv_Localize, # 新一代插件需要注册Class
},
"本地Latex论文精细翻译上传Latex项目[需Latex]": {
"📚本地Latex论文精细翻译上传Latex项目[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
@@ -361,6 +371,39 @@ def get_crazy_functions():
}
}
function_plugins.update(
{
"🎨图片生成DALLE2/DALLE3, 使用前切换到GPT系列模型": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "使用 DALLE2/DALLE3 生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2), # 当注册Class后Function旧接口仅会在“虚空终端”中起作用
"Class": ImageGen_Wrap # 新一代插件需要注册Class
},
}
)
function_plugins.update(
{
"🎨图片修改_DALLE2 使用前请切换模型到GPT系列": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2),
},
}
)
# -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
try:
@@ -413,7 +456,7 @@ def get_crazy_functions():
# print("Load function plugin failed")
try:
from crazy_functions.解析项目源代码 import 解析任意code项目
from crazy_functions.SourceCode_Analyse import 解析任意code项目
function_plugins.update(
{
@@ -439,7 +482,7 @@ def get_crazy_functions():
"询问多个GPT模型手动指定询问哪些模型": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
"Function": HotReload(同时问询_指定模型),
@@ -450,50 +493,7 @@ def get_crazy_functions():
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
function_plugins.update(
{
"图片生成_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如1024x1024默认支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
"Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2),
},
}
)
function_plugins.update(
{
"图片生成_DALLE3 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
"Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE3),
},
}
)
function_plugins.update(
{
"图片修改_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2),
},
}
)
except:
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.总结音视频 import 总结音视频

View File

@@ -108,7 +108,7 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 使用前请切换模型到GPT系列。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
@@ -129,7 +129,7 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 使用前请切换模型到GPT系列。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
@@ -166,7 +166,7 @@ class ImageEditState(GptAcademicState):
return confirm, file
def lock_plugin(self, chatbot):
chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2'
chatbot._cookies['lock_plugin'] = 'crazy_functions.Image_Generate->图片修改_DALLE2'
self.dump_state(chatbot)
def unlock_plugin(self, chatbot):

View File

@@ -0,0 +1,56 @@
from toolbox import get_conf, update_ui
from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
class ImageGen_Wrap(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
第一个参数,名称`main_input`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description``default_value`为默认值;
第二个参数,名称`advanced_arg`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description``default_value`为默认值;
"""
gui_definition = {
"main_input":
ArgProperty(title="输入图片描述", description="需要生成图像的文本描述,尽量使用英文", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"model_name":
ArgProperty(title="模型", options=["DALLE2", "DALLE3"], default_value="DALLE3", description="", type="dropdown").model_dump_json(),
"resolution":
ArgProperty(title="分辨率", options=["256x256(限DALLE2)", "512x512(限DALLE2)", "1024x1024", "1792x1024(限DALLE3)", "1024x1792(限DALLE3)"], default_value="1024x1024", description="", type="dropdown").model_dump_json(),
"quality (仅DALLE3生效)":
ArgProperty(title="质量", options=["standard", "hd"], default_value="standard", description="", type="dropdown").model_dump_json(),
"style (仅DALLE3生效)":
ArgProperty(title="风格", options=["vivid", "natural"], default_value="vivid", description="", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
# 分辨率
resolution = plugin_kwargs["resolution"].replace("(限DALLE2)", "").replace("(限DALLE3)", "")
if plugin_kwargs["model_name"] == "DALLE2":
plugin_kwargs["advanced_arg"] = resolution
yield from 图片生成_DALLE2(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
elif plugin_kwargs["model_name"] == "DALLE3":
quality = plugin_kwargs["quality (仅DALLE3生效)"]
style = plugin_kwargs["style (仅DALLE3生效)"]
plugin_kwargs["advanced_arg"] = f"{resolution}-{quality}-{style}"
yield from 图片生成_DALLE3(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
else:
chatbot.append([None, "抱歉,找不到该模型"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -1,12 +1,109 @@
from toolbox import CatchException, update_ui, get_conf
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llms.bridge_all import model_info
import urllib.request
import random
import time
import re
import json
from bs4 import BeautifulSoup
from functools import lru_cache
from itertools import zip_longest
from check_proxy import check_proxy
from toolbox import CatchException, update_ui, get_conf
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
from request_llms.bridge_all import model_info
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.prompts.internet import SearchOptimizerPrompt, SearchAcademicOptimizerPrompt
def search_optimizer(
query,
proxies,
history,
llm_kwargs,
optimizer=1,
categories="general",
searxng_url=None,
engines=None,
):
# ------------- < 第1步尝试进行搜索优化 > -------------
# * 增强优化,会尝试结合历史记录进行搜索优化
if optimizer == 2:
his = " "
if len(history) == 0:
pass
else:
for i, h in enumerate(history):
if i % 2 == 0:
his += f"Q: {h}\n"
else:
his += f"A: {h}\n"
if categories == "general":
sys_prompt = SearchOptimizerPrompt.format(query=query, history=his, num=4)
elif categories == "science":
sys_prompt = SearchAcademicOptimizerPrompt.format(query=query, history=his, num=4)
else:
his = " "
if categories == "general":
sys_prompt = SearchOptimizerPrompt.format(query=query, history=his, num=3)
elif categories == "science":
sys_prompt = SearchAcademicOptimizerPrompt.format(query=query, history=his, num=3)
mutable = ["", time.time(), ""]
llm_kwargs["temperature"] = 0.8
try:
querys_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
sys_prompt=sys_prompt,
observe_window=mutable,
)
except Exception:
querys_json = "1234"
#* 尝试解码优化后的搜索结果
querys_json = re.sub(r"```json|```", "", querys_json)
try:
querys = json.loads(querys_json)
except Exception:
#* 如果解码失败,降低温度再试一次
try:
llm_kwargs["temperature"] = 0.4
querys_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
sys_prompt=sys_prompt,
observe_window=mutable,
)
querys_json = re.sub(r"```json|```", "", querys_json)
querys = json.loads(querys_json)
except Exception:
#* 如果再次失败,直接返回原始问题
querys = [query]
links = []
success = 0
Exceptions = ""
for q in querys:
try:
link = searxng_request(q, proxies, categories, searxng_url, engines=engines)
if len(link) > 0:
links.append(link[:-5])
success += 1
except Exception:
Exceptions = Exception
pass
if success == 0:
raise ValueError(f"在线搜索失败!\n{Exceptions}")
# * 清洗搜索结果,依次放入每组第一,第二个搜索结果,并清洗重复的搜索结果
seen_links = set()
result = []
for tuple in zip_longest(*links, fillvalue=None):
for item in tuple:
if item is not None:
link = item["link"]
if link not in seen_links:
seen_links.add(link)
result.append(item)
return result
@lru_cache
def get_auth_ip():
@@ -15,14 +112,15 @@ def get_auth_ip():
return '114.114.114.' + str(random.randint(1, 10))
return ip
def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
if searxng_url is None:
url = get_conf("SEARXNG_URL")
else:
url = searxng_url
if engines is None:
engines = 'bing'
if engines == "Mixed":
engines = None
if categories == 'general':
params = {
@@ -66,6 +164,7 @@ def searxng_request(query, proxies, categories='general', searxng_url=None, engi
else:
raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))
def scrape_text(url, proxies) -> str:
"""Scrape text from a webpage
@@ -93,9 +192,10 @@ def scrape_text(url, proxies) -> str:
text = "\n".join(chunk for chunk in chunks if chunk)
return text
@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
optimizer_history = history[:-8]
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -106,16 +206,23 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
categories = plugin_kwargs.get('categories', 'general')
searxng_url = plugin_kwargs.get('searxng_url', None)
engines = plugin_kwargs.get('engine', None)
urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
optimizer = plugin_kwargs.get('optimizer', "关闭")
if optimizer == "关闭":
urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
else:
urls = search_optimizer(txt, proxies, optimizer_history, llm_kwargs, optimizer, categories, searxng_url, engines)
history = []
if len(urls) == 0:
chatbot.append((f"结论:{txt}",
"[Local Message] 受到限制无法从searxng获取信息请尝试更换搜索引擎。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# ------------- < 第2步依次访问网页 > -------------
max_search_result = 5 # 最多收纳多少个网页的结果
chatbot.append([f"联网检索中 ...", None])
if optimizer == "开启(增强)":
max_search_result = 8
chatbot.append(["联网检索中 ...", None])
for index, url in enumerate(urls[:max_search_result]):
res = scrape_text(url['link'], proxies)
prefix = f"{index}份搜索结果 [源自{url['source'][0]}搜索] {url['title'][:25]}"
@@ -125,18 +232,47 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# ------------- < 第3步ChatGPT综合 > -------------
i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
i_say, history = input_clipping( # 裁剪输入从最长的条目开始裁剪防止爆token
inputs=i_say,
history=history,
max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
if (optimizer != "开启(增强)"):
i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
i_say, history = input_clipping( # 裁剪输入从最长的条目开始裁剪防止爆token
inputs=i_say,
history=history,
max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
#* 或者使用搜索优化器,这样可以保证后续问答能读取到有效的历史记录
else:
i_say = f"从以上搜索结果中抽取与问题:{txt} 相关的信息:"
i_say, history = input_clipping( # 裁剪输入从最长的条目开始裁剪防止爆token
inputs=i_say,
history=history,
max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的三个搜索结果进行总结"
)
chatbot[-1] = (i_say, gpt_say)
history = []
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
# ------------- < 第4步根据综合回答问题 > -------------
i_say = f"请根据以上搜索结果回答问题:{txt}"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请根据给定的若干条搜索结果回答问题"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -22,11 +22,13 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
"""
gui_definition = {
"main_input":
ArgProperty(title="输入问题", description="待通过互联网检索的问题", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
ArgProperty(title="输入问题", description="待通过互联网检索的问题,会自动读取输入框内容", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"categories":
ArgProperty(title="搜索分类", options=["网页", "学术论文"], default_value="网页", description="", type="dropdown").model_dump_json(),
"engine":
ArgProperty(title="选择搜索引擎", options=["bing", "google", "duckduckgo"], default_value="bing", description="", type="dropdown").model_dump_json(),
ArgProperty(title="选择搜索引擎", options=["Mixed", "bing", "google", "duckduckgo"], default_value="google", description="", type="dropdown").model_dump_json(),
"optimizer":
ArgProperty(title="搜索优化", options=["关闭", "开启", "开启(增强)"], default_value="关闭", description="是否使用搜索增强。注意这可能会消耗较多token", type="dropdown").model_dump_json(),
"searxng_url":
ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=get_conf("SEARXNG_URL"), type="string").model_dump_json(), # 主输入,自动从输入框同步
@@ -39,6 +41,5 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
"""
if plugin_kwargs["categories"] == "网页": plugin_kwargs["categories"] = "general"
if plugin_kwargs["categories"] == "学术论文": plugin_kwargs["categories"] = "science"
yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)

View File

@@ -1,4 +1,4 @@
from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
from toolbox import update_ui, promote_file_to_downloadzone
from toolbox import CatchException, report_exception, write_history_to_file
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
@@ -7,7 +7,6 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
import os, copy
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
disable_auto_promotion(chatbot=chatbot)
summary_batch_isolation = True
inputs_array = []
@@ -24,7 +23,7 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
file_content = f.read()
prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}'
# 装载请求内容
inputs_array.append(i_say)
inputs_show_user_array.append(i_say_show_user)

View File

@@ -0,0 +1,138 @@
import os, copy, time
from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_lastest_msg, get_conf, generate_file_link
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.agent_fns.python_comment_agent import PythonCodeComment
from crazy_functions.diagram_fns.file_tree import FileNode
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
def 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
summary_batch_isolation = True
inputs_array = []
inputs_show_user_array = []
history_array = []
sys_prompt_array = []
assert len(file_manifest) <= 512, "源文件太多超过512个, 请缩减输入文件的数量。或者您也可以选择删除此行警告并修改代码拆分file_manifest列表从而实现分批次处理。"
# 建立文件树
file_tree_struct = FileNode("root", build_manifest=True)
for file_path in file_manifest:
file_tree_struct.add_file(file_path, file_path)
# <第一步,逐个文件分析,多线程>
for index, fp in enumerate(file_manifest):
# 读取文件
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
file_content = f.read()
prefix = ""
i_say = prefix + f'Please conclude the following source code at {os.path.relpath(fp, project_folder)} with only one sentence, the code is:\n```{file_content}```'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请用一句话对下面的程序文件做一个整体概述: {fp}'
# 装载请求内容
MAX_TOKEN_SINGLE_FILE = 2560
i_say, _ = input_clipping(inputs=i_say, history=[], max_token_limit=MAX_TOKEN_SINGLE_FILE)
inputs_array.append(i_say)
inputs_show_user_array.append(i_say_show_user)
history_array.append([])
sys_prompt_array.append("You are a software architecture analyst analyzing a source code project. Do not dig into details, tell me what the code is doing in general. Your answer must be short, simple and clear.")
# 文件读取完成,对每一个源代码文件,生成一个请求线程,发送到大模型进行分析
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array = inputs_array,
inputs_show_user_array = inputs_show_user_array,
history_array = history_array,
sys_prompt_array = sys_prompt_array,
llm_kwargs = llm_kwargs,
chatbot = chatbot,
show_user_at_complete = True
)
# <第二步,逐个文件分析,生成带注释文件>
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=get_conf('DEFAULT_WORKER_NUM'))
def _task_multi_threading(i_say, gpt_say, fp, file_tree_struct):
pcc = PythonCodeComment(llm_kwargs, language='English')
pcc.read_file(path=fp, brief=gpt_say)
revised_path, revised_content = pcc.begin_comment_source_code(None, None)
file_tree_struct.manifest[fp].revised_path = revised_path
file_tree_struct.manifest[fp].revised_content = revised_content
# <将结果写回源文件>
with open(fp, 'w', encoding='utf-8') as f:
f.write(file_tree_struct.manifest[fp].revised_content)
# <生成对比html>
with open("crazy_functions/agent_fns/python_comment_compare.html", 'r', encoding='utf-8') as f:
html_template = f.read()
warp = lambda x: "```python\n\n" + x + "\n\n```"
from themes.theme import advanced_css
html_template = html_template.replace("ADVANCED_CSS", advanced_css)
html_template = html_template.replace("REPLACE_CODE_FILE_LEFT", pcc.get_markdown_block_in_html(markdown_convertion_for_file(warp(pcc.original_content))))
html_template = html_template.replace("REPLACE_CODE_FILE_RIGHT", pcc.get_markdown_block_in_html(markdown_convertion_for_file(warp(revised_content))))
compare_html_path = fp + '.compare.html'
file_tree_struct.manifest[fp].compare_html = compare_html_path
with open(compare_html_path, 'w', encoding='utf-8') as f:
f.write(html_template)
print('done 1')
chatbot.append([None, f"正在处理:"])
futures = []
for i_say, gpt_say, fp in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], file_manifest):
future = executor.submit(_task_multi_threading, i_say, gpt_say, fp, file_tree_struct)
futures.append(future)
cnt = 0
while True:
cnt += 1
time.sleep(3)
worker_done = [h.done() for h in futures]
remain = len(worker_done) - sum(worker_done)
# <展示已经完成的部分>
preview_html_list = []
for done, fp in zip(worker_done, file_manifest):
if not done: continue
preview_html_list.append(file_tree_struct.manifest[fp].compare_html)
file_links = generate_file_link(preview_html_list)
yield from update_ui_lastest_msg(
f"剩余源文件数量: {remain}.\n\n" +
f"已完成的文件: {sum(worker_done)}.\n\n" +
file_links +
"\n\n" +
''.join(['.']*(cnt % 10 + 1)
), chatbot=chatbot, history=history, delay=0)
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
# <第四步,压缩结果>
zip_res = zip_result(project_folder)
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <END>
chatbot.append((None, "所有源文件均已处理完毕。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@CatchException
def 注释Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
if len(file_manifest) == 0:
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

View File

@@ -0,0 +1,391 @@
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
import datetime
import re
import os
from textwrap import dedent
# TODO: 解决缩进问题
# Prompt template: asks the LLM to find function boundaries in a page of code
# whose lines are tagged "LNNNN |". Placeholder: {THE_TAGGED_CODE}.
# NOTE(review): the prompt text contains typos ("you job", "sperate") — they are
# part of the runtime string and are intentionally left untouched here.
find_function_end_prompt = '''
Below is a page of code that you need to read. This page may not yet complete, you job is to split this page to sperate functions, class functions etc.
- Provide the line number where the first visible function ends.
- Provide the line number where the next visible function begins.
- If there are no other functions in this page, you should simply return the line number of the last line.
- Only focus on functions declared by `def` keyword. Ignore inline functions. Ignore function calls.
------------------ Example ------------------
INPUT:
```
L0000 |import sys
L0001 |import re
L0002 |
L0003 |def trimmed_format_exc():
L0004 | import os
L0005 | import traceback
L0006 | str = traceback.format_exc()
L0007 | current_path = os.getcwd()
L0008 | replace_path = "."
L0009 | return str.replace(current_path, replace_path)
L0010 |
L0011 |
L0012 |def trimmed_format_exc_markdown():
L0013 | ...
L0014 | ...
```
OUTPUT:
```
<first_function_end_at>L0009</first_function_end_at>
<next_function_begin_from>L0012</next_function_begin_from>
```
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_TAGGED_CODE}
```
'''
# Prompt template: asks the LLM to add docstrings/comments to one code window.
# Placeholders: {FILE_BASENAME}, {LANG}, {THE_CODE}, {INDENT_REMINDER},
# {BRIEF_REMINDER}, {HINT_REMINDER} — filled in PythonCodeComment.tag_code.
# NOTE(review): instruction 2 ends with the truncated fragment "toggle qu." —
# looks like an unfinished sentence; confirm intended wording before changing
# this runtime string.
revise_funtion_prompt = '''
You need to read the following code, and revise the source code ({FILE_BASENAME}) according to following instructions:
1. You should analyze the purpose of the functions (if there are any).
2. You need to add docstring for the provided functions (if there are any).
Be aware:
1. You must NOT modify the indent of code.
2. You are NOT authorized to change or translate non-comment code, and you are NOT authorized to add empty lines either, toggle qu.
3. Use {LANG} to add comments and docstrings. Do NOT translate Chinese that is already in the code.
------------------ Example ------------------
INPUT:
```
L0000 |
L0001 |def zip_result(folder):
L0002 | t = gen_time_str()
L0003 | zip_folder(folder, get_log_folder(), f"result.zip")
L0004 | return os.path.join(get_log_folder(), f"result.zip")
L0005 |
L0006 |
```
OUTPUT:
<instruction_1_purpose>
This function compresses a given folder, and return the path of the resulting `zip` file.
</instruction_1_purpose>
<instruction_2_revised_code>
```
def zip_result(folder):
"""
Compresses the specified folder into a zip file and stores it in the log folder.
Args:
folder (str): The path to the folder that needs to be compressed.
Returns:
str: The path to the created zip file in the log folder.
"""
t = gen_time_str()
zip_folder(folder, get_log_folder(), f"result.zip") # ⭐ Execute the zipping of folder
return os.path.join(get_log_folder(), f"result.zip")
```
</instruction_2_revised_code>
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ({FILE_BASENAME}) ------------------
```
{THE_CODE}
```
{INDENT_REMINDER}
{BRIEF_REMINDER}
{HINT_REMINDER}
'''
class PythonCodeComment():
    """
    Paginate a Python source file into function-sized windows, ask an LLM to
    insert docstrings/comments into each window, and re-assemble the revised
    file while verifying that no executable line was dropped or altered.

    Typical usage:
        pcc = PythonCodeComment(llm_kwargs, language)
        pcc.read_file(path, brief)
        _, revised_text = pcc.begin_comment_source_code()
    """

    def __init__(self, llm_kwargs, language) -> None:
        self.original_content = ""              # raw text of the file being processed
        self.full_context = []                  # file content as a list of lines
        self.full_context_with_line_no = []     # lines tagged with "LNNNN | " markers
        self.current_page_start = 0             # pagination cursor (line index)
        self.page_limit = 100                   # 100 lines of code each page
        self.ignore_limit = 20                  # a tail shorter than this is processed in one go
        self.llm_kwargs = llm_kwargs
        self.language = language                # language for generated comments
        self.path = None
        self.file_basename = None
        self.file_brief = ""

    def generate_tagged_code_from_full_context(self):
        """Tag every line of the file with a zero-padded "LNNNN | " prefix
        (the format the boundary-finding prompt expects) and return the list."""
        for i, code in enumerate(self.full_context):
            number = i
            padded_number = f"{number:04}"
            result = f"L{padded_number}"
            self.full_context_with_line_no.append(f"{result} | {code}")
        return self.full_context_with_line_no

    def read_file(self, path, brief):
        """Load a source file and prepare the tagged line list.

        path : path of the python file to annotate
        brief: short human description of the file (used as prompt context)
        """
        with open(path, 'r', encoding='utf8') as f:
            self.full_context = f.readlines()
            self.original_content = ''.join(self.full_context)
        self.file_basename = os.path.basename(path)
        self.file_brief = brief
        self.full_context_with_line_no = self.generate_tagged_code_from_full_context()
        self.path = path

    def find_next_function_begin(self, tagged_code:list, begin_and_end):
        """Ask the LLM where the next function begins inside `tagged_code`.

        Returns the parsed line number, or the window end (`begin_and_end[1]`)
        when the reply cannot be parsed.
        """
        begin, end = begin_and_end
        THE_TAGGED_CODE = ''.join(tagged_code)
        # NOTE(review): this mutates the shared llm_kwargs dict — confirm callers
        # expect temperature to stay pinned at 0 afterwards.
        self.llm_kwargs['temperature'] = 0
        result = predict_no_ui_long_connection(
            inputs=find_function_end_prompt.format(THE_TAGGED_CODE=THE_TAGGED_CODE),
            llm_kwargs=self.llm_kwargs,
            history=[],
            sys_prompt="",
            observe_window=[],
            console_slience=True
        )

        def extract_number(text):
            # Parse the <next_function_begin_from>LNNNN</...> tag from the reply.
            match = re.search(r'<next_function_begin_from>L(\d+)</next_function_begin_from>', text)
            if match:
                return int(match.group(1))
            return None

        line_no = extract_number(result)
        if line_no is not None:
            return line_no
        else:
            # Fall back to the window end so pagination always makes progress.
            return end

    def _get_next_window(self):
        """Compute the next [start, end) line window to process.

        Raises StopIteration when the whole file has been consumed.
        """
        current_page_start = self.current_page_start
        if self.current_page_start == len(self.full_context) + 1:
            raise StopIteration
        # If very few lines remain, process them all in one go.
        if len(self.full_context) - self.current_page_start < self.ignore_limit:
            future_page_start = len(self.full_context) + 1
            self.current_page_start = future_page_start
            return current_page_start, future_page_start
        tagged_code = self.full_context_with_line_no[ self.current_page_start: self.current_page_start + self.page_limit]
        line_no = self.find_next_function_begin(tagged_code, [self.current_page_start, self.current_page_start + self.page_limit])
        # Near EOF, extend the window to the end of the file.
        if line_no > len(self.full_context) - 5:
            line_no = len(self.full_context) + 1
        future_page_start = line_no
        self.current_page_start = future_page_start
        return current_page_start, future_page_start

    def dedent(self, text):
        """Remove any common leading whitespace from every line in `text`.

        Returns (dedented_text, margin_width) — unlike textwrap.dedent, the
        width of the removed margin is reported so callers can remind the LLM
        to preserve indentation.
        """
        # Look for the longest leading string of spaces and tabs common to
        # all lines.
        margin = None
        _whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
        _leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
        text = _whitespace_only_re.sub('', text)
        indents = _leading_whitespace_re.findall(text)
        for indent in indents:
            if margin is None:
                margin = indent
            # Current line more deeply indented than previous winner:
            # no change (previous winner is still on top).
            elif indent.startswith(margin):
                pass
            # Current line consistent with and no deeper than previous winner:
            # it's the new winner.
            elif margin.startswith(indent):
                margin = indent
            # Find the largest common whitespace between current line and previous
            # winner.
            else:
                for i, (x, y) in enumerate(zip(margin, indent)):
                    if x != y:
                        margin = margin[:i]
                        break
        # sanity check (testing/debugging only)
        if 0 and margin:
            for line in text.split("\n"):
                assert not line or line.startswith(margin), \
                       "line = %r, margin = %r" % (line, margin)
        if margin:
            text = re.sub(r'(?m)^' + margin, '', text)
            return text, len(margin)
        else:
            return text, 0

    def get_next_batch(self):
        """Return (window_text, start_line, end_line) for the next code window."""
        current_page_start, future_page_start = self._get_next_window()
        return ''.join(self.full_context[current_page_start: future_page_start]), current_page_start, future_page_start

    def tag_code(self, fn, hint):
        """Send one code window to the LLM and return its revised version.

        fn  : the raw code window text
        hint: a line of code the previous attempt dropped (or None) — fed back
              to the LLM as a reminder not to omit it.

        Falls back to returning the original window when no single code block
        can be extracted from the reply.
        """
        code = fn
        _, n_indent = self.dedent(code)
        # BUGFIX: this reminder was missing its f-prefix, so the literal text
        # "{n_indent}" (not the number) was being sent to the LLM.
        indent_reminder = "" if n_indent == 0 else f"(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preseve them in the OUTPUT.)"
        brief_reminder = "" if self.file_brief == "" else f"({self.file_basename} abstract: {self.file_brief})"
        hint_reminder = "" if hint is None else f"(Reminder: do not ignore or modify code such as `{hint}`, provide complete code in the OUTPUT.)"
        # NOTE(review): mutates the shared llm_kwargs dict — see find_next_function_begin.
        self.llm_kwargs['temperature'] = 0
        result = predict_no_ui_long_connection(
            inputs=revise_funtion_prompt.format(
                LANG=self.language,
                FILE_BASENAME=self.file_basename,
                THE_CODE=code,
                INDENT_REMINDER=indent_reminder,
                BRIEF_REMINDER=brief_reminder,
                HINT_REMINDER=hint_reminder
            ),
            llm_kwargs=self.llm_kwargs,
            history=[],
            sys_prompt="",
            observe_window=[],
            console_slience=True
        )

        def get_code_block(reply):
            import re
            pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
            matches = re.findall(pattern, reply) # find all code blocks in text
            if len(matches) == 1:
                block = matches[0]
                # BUGFIX: the previous `.strip('python')` removed ANY leading or
                # trailing characters from the set {p,y,t,h,o,n} and could eat
                # real code (e.g. a trailing "n"); only drop a leading language tag.
                if block.startswith('python'):
                    block = block[len('python'):]
                return block
            return None

        code_block = get_code_block(result)
        if code_block is not None:
            code_block = self.sync_and_patch(original=code, revised=code_block)
            return code_block
        else:
            return code

    def get_markdown_block_in_html(self, html):
        """Extract the first <div class="markdown-body"> from `html`, or None."""
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, 'lxml')
        found_list = soup.find_all("div", class_="markdown-body")
        if found_list:
            res = found_list[0]
            return res.prettify()
        else:
            return None

    def sync_and_patch(self, original, revised):
        """Ensure the number of pre-string empty lines in revised matches those in original."""
        def count_leading_empty_lines(s, reverse=False):
            """Count the number of leading empty lines in a string."""
            lines = s.split('\n')
            if reverse: lines = list(reversed(lines))
            count = 0
            for line in lines:
                if line.strip() == '':
                    count += 1
                else:
                    break
            return count
        # Align leading blank lines.
        original_empty_lines = count_leading_empty_lines(original)
        revised_empty_lines = count_leading_empty_lines(revised)
        if original_empty_lines > revised_empty_lines:
            additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
            revised = additional_lines + revised
        elif original_empty_lines < revised_empty_lines:
            lines = revised.split('\n')
            revised = '\n'.join(lines[revised_empty_lines - original_empty_lines:])
        # Align trailing blank lines.
        original_empty_lines = count_leading_empty_lines(original, reverse=True)
        revised_empty_lines = count_leading_empty_lines(revised, reverse=True)
        if original_empty_lines > revised_empty_lines:
            additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
            revised = revised + additional_lines
        elif original_empty_lines < revised_empty_lines:
            lines = revised.split('\n')
            revised = '\n'.join(lines[:-(revised_empty_lines - original_empty_lines)])
        return revised

    def begin_comment_source_code(self, chatbot=None, history=None):
        """Drive the whole annotate loop over the file loaded by read_file.

        Returns (None, revised_file_text). For each window: up to MAX_ATTEMPT
        LLM passes, verified by verify_successful; on persistent failure the
        original window is kept unchanged.
        """
        assert self.path is not None
        assert '.py' in self.path # must be python source code
        write_content = ""
        while True:
            try:
                next_batch, line_no_start, line_no_end = self.get_next_batch()
                hint = None
                MAX_ATTEMPT = 2
                for attempt in range(MAX_ATTEMPT):
                    result = self.tag_code(next_batch, hint)
                    try:
                        successful, hint = self.verify_successful(next_batch, result)
                    except Exception as e:
                        # verification itself crashed (e.g. un-parsable code):
                        # keep the LLM result rather than aborting the file.
                        print('ignored exception:\n' + str(e))
                        break
                    if successful:
                        break
                    if attempt == MAX_ATTEMPT - 1:
                        # cannot deal with this, give up
                        result = next_batch
                        break
                write_content += result
            except StopIteration:
                next_batch, line_no_start, line_no_end = [], -1, -1
                return None, write_content

    def verify_successful(self, original, revised):
        """ Determine whether the revised code contains every line that already exists.

        Returns (True, None) when every stripped original line (comments removed,
        lines containing quotes skipped) appears somewhere in the revised text;
        otherwise (False, missing_line) so the caller can retry with a hint.
        """
        from crazy_functions.ast_fns.comment_remove import remove_python_comments
        original = remove_python_comments(original)
        original_lines = original.split('\n')
        revised_lines = revised.split('\n')
        for l in original_lines:
            l = l.strip()
            if '\'' in l or '\"' in l: continue # ast sometimes toggle " to '
            found = False
            for lt in revised_lines:
                if l in lt:
                    found = True
                    break
            if not found:
                return False, l
        return True, None

View File

@@ -0,0 +1,45 @@
<!DOCTYPE html>
<!-- Side-by-side source-file comparison template.
     Placeholders substituted at render time by the caller:
       ADVANCED_CSS            - theme stylesheet injected as raw CSS text
       REPLACE_CODE_FILE_LEFT  - rendered HTML of the original file
       REPLACE_CODE_FILE_RIGHT - rendered HTML of the revised file -->
<html lang="zh-CN">
<head>
<style>ADVANCED_CSS</style>
<meta charset="UTF-8">
<title>源文件对比</title>
<style>
body {
font-family: Arial, sans-serif;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
}
.container {
display: flex;
width: 95%;
height: -webkit-fill-available;
}
.code-container {
flex: 1;
margin: 0px;
padding: 0px;
border: 1px solid #ccc;
background-color: #f9f9f9;
overflow: auto;
}
pre {
white-space: pre-wrap;
word-wrap: break-word;
}
</style>
</head>
<body>
<div class="container">
<div class="code-container">
REPLACE_CODE_FILE_LEFT
</div>
<div class="code-container">
REPLACE_CODE_FILE_RIGHT
</div>
</div>
</body>
</html>

View File

@@ -0,0 +1,46 @@
import ast
class CommentRemover(ast.NodeTransformer):
    """AST transformer that strips docstrings from modules, classes and functions.

    ('#' comments never reach the AST, so only docstrings are affected.)
    """

    @staticmethod
    def _strip_docstring(node):
        # A docstring is an Expr statement holding a string constant as the
        # first statement of the body.
        # BUGFIX: the original used ast.Str, which is deprecated since Python 3.8
        # and removed in 3.12 — use ast.Constant with a str value instead.
        if (node.body and isinstance(node.body[0], ast.Expr) and
            isinstance(node.body[0].value, ast.Constant) and
            isinstance(node.body[0].value.value, str)):
            node.body = node.body[1:]
            # Robustness: a body consisting only of a docstring would become
            # empty and could not be unparsed back to valid Python.
            if not node.body:
                node.body = [ast.Pass()]

    def visit_FunctionDef(self, node):
        """Remove the function's docstring, then recurse into its body."""
        self._strip_docstring(node)
        self.generic_visit(node)
        return node

    def visit_ClassDef(self, node):
        """Remove the class's docstring, then recurse into its body."""
        self._strip_docstring(node)
        self.generic_visit(node)
        return node

    def visit_Module(self, node):
        """Remove the module's docstring, then recurse into its body."""
        self._strip_docstring(node)
        self.generic_visit(node)
        return node
def remove_python_comments(source_code):
    """Return `source_code` with its docstrings stripped.

    Parses the text into an AST, runs CommentRemover over it, and unparses
    the transformed tree back into Python source ('#' comments never reach
    the AST, so only docstrings are removed).
    """
    module = ast.parse(source_code)          # parse source into an AST
    stripped = CommentRemover().visit(module)  # drop module/class/function docstrings
    return ast.unparse(stripped)             # convert the AST back to source text
# Example usage: read a source file, strip its docstrings, write the cleaned copy.
if __name__ == "__main__":
    with open("source.py", "r", encoding="utf-8") as f:
        source_code = f.read()
    cleaned_code = remove_python_comments(source_code)
    with open("cleaned_source.py", "w", encoding="utf-8") as f:
        f.write(cleaned_code)

View File

@@ -2,7 +2,7 @@ import os
from textwrap import indent
class FileNode:
def __init__(self, name):
def __init__(self, name, build_manifest=False):
self.name = name
self.children = []
self.is_leaf = False
@@ -10,6 +10,8 @@ class FileNode:
self.parenting_ship = []
self.comment = ""
self.comment_maxlen_show = 50
self.build_manifest = build_manifest
self.manifest = {}
@staticmethod
def add_linebreaks_at_spaces(string, interval=10):
@@ -29,6 +31,7 @@ class FileNode:
level = 1
if directory_names == "":
new_node = FileNode(file_name)
self.manifest[file_path] = new_node
current_node.children.append(new_node)
new_node.is_leaf = True
new_node.comment = self.sanitize_comment(file_comment)
@@ -50,6 +53,7 @@ class FileNode:
new_node.level = level - 1
current_node = new_node
term = FileNode(file_name)
self.manifest[file_path] = term
term.level = level
term.comment = self.sanitize_comment(file_comment)
term.is_leaf = True

View File

@@ -92,7 +92,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
def generate_story_image(self, story_paragraph):
try:
from crazy_functions.图片生成 import gen_image
from crazy_functions.Image_Generate import gen_image
prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落进行简短的环境描写。要求80字以内。')
image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
return f'<br/><div align="center"><img src="file={image_path}"></div>'

View File

@@ -0,0 +1,87 @@
# Few-shot prompt: rewrite a user question (plus chat history) into up to {num}
# web-search query strings, returned as a JSON list.
# Placeholders: {history}, {query}, {num}.
SearchOptimizerPrompt="""作为一个网页搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高网页检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
历史记录:
"
Q: 对话背景。
A: 当前对话是关于 Nginx 的介绍和在Ubuntu上的使用等。
"
原问题: 怎么下载
检索词: ["Nginx 下载","Ubuntu Nginx","Ubuntu安装Nginx"]
----------------
历史记录:
"
Q: 对话背景。
A: 当前对话是关于 Nginx 的介绍和使用等。
Q: 报错 "no connection"
A: 报错"no connection"可能是因为……
"
原问题: 怎么解决
检索词: ["Nginx报错"no connection" 解决","Nginx'no connection'报错 原因","Nginx提示'no connection'"]
----------------
历史记录:
"
"
原问题: 你知道 Python 么?
检索词: ["Python","Python 使用教程。","Python 特点和优势"]
----------------
历史记录:
"
Q: 列出Java的三种特点
A: 1. Java 是一种编译型语言。
2. Java 是一种面向对象的编程语言。
3. Java 是一种跨平台的编程语言。
"
原问题: 介绍下第2点。
检索词: ["Java 面向对象特点","Java 面向对象编程优势。","Java 面向对象编程"]
----------------
现在有历史记录:
"
{history}
"
有其原问题: {query}
直接给出最多{num}个检索词必须以json形式给出不得有多余字符:
"""
# Few-shot prompt: academic-paper variant of SearchOptimizerPrompt — rewrites a
# user question (plus chat history) into up to {num} scholarly search queries
# (Chinese and English), returned as a JSON list.
# Placeholders: {history}, {query}, {num}.
SearchAcademicOptimizerPrompt="""作为一个学术论文搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高学术论文检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
历史记录:
"
Q: 对话背景。
A: 当前对话是关于深度学习的介绍和在图像识别中的应用等。
"
原问题: 怎么下载相关论文
检索词: ["深度学习 图像识别 论文下载","图像识别 深度学习 研究论文","深度学习 图像识别 论文资源","Deep Learning Image Recognition Paper Download","Image Recognition Deep Learning Research Paper"]
----------------
历史记录:
"
Q: 对话背景。
A: 当前对话是关于深度学习的介绍和应用等。
Q: 报错 "模型不收敛"
A: 报错"模型不收敛"可能是因为……
"
原问题: 怎么解决
检索词: ["深度学习 模型不收敛 解决方案 论文","深度学习 模型不收敛 原因 研究","深度学习 模型不收敛 论文","Deep Learning Model Convergence Issue Solution Paper","Deep Learning Model Convergence Problem Research"]
----------------
历史记录:
"
"
原问题: 你知道 GAN 么?
检索词: ["生成对抗网络 论文","GAN 使用教程 论文","GAN 特点和优势 研究","Generative Adversarial Network Paper","GAN Usage Tutorial Paper"]
----------------
历史记录:
"
Q: 列出机器学习的三种应用?
A: 1. 机器学习在图像识别中的应用。
2. 机器学习在自然语言处理中的应用。
3. 机器学习在推荐系统中的应用。
"
原问题: 介绍下第2点。
检索词: ["机器学习 自然语言处理 应用 论文","机器学习 自然语言处理 研究","机器学习 NLP 应用 论文","Machine Learning Natural Language Processing Application Paper","Machine Learning NLP Research"]
----------------
现在有历史记录:
"
{history}
"
有其原问题: {query}
直接给出最多{num}个检索词必须以json形式给出不得有多余字符:
"""

View File

@@ -77,7 +77,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -12,7 +12,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
file_content = f.read()
i_say = f'请对下面的程序文件做一个概述并对文件中的所有函数生成注释使用markdown表格输出结果文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
i_say_show_user = f'[{index+1}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -13,7 +13,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

64
main.py
View File

@@ -24,6 +24,20 @@ def enable_log(PATH_LOGGING):
logging.getLogger("httpx").setLevel(logging.WARNING)
print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
def encode_plugin_info(k, plugin)->str:
import copy
from themes.theme import to_cookie_str
plugin_ = copy.copy(plugin)
plugin_.pop("Function", None)
plugin_.pop("Class", None)
plugin_.pop("Button", None)
plugin_["Info"] = plugin.get("Info", k)
if plugin.get("AdvancedArgs", False):
plugin_["Label"] = f"插件[{k}]的高级参数说明:" + plugin.get("ArgsReminder", f"没有提供高级参数功能说明")
else:
plugin_["Label"] = f"插件[{k}]不需要高级参数。"
return to_cookie_str(plugin_)
def main():
import gradio as gr
if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
@@ -98,8 +112,18 @@ def main():
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
with gr.Row():
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
with gr.Row(elem_id="gpt-submit-row"):
multiplex_submit_btn = gr.Button("提交", elem_id="elem_submit_visible", variant="primary")
multiplex_sel = gr.Dropdown(
choices=[
"常规对话",
"多模型对话",
# "智能上下文",
# "智能召回 RAG",
], value="常规对话",
interactive=True, label='', show_label=False,
elem_classes='normal_mut_select', elem_id="gpt-submit-dropdown").style(container=False)
submit_btn = gr.Button("提交", elem_id="elem_submit", variant="primary", visible=False)
with gr.Row():
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
@@ -146,7 +170,7 @@ def main():
if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里搜索插件列表", label="", show_label=False).style(container=False)
dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里输入「关键词」搜索插件", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, elem_id="advance_arg_input_legacy",
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
@@ -163,7 +187,7 @@ def main():
# 浮动菜单定义
from themes.gui_floating_menu import define_gui_floating_menu
area_input_secondary, txt2, area_customize, submitBtn2, resetBtn2, clearBtn2, stopBtn2 = \
area_input_secondary, txt2, area_customize, _, resetBtn2, clearBtn2, stopBtn2 = \
define_gui_floating_menu(customize_btns, functional, predefined_btns, cookies, web_cookie_cache)
# 插件二级菜单的实现
@@ -195,11 +219,15 @@ def main():
input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"]
output_combo = [cookies, chatbot, history, status]
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
# 提交按钮、重置按钮
cancel_handles.append(txt.submit(**predict_args))
cancel_handles.append(txt2.submit(**predict_args))
cancel_handles.append(submitBtn.click(**predict_args))
cancel_handles.append(submitBtn2.click(**predict_args))
multiplex_submit_btn.click(
None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
txt.submit(
None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
multiplex_sel.select(
None, [multiplex_sel], None, _js=f"""(multiplex_sel)=>run_multiplex_shift(multiplex_sel)""")
cancel_handles.append(submit_btn.click(**predict_args))
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)), [history], [chatbot, history, status, history_cache])
@@ -208,10 +236,7 @@ def main():
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
if AUTO_CLEAR_TXT:
submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
txt.submit(None, None, [txt, txt2], _js=js_code_clear)
txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
submit_btn.click(None, None, [txt, txt2], _js=js_code_clear)
# 基础功能区的回调函数注册
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
@@ -224,21 +249,6 @@ def main():
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
# 函数插件-固定按钮区
def encode_plugin_info(k, plugin)->str:
import copy
from themes.theme import to_cookie_str
plugin_ = copy.copy(plugin)
plugin_.pop("Function", None)
plugin_.pop("Class", None)
plugin_.pop("Button", None)
plugin_["Info"] = plugin.get("Info", k)
if plugin.get("AdvancedArgs", False):
plugin_["Label"] = f"插件[{k}]的高级参数说明:" + plugin.get("ArgsReminder", f"没有提供高级参数功能说明")
else:
plugin_["Label"] = f"插件[{k}]不需要高级参数。"
return to_cookie_str(plugin_)
# 插件的注册(前端代码注册)
for k in plugins:
register_advanced_plugin_init_arr += f"""register_plugin_init("{k}","{encode_plugin_info(k, plugins[k])}");"""
if plugins[k].get("Class", None):

View File

@@ -201,6 +201,16 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4o-mini": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"has_multimodal_capacity": True,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4o-2024-05-13": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -258,7 +268,6 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-3.5-random": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -475,7 +484,7 @@ for model in AVAIL_LLM_MODELS:
# -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
# claude家族
claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229"]
claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229","claude-3-5-sonnet-20240620"]
if any(item in claude_models for item in AVAIL_LLM_MODELS):
from .bridge_claude import predict_no_ui_long_connection as claude_noui
from .bridge_claude import predict as claude_ui
@@ -539,6 +548,16 @@ if any(item in claude_models for item in AVAIL_LLM_MODELS):
"token_cnt": get_token_num_gpt35,
},
})
model_info.update({
"claude-3-5-sonnet-20240620": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": claude_endpoint,
"max_token": 200000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
from .bridge_jittorllms_rwkv import predict as rwkv_ui

View File

@@ -496,10 +496,10 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st
"n": 1,
"stream": stream,
}
try:
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
# try:
# print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
# except:
# print('输入中可能存在乱码。')
return headers,payload

View File

@@ -17,7 +17,7 @@ import json
import requests
from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat
picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-5-sonnet-20240620"]
# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件不受git管控如果有则覆盖原config文件

View File

@@ -43,7 +43,8 @@ class TaichuChatInit:
if response.status_code == 200:
response.encoding = 'utf-8'
for line in response.iter_lines(decode_unicode=True):
delta = json.loads(line)['choices'][0]['text']
try: delta = json.loads(line)['data']['content']
except: delta = json.loads(line)['choices'][0]['text']
results += delta
yield delta, results
else:

View File

@@ -44,7 +44,7 @@ def decode_chunk(chunk):
try:
chunk = json.loads(chunk[6:])
except:
respose = "API_ERROR"
respose = ""
finish_reason = chunk
# 错误处理部分
if "error" in chunk:
@@ -200,10 +200,13 @@ def get_predict_function(
stream_response = response.iter_lines()
result = ""
finish_reason = ""
while True:
try:
chunk = next(stream_response)
except StopIteration:
if result == "":
raise RuntimeError(f"获得空的回复,可能原因:{finish_reason}")
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
@@ -351,6 +354,10 @@ def get_predict_function(
response_text, finish_reason = decode_chunk(chunk)
# 返回的数据流第一次为空,继续等待
if response_text == "" and finish_reason != "False":
status_text = f"finish_reason: {finish_reason}"
yield from update_ui(
chatbot=chatbot, history=history, msg=status_text
)
continue
if chunk:
try:

View File

@@ -271,7 +271,7 @@ def markdown_convertion_for_file(txt):
"""
from themes.theme import advanced_css
pre = f"""
<!DOCTYPE html><head><meta charset="utf-8"><title>PDF文档翻译</title><style>{advanced_css}</style></head>
<!DOCTYPE html><head><meta charset="utf-8"><title>GPT-Academic输出文档</title><style>{advanced_css}</style></head>
<body>
<div class="test_temp1" style="width:10%; height: 500px; float:left;"></div>
<div class="test_temp2" style="width:80%;padding: 40px;float:left;padding-left: 20px;padding-right: 20px;box-shadow: rgba(0, 0, 0, 0.2) 0px 0px 8px 8px;border-radius: 10px;">

View File

@@ -57,7 +57,7 @@ def validate_path_safety(path_or_url, user):
sensitive_path = PATH_LOGGING
elif path_or_url.startswith(PATH_PRIVATE_UPLOAD): # 用户的上传目录(按用户划分)
sensitive_path = PATH_PRIVATE_UPLOAD
elif path_or_url.startswith('tests'): # 一个常用的测试目录
elif path_or_url.startswith('tests') or path_or_url.startswith('build'): # 一个常用的测试目录
return True
else:
raise FriendlyException(f"输入文件的路径 ({path_or_url}) 存在,但位置非法。请将文件上传后再执行该任务。") # return False

10
tests/init_test.py Normal file
View File

@@ -0,0 +1,10 @@
def validate_path():
    """Change the working directory to the project root (the parent of this
    tests/ directory) and append it to sys.path, so tests can import project
    modules no matter where they are launched from.
    """
    import os, sys
    # BUGFIX: removed a dead `os.path.dirname(__file__)` expression whose
    # result was discarded.
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)

validate_path() # validate path so you can run from base directory

View File

@@ -2,23 +2,16 @@
对项目中的各个插件进行测试。运行方法:直接运行 python tests/test_plugins.py
"""
import init_test
import os, sys
def validate_path():
dir_name = os.path.dirname(__file__)
root_dir_assume = os.path.abspath(dir_name + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # 返回项目根路径
if __name__ == "__main__":
from tests.test_utils import plugin_test
from test_utils import plugin_test
plugin_test(plugin='crazy_functions.Internet_GPT->连接网络回答问题', main_input="谁是应急食品?")
plugin_test(plugin='crazy_functions.SourceCode_Comment->注释Python项目', main_input="build/test/python_comment")
# plugin_test(plugin='crazy_functions.Internet_GPT->连接网络回答问题', main_input="谁是应急食品?")
# plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
@@ -39,9 +32,9 @@ if __name__ == "__main__":
# plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表')
# plugin_test(plugin='crazy_functions.解析项目源代码->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
# plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
# plugin_test(plugin='crazy_functions.解析项目源代码->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
# plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
# plugin_test(plugin='crazy_functions.Latex全文润色->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")

View File

@@ -0,0 +1,342 @@
import init_test
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
import datetime
import re
from textwrap import dedent
# TODO: 解决缩进问题
# Prompt: ask the LLM to locate the boundary of the first/next function inside
# one line-tagged page of code. Typos fixed ("sperate", "you job") since the
# text is an instruction to the model.
find_function_end_prompt = '''
Below is a page of code that you need to read. This page may not yet be complete; your job is to split this page into separate functions, class functions etc.
- Provide the line number where the first visible function ends.
- Provide the line number where the next visible function begins.
- If there are no other functions in this page, you should simply return the line number of the last line.
- Only focus on functions declared by `def` keyword. Ignore inline functions. Ignore function calls.
------------------ Example ------------------
INPUT:
```
L0000 |import sys
L0001 |import re
L0002 |
L0003 |def trimmed_format_exc():
L0004 |    import os
L0005 |    import traceback
L0006 |    str = traceback.format_exc()
L0007 |    current_path = os.getcwd()
L0008 |    replace_path = "."
L0009 |    return str.replace(current_path, replace_path)
L0010 |
L0011 |
L0012 |def trimmed_format_exc_markdown():
L0013 |    ...
L0014 |    ...
```
OUTPUT:
```
<first_function_end_at>L0009</first_function_end_at>
<next_function_begin_from>L0012</next_function_begin_from>
```
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_TAGGED_CODE}
```
'''
revise_funtion_prompt = '''
You need to read the following code, and revise the code according to following instructions:
1. You should analyze the purpose of the functions (if there are any).
2. You need to add docstring for the provided functions (if there are any).
Be aware:
1. You must NOT modify the indent of code.
2. You are NOT authorized to change or translate non-comment code, and you are NOT authorized to add empty lines either.
3. Use English to add comments and docstrings. Do NOT translate Chinese that is already in the code.
------------------ Example ------------------
INPUT:
```
L0000 |
L0001 |def zip_result(folder):
L0002 | t = gen_time_str()
L0003 | zip_folder(folder, get_log_folder(), f"result.zip")
L0004 | return os.path.join(get_log_folder(), f"result.zip")
L0005 |
L0006 |
```
OUTPUT:
<instruction_1_purpose>
This function compresses a given folder, and return the path of the resulting `zip` file.
</instruction_1_purpose>
<instruction_2_revised_code>
```
def zip_result(folder):
"""
Compresses the specified folder into a zip file and stores it in the log folder.
Args:
folder (str): The path to the folder that needs to be compressed.
Returns:
str: The path to the created zip file in the log folder.
"""
t = gen_time_str()
zip_folder(folder, get_log_folder(), f"result.zip") # ⭐ Execute the zipping of folder
return os.path.join(get_log_folder(), f"result.zip")
```
</instruction_2_revised_code>
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_CODE}
```
{INDENT_REMINDER}
'''
class ContextWindowManager():
    """Paginate a source file into function-aligned windows and rewrite each
    window with LLM-generated comments/docstrings.

    Usage: call `read_file` first, then repeatedly `get_next_batch` (raises
    StopIteration at EOF) and `tag_code` on each returned batch.
    """

    def __init__(self, llm_kwargs) -> None:
        self.full_context = []               # raw lines of the loaded file
        self.full_context_with_line_no = []  # same lines, tagged "L%04d | ..."
        self.current_page_start = 0          # first line not yet paged out
        self.page_limit = 100                # lines of code per page sent to the LLM
        self.ignore_limit = 20               # a tail shorter than this is handled in one go
        self.llm_kwargs = llm_kwargs

    def generate_tagged_code_from_full_context(self):
        """Prefix every source line with a zero-padded tag, e.g. "L0042 | code"."""
        for i, code in enumerate(self.full_context):
            padded_number = f"{i:04}"
            self.full_context_with_line_no.append(f"L{padded_number} | {code}")
        return self.full_context_with_line_no

    def read_file(self, path):
        """Load `path` and build the line-tagged copy used in LLM prompts."""
        with open(path, 'r', encoding='utf8') as f:
            self.full_context = f.readlines()
        self.full_context_with_line_no = self.generate_tagged_code_from_full_context()

    def find_next_function_begin(self, tagged_code: list, begin_and_end):
        """Ask the LLM where the next function begins inside `tagged_code`.

        Args:
            tagged_code: list of "Lxxxx | ..." lines forming one page.
            begin_and_end: [begin, end] absolute line range of the page.

        Returns:
            int: absolute line number where the next function begins.

        Raises:
            RuntimeError: if the LLM reply carries no usable tag.
        """
        begin, end = begin_and_end
        THE_TAGGED_CODE = ''.join(tagged_code)
        self.llm_kwargs['temperature'] = 0  # boundary detection should be deterministic
        result = predict_no_ui_long_connection(
            inputs=find_function_end_prompt.format(THE_TAGGED_CODE=THE_TAGGED_CODE),
            llm_kwargs=self.llm_kwargs,
            history=[],
            sys_prompt="",
            observe_window=[],
            console_slience=True
        )
        match = re.search(r'<next_function_begin_from>L(\d+)</next_function_begin_from>', result)
        if match is None:
            # BUGFIX: the original raised a bare RuntimeError followed by an
            # unreachable `return end`; the dead line is removed and the error
            # now explains itself.
            raise RuntimeError("LLM reply contains no <next_function_begin_from> tag")
        return int(match.group(1))

    def _get_next_window(self):
        """Compute the next [start, end) window, ending on a function boundary.

        Raises:
            StopIteration: once the whole file has been consumed.
        """
        current_page_start = self.current_page_start
        if self.current_page_start == len(self.full_context) + 1:
            raise StopIteration
        # If very few lines remain, process them all in a single window.
        if len(self.full_context) - self.current_page_start < self.ignore_limit:
            future_page_start = len(self.full_context) + 1
            self.current_page_start = future_page_start
            return current_page_start, future_page_start
        tagged_code = self.full_context_with_line_no[self.current_page_start: self.current_page_start + self.page_limit]
        line_no = self.find_next_function_begin(tagged_code, [self.current_page_start, self.current_page_start + self.page_limit])
        # Near EOF: swallow the remainder instead of creating a tiny last page.
        if line_no > len(self.full_context) - 5:
            line_no = len(self.full_context) + 1
        future_page_start = line_no
        self.current_page_start = future_page_start
        return current_page_start, future_page_start

    def dedent(self, text):
        """Remove any common leading whitespace from every line in `text`.

        Returns:
            tuple[str, int]: (dedented text, characters removed per line).
        """
        margin = None
        _whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
        _leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
        text = _whitespace_only_re.sub('', text)
        indents = _leading_whitespace_re.findall(text)
        for indent in indents:
            if margin is None:
                margin = indent
            elif indent.startswith(margin):
                # Current line more deeply indented than previous winner:
                # no change (previous winner is still on top).
                pass
            elif margin.startswith(indent):
                # Current line consistent with and no deeper than previous
                # winner: it's the new winner.
                margin = indent
            else:
                # Diverging indents: keep only the common whitespace prefix.
                for i, (x, y) in enumerate(zip(margin, indent)):
                    if x != y:
                        margin = margin[:i]
                        break
        if margin:
            text = re.sub(r'(?m)^' + margin, '', text)
        # BUGFIX: `len(margin)` raised TypeError when no indented line existed
        # (margin stays None, e.g. for whitespace-only input).
        return text, len(margin) if margin else 0

    def get_next_batch(self):
        """Return (lines, start, end) for the next window; StopIteration at EOF."""
        current_page_start, future_page_start = self._get_next_window()
        return self.full_context[current_page_start: future_page_start], current_page_start, future_page_start

    def tag_code(self, fn):
        """Ask the LLM to add docs to the code lines `fn`; return the revised code.

        Falls back to the original code when the reply has no single code fence.
        """
        code = ''.join(fn)
        _, n_indent = self.dedent(code)
        # BUGFIX: this literal was missing its f-prefix, so the raw text
        # "{n_indent}" was sent to the model instead of the number.
        indent_reminder = "" if n_indent == 0 else f"(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preserve them in the OUTPUT.)"
        self.llm_kwargs['temperature'] = 0  # deterministic rewriting
        result = predict_no_ui_long_connection(
            inputs=revise_funtion_prompt.format(THE_CODE=code, INDENT_REMINDER=indent_reminder),
            llm_kwargs=self.llm_kwargs,
            history=[],
            sys_prompt="",
            observe_window=[],
            console_slience=True
        )

        def get_code_block(reply):
            """Extract the single ``` fenced block from `reply`, or None."""
            pattern = r"```([\s\S]*?)```"  # regex pattern to match code blocks
            matches = re.findall(pattern, reply)
            if len(matches) != 1:
                return None
            block = matches[0]
            # BUGFIX: `.strip('python')` stripped any of the characters
            # p/y/t/h/o/n from BOTH ends, eating real code; only drop the
            # leading language tag.
            if block.startswith('python'):
                block = block[len('python'):]
            return block

        code_block = get_code_block(result)
        if code_block is not None:
            return self.sync_and_patch(original=code, revised=code_block)
        return code

    def sync_and_patch(self, original, revised):
        """Make `revised` match `original`'s leading/trailing empty-line counts."""

        def count_leading_empty_lines(s, reverse=False):
            """Count empty lines at the start (or end, when `reverse`) of `s`."""
            lines = s.split('\n')
            if reverse:
                lines = list(reversed(lines))
            count = 0
            for line in lines:
                if line.strip() == '':
                    count += 1
                else:
                    break
            return count

        # Align leading empty lines.
        original_empty_lines = count_leading_empty_lines(original)
        revised_empty_lines = count_leading_empty_lines(revised)
        if original_empty_lines > revised_empty_lines:
            revised = '\n' * (original_empty_lines - revised_empty_lines) + revised
        elif original_empty_lines < revised_empty_lines:
            lines = revised.split('\n')
            revised = '\n'.join(lines[revised_empty_lines - original_empty_lines:])
        # Align trailing empty lines.
        original_empty_lines = count_leading_empty_lines(original, reverse=True)
        revised_empty_lines = count_leading_empty_lines(revised, reverse=True)
        if original_empty_lines > revised_empty_lines:
            revised = revised + '\n' * (original_empty_lines - revised_empty_lines)
        elif original_empty_lines < revised_empty_lines:
            lines = revised.split('\n')
            revised = '\n'.join(lines[:-(revised_empty_lines - original_empty_lines)])
        return revised
# ---- ad-hoc driver: annotate ./test.py window by window, writing to temp.py ----
from toolbox import get_plugin_default_kwargs
llm_kwargs = get_plugin_default_kwargs()["llm_kwargs"]
cwm = ContextWindowManager(llm_kwargs)
cwm.read_file(path="./test.py")
output_buf = ""
with open('temp.py', 'w+', encoding='utf8') as f:
    while True:
        try:
            # Fetch the next function-aligned window and rewrite it with docs.
            next_batch, line_no_start, line_no_end = cwm.get_next_batch()
            result = cwm.tag_code(next_batch)
            f.write(result)
            output_buf += result
        except StopIteration:
            # _get_next_window signals end-of-file with StopIteration.
            next_batch, line_no_start, line_no_end = [], -1, -1
            break
        # Echo the raw window that was just processed (debug aid).
        print('-------------------------------------------')
        print(''.join(next_batch))
        print('-------------------------------------------')
print(cwm)  # NOTE(review): prints only the default repr — likely leftover debug output

View File

@@ -142,3 +142,132 @@
border-top-width: 0;
}
/* Overlay holding the welcome cards: absolutely centered in its parent and
   cross-faded via the .show/.hide modifier classes. */
.welcome-card-container {
    text-align: center;
    margin: 0 auto;
    display: flex;
    position: absolute;
    width: inherit;
    padding: 50px;
    top: 50%;
    left: 50%;
    transform: translate(-50%, -50%); /* center on the 50%/50% anchor point */
    flex-wrap: wrap;
    justify-content: center;
    transition: opacity 1s ease-in-out; /* drives the show/hide fade */
    opacity: 0; /* hidden by default until .show is added */
}
.welcome-card-container.show {
    opacity: 1;
}
.welcome-card-container.hide {
    opacity: 0;
}
/* A single card; the rotateY show/hide pair produces the flip animation. */
.welcome-card {
    border-radius: 10px;
    box-shadow: 0px 0px 6px 3px #e5e7eb6b;
    padding: 15px;
    margin: 10px;
    flex: 1 0 calc(30% - 5px); /* roughly three cards per row */
    transform: rotateY(0deg);
    transition: transform 0.1s;
    transform-style: preserve-3d;
}
.welcome-card.show {
    transform: rotateY(0deg);
}
.welcome-card.hide {
    transform: rotateY(90deg); /* edge-on, i.e. invisible mid-flip */
}
.welcome-title {
    font-size: 40px;
    padding: 20px;
    margin: 10px;
    flex: 0 0 calc(90%); /* the title spans (nearly) the full row */
}
.welcome-card-title {
    font-size: 20px;
    margin: 2px;
    flex: 0 0 calc(95%);
    padding-bottom: 8px;
    padding-top: 8px;
    padding-right: 8px;
    padding-left: 8px;
    display: flex;
    justify-content: center;
}
.welcome-svg {
    padding-right: 10px; /* gap between the icon and the title text */
}
.welcome-title-text {
    text-wrap: nowrap; /* NOTE(review): `text-wrap` is a newer property — confirm target browsers */
}
.welcome-content {
    text-wrap: balance;
    height: 55px; /* fixed height keeps cards within one row aligned */
    display: flex;
    align-items: center;
}
/* Submit button + dropdown rendered as one fused primary-button group.
   FIX: `display: flex` was declared twice in this rule; the duplicate is removed. */
#gpt-submit-row {
    display: flex;
    gap: 0 !important;
    border-radius: var(--button-large-radius);
    border: var(--button-border-width) solid var(--button-primary-border-color);
    /* background: var(--button-primary-background-fill); */
    background: var(--button-primary-background-fill-hover);
    color: var(--button-primary-text-color);
    box-shadow: var(--button-shadow);
    transition: var(--button-transition);
}
#gpt-submit-row:hover {
    border-color: var(--button-primary-border-color-hover);
    /* background: var(--button-primary-background-fill-hover); */
    /* color: var(--button-primary-text-color-hover); */
}
/* The visible submit button: square the right edge so it fuses with the dropdown. */
#gpt-submit-row button#elem_submit_visible {
    border-top-right-radius: 0px;
    border-bottom-right-radius: 0px;
    box-shadow: none !important;
    flex-grow: 1; /* take all row width the dropdown does not use */
}
/* The narrow dropdown half: square the left edge and pin the width to 40px. */
#gpt-submit-row #gpt-submit-dropdown {
    border-top-left-radius: 0px;
    border-bottom-left-radius: 0px;
    border-left: 0.5px solid #FFFFFF88 !important; /* thin divider between the halves */
    display: flex;
    overflow: unset !important; /* let the options list escape the 40px box */
    max-width: 40px !important;
    min-width: 40px !important;
}
#gpt-submit-row #gpt-submit-dropdown input {
    pointer-events: none;
    opacity: 0; /* hide the text input; only the arrow should show */
    width: 0;
    margin-inline: 0;
    cursor: pointer;
}
#gpt-submit-row #gpt-submit-dropdown label {
    display: flex;
    width: 0;
}
/* Strip gradio's default dropdown chrome. */
#gpt-submit-row #gpt-submit-dropdown label div.wrap {
    background: none;
    box-shadow: none;
    border: none;
}
#gpt-submit-row #gpt-submit-dropdown label div.wrap div.wrap-inner {
    background: none;
    padding-inline: 0;
    height: 100%;
}
#gpt-submit-row #gpt-submit-dropdown svg.dropdown-arrow {
    transform: scale(2) translate(4.5px, -0.3px); /* enlarge and nudge the arrow glyph */
}
#gpt-submit-row #gpt-submit-dropdown > *:hover {
    cursor: context-menu;
}

View File

@@ -796,6 +796,26 @@ function minor_ui_adjustment() {
}, 200); // 每50毫秒执行一次
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// 对提交按钮的下拉选框做的变化
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Fuse the visible submit button with its dropdown: inject a style rule that
// sizes/positions the dropdown's option list to span both elements, and keep
// it in sync on window resize.
// FIX: only `submitButton` was null-checked; `submitDropdown.clientWidth`
// could throw before the dropdown finished mounting.
function ButtonWithDropdown_init() {
    let submitButton = document.querySelector('button#elem_submit_visible');
    let submitDropdown = document.querySelector('#gpt-submit-dropdown');
    function updateDropdownWidth() {
        if (submitButton && submitDropdown) {
            let setWidth = submitButton.clientWidth + submitDropdown.clientWidth;
            let setLeft = -1 * submitButton.clientWidth;
            // Replace (not accumulate) the previously injected style element.
            document.getElementById('submit-dropdown-style')?.remove();
            const styleElement = document.createElement('style');
            styleElement.id = 'submit-dropdown-style';
            styleElement.innerHTML = `#gpt-submit-dropdown ul.options { width: ${setWidth}px; left: ${setLeft}px; }`;
            document.head.appendChild(styleElement);
        }
    }
    window.addEventListener('resize', updateDropdownWidth);
    updateDropdownWidth();
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// 第 6 部分: 避免滑动
@@ -1050,364 +1070,6 @@ async function on_plugin_exe_complete(fn_name) {
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// 第 8 部分: TTS语音生成函数
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Global switch for verbose TTS logging.
audio_debug = false;

// Sequential player for decoded TTS audio chunks, backed by the Web Audio API.
class AudioPlayer {
    constructor() {
        const Ctor = window.AudioContext || window.webkitAudioContext;
        this.audioCtx = new Ctor();
        this.queue = [];            // pending encoded chunks, played in order
        this.isPlaying = false;     // true while a source node is active
        this.currentSource = null;  // the AudioBufferSourceNode currently playing
    }
    // Decode a Base64 string into an ArrayBuffer.
    base64ToArrayBuffer(base64) {
        const binaryString = window.atob(base64);
        const bytes = Uint8Array.from(binaryString, (ch) => ch.charCodeAt(0));
        return bytes.buffer;
    }
    // Start the next queued chunk unless something is already playing.
    checkQueue() {
        if (this.isPlaying || this.queue.length === 0) return;
        this.isPlaying = true;
        this.play_wave(this.queue.shift());
    }
    // Add a chunk to the queue (only while auto-read is enabled).
    enqueueAudio(audio_buf_wave) {
        if (!allow_auto_read_tts_flag) return;
        this.queue.push(audio_buf_wave);
        this.checkQueue();
    }
    // Decode and play one chunk; chain to the next on completion or error.
    async play_wave(encodedAudio) {
        // const audioData = this.base64ToArrayBuffer(encodedAudio);
        const audioData = encodedAudio; // already an ArrayBuffer
        try {
            const buffer = await this.audioCtx.decodeAudioData(audioData);
            const source = this.audioCtx.createBufferSource();
            source.buffer = buffer;
            source.connect(this.audioCtx.destination);
            source.onended = () => {
                if (allow_auto_read_tts_flag) {
                    this.isPlaying = false;
                    this.currentSource = null; // finished: clear the active source
                    this.checkQueue();
                }
            };
            this.currentSource = source; // remember the active source
            source.start();
        } catch (e) {
            console.log("Audio error!", e);
            this.isPlaying = false;
            this.currentSource = null; // clear on failure as well
            this.checkQueue();
        }
    }
    // Immediately stop playback and discard everything still queued.
    stop() {
        if (!this.currentSource) return;
        this.queue = [];
        this.currentSource.stop();
        this.currentSource = null;
        this.isPlaying = false;
        // Keep audioCtx open: closing it would prevent any further playback.
    }
}
const audioPlayer = new AudioPlayer();
// A promise-based FIFO mutex: lock() resolves for callers strictly in arrival
// order; unlock() hands the lock to the next waiter.
class FIFOLock {
    constructor() {
        this.queue = [];                  // pending resolvers, oldest first
        this.currentTaskExecuting = false; // true while a holder owns the lock
    }
    // Returns a promise that resolves once it is this caller's turn.
    lock() {
        let release;
        const gate = new Promise((resolve) => { release = resolve; });
        this.queue.push(release);
        if (!this.currentTaskExecuting) this._dequeueNext();
        return gate;
    }
    // Grant the lock to the oldest waiter, if any.
    _dequeueNext() {
        const next = this.queue.shift();
        if (next === undefined) {
            this.currentTaskExecuting = false;
            return;
        }
        this.currentTaskExecuting = true;
        next();
    }
    // Release the lock and wake the next waiter in FIFO order.
    unlock() {
        this.currentTaskExecuting = false;
        this._dequeueNext();
    }
}
// Promise-based sleep: resolves after `ms` milliseconds.
function delay(ms) {
    return new Promise((resolve) => { setTimeout(resolve, ms); });
}
// Debounce: return a wrapper that invokes `fire` with the most recent
// arguments once T milliseconds have elapsed without another call.
function trigger(T, fire) {
    let pendingTimer = null; // active setTimeout id, if any
    let latestArgs = null;   // arguments from the most recent call
    return function (...args) {
        latestArgs = args;
        // A newer call supersedes the previous pending one.
        if (pendingTimer !== null) clearTimeout(pendingTimer);
        pendingTimer = setTimeout(() => fire(...latestArgs), T);
    };
}
prev_text = ""; // previous full text of the watched chatbot message (change detection)
prev_text_already_pushed = ""; // prefix already handed to TTS; new audio starts after it
prev_chatbot_index = -1; // which chatbot message the state above refers to
// Debounced commit: push trailing text to TTS after 3 s with no further updates.
const delay_live_text_update = trigger(3000, on_live_stream_terminate);
// Debounce target: once the live stream has been quiet long enough, push any
// text that was never sent to TTS.
// FIX: `remaining_text` was an implicit global; it is now block-scoped.
function on_live_stream_terminate(latest_text) {
    if (audio_debug) console.log("on_live_stream_terminate", latest_text);
    // Only the suffix beyond what was already pushed remains to be spoken.
    const remaining_text = latest_text.slice(prev_text_already_pushed.length);
    if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
        prev_text_already_pushed = latest_text;
        push_text_to_audio(remaining_text);
    }
}
// Heuristic: is `text` a continuation (in-place growth) of `prev_text`?
// Tolerates small rewrites of the tail via the `abl` slack characters.
// FIX: `abl` was an implicit global assignment; now a local constant.
function is_continue_from_prev(text, prev_text) {
    const abl = 5; // allowed shrink/rewrite slack at the end of prev_text
    if (text.length < prev_text.length - abl) {
        return false;
    }
    if (prev_text.length > 10) {
        // Compare against a stable prefix of prev_text (capped at 100 chars).
        return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100)));
    } else {
        return text.startsWith(prev_text);
    }
}
// True when `remaining_text` contains nothing but whitespace and the TTS
// sentence separators (newline and 。).
function isEmptyOrWhitespaceOnly(remaining_text) {
    const withoutSeparators = remaining_text.replace(/[\n。]/g, '');
    return withoutSeparators.trim().length === 0;
}
// Given freshly grown text, commit every complete sentence (terminated by
// \n or 。) to the audio pipeline and advance `prev_text_already_pushed`.
// FIX: `index_of_last_sep` and `tobe_pushed` were implicit globals; both are
// now properly declared locals.
function process_increased_text(remaining_text) {
    // Leading separators carry no speech: absorb them into the pushed prefix.
    while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) {
        prev_text_already_pushed = prev_text_already_pushed + remaining_text[0];
        remaining_text = remaining_text.slice(1);
    }
    if (remaining_text.includes('\n') || remaining_text.includes('。')) {
        // Commit everything up to (and including) the last separator; the
        // unterminated tail stays pending.
        const index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。'));
        const tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1);
        prev_text_already_pushed = prev_text_already_pushed + tobe_pushed;
        if (!isEmptyOrWhitespaceOnly(tobe_pushed)) {
            push_text_to_audio(tobe_pushed);
        }
    }
}
// Classify how the chatbot's newest text relates to what was already sent to
// TTS — (a) same message grown in place, (b) same message rewritten, or
// (c) a new message — and push the appropriate increment to the audio queue.
function process_latest_text_output(text, chatbot_index) {
    if (text.length == 0) {
        prev_text = text;
        prev_text_mask = text; // NOTE(review): implicit global, not read anywhere visible — confirm intent
        // console.log('empty text')
        return;
    }
    if (text == prev_text) {
        // console.log('[nothing changed]')
        return;
    }
    var is_continue = is_continue_from_prev(text, prev_text_already_pushed);
    if (chatbot_index == prev_chatbot_index && is_continue) {
        // Same message, text only grew: push just the new suffix.
        remaining_text = text.slice(prev_text_already_pushed.length);
        process_increased_text(remaining_text);
        delay_live_text_update(text); // in case no \n or 。 ever arrives, this timer finally commits
    }
    else if (chatbot_index == prev_chatbot_index && !is_continue) {
        // Same message but the text was rewritten ("twisted"): restart tracking.
        if (audio_debug) console.log('---------------------');
        if (audio_debug) console.log('text twisting!');
        if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
        if (audio_debug) console.log('---------------------');
        prev_text_already_pushed = "";
        delay_live_text_update(text); // in case no \n or 。 ever arrives, this timer finally commits
    }
    else {
        // A brand-new message: reset the pushed prefix and process its full text.
        if (audio_debug) console.log('---------------------');
        if (audio_debug) console.log('new message begin!');
        if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
        if (audio_debug) console.log('---------------------');
        prev_text_already_pushed = "";
        process_increased_text(text);
        delay_live_text_update(text); // in case no \n or 。 ever arrives, this timer finally commits
    }
    prev_text = text;
    prev_chatbot_index = chatbot_index;
}
// Serialize TTS submissions: only one push_text_to_audio body runs at a time.
const audio_push_lock = new FIFOLock();

// Split `text` on newlines/。 and POST each non-empty line to the /vits TTS
// endpoint, pacing requests while auto-read remains enabled.
async function push_text_to_audio(text) {
    if (!allow_auto_read_tts_flag) {
        return;
    }
    await audio_push_lock.lock();
    var lines = text.split(/[\n。]/);
    for (const audio_buf_text of lines) {
        if (audio_buf_text) {
            // Append 'vits' to the current URL to form the target endpoint.
            const url = `${window.location.href}vits`;
            // Payload for the POST request.
            const payload = {
                text: audio_buf_text,
                text_language: "zh"
            };
            // Fire the request; replies are re-ordered downstream by index.
            post_text(url, payload, send_index);
            send_index = send_index + 1;
            if (audio_debug) console.log(send_index, audio_buf_text);
            // Pace requests by 3 s so the TTS backend is not flooded
            // (original comment said "2 seconds" — the delay is 3000 ms).
            if (allow_auto_read_tts_flag) {
                await delay(3000);
            }
        }
    }
    audio_push_lock.unlock();
}
// Monotonic counters pairing TTS requests with replies: `send_index` tags
// outgoing requests; `recv_index` is the next reply expected for playback.
var send_index = 0;
var recv_index = 0;
// Replies that arrived out of order, parked as [index, waveBuffer].
var to_be_processed = [];

// Re-order asynchronous TTS replies so audio is enqueued strictly by index.
// FIX: `to_be_processed.pop(i)` — Array.prototype.pop ignores its argument
// and removed the LAST entry, silently dropping queued audio; replaced with
// splice(i, 1). `find_any` and `i` were implicit globals, now declared.
async function UpdatePlayQueue(cnt, audio_buf_wave) {
    if (cnt != recv_index) {
        // Not this reply's turn yet: park it for later.
        to_be_processed.push([cnt, audio_buf_wave]);
        if (audio_debug) console.log('cache', cnt);
    }
    else {
        if (audio_debug) console.log('processing', cnt);
        recv_index = recv_index + 1;
        if (audio_buf_wave) {
            audioPlayer.enqueueAudio(audio_buf_wave);
        }
        // Drain any parked replies that are now in order.
        while (true) {
            let find_any = false;
            for (let i = to_be_processed.length - 1; i >= 0; i--) {
                if (to_be_processed[i][0] == recv_index) {
                    if (audio_debug) console.log('processing cached', recv_index);
                    if (to_be_processed[i][1]) {
                        audioPlayer.enqueueAudio(to_be_processed[i][1]);
                    }
                    to_be_processed.splice(i, 1);
                    find_any = true;
                    recv_index = recv_index + 1;
                }
            }
            if (!find_any) { break; }
        }
    }
}
// Request TTS for `payload`; always feed result slot `cnt` into the playback
// re-ordering queue (null when auto-read is disabled, so ordering still advances).
function post_text(url, payload, cnt) {
    if (!allow_auto_read_tts_flag) {
        UpdatePlayQueue(cnt, null);
        return;
    }
    postData(url, payload, cnt).then((data) => {
        UpdatePlayQueue(cnt, data);
    });
}
// Whether the user has already been warned about a TTS failure
// (the alert that would set this is currently commented out below).
notify_user_error = false
// POST `data` as JSON to `url`; resolve to the response body as an
// ArrayBuffer, or null on any HTTP or network error.
async function postData(url = '', data = {}) {
    try {
        const response = await fetch(url, {
            method: 'POST',
            body: JSON.stringify(data),
        });
        if (!response.ok) {
            // Non-2xx status: report and fall back to "no audio".
            console.info('There was a problem during audio generation requests:', response.status);
            // if (!notify_user_error){
            //     notify_user_error = true;
            //     alert('There was a problem during audio generation requests:', response.status);
            // }
            return null;
        }
        return await response.arrayBuffer();
    } catch (error) {
        // Network-level failure: same fallback.
        console.info('There was a problem during audio generation requests:', error);
        // if (!notify_user_error){
        //     notify_user_error = true;
        //     alert('There was a problem during audio generation requests:', error);
        // }
        return null;
    }
}
async function generate_menu(guiBase64String, btnName){
// assign the button and menu data
push_data_to_gradio_component(guiBase64String, "invisible_current_pop_up_plugin_arg", "string");
@@ -1634,33 +1296,74 @@ async function run_dropdown_shift(dropdown){
}
}
// Open a duplicate of the current conversation page in a new browser tab.
async function duplicate_in_new_window() {
    window.open(window.location.href, '_blank');
}
// Look up a classic plugin by its element id and execute it via its button name.
// FIX: the body previously BOTH inlined the execution steps (build gui_args,
// close menu, push data, click the callback button) AND then called
// call_plugin_via_name(), which repeats the exact same steps — firing the
// plugin twice. The inline copy is removed; execution is delegated once.
// FIX: `key` was an implicit global in the for-in loop.
async function run_classic_plugin_via_id(plugin_elem_id) {
    for (const key in plugin_init_info_lib) {
        if (plugin_init_info_lib[key].elem_id == plugin_elem_id) {
            // Resolve the plugin's display (button) name, then delegate.
            let current_btn_name = await get_data_from_gradio_component(plugin_elem_id);
            console.log(current_btn_name);
            // 执行
            call_plugin_via_name(current_btn_name);
            return;
        }
    }
    // console.log('unable to find function');
    return;
}
// Execute a plugin by its button name: close the argument menu, carry over the
// legacy advanced-argument textbox for backward compatibility, then click the
// invisible callback button that actually launches the plugin.
// FIX: `gui_args` was an implicit global; now a local declaration.
async function call_plugin_via_name(current_btn_name) {
    let gui_args = {};
    // Close the argument menu if it is currently open.
    push_data_to_gradio_component({
        visible: false,
        __type__: 'update'
    }, "plugin_arg_menu", "obj");
    hide_all_elem();
    // Legacy compatibility: forward the old advanced-arg input when non-empty.
    let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
    if (advance_arg_input_legacy.length != 0) {
        gui_args["advanced_arg"] = {};
        gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy;
    }
    // Hand the arguments to gradio and fire the hidden execution button.
    push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
    push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string");
    document.getElementById("invisible_callback_btn_for_plugin_exe").click();
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// 多用途复用提交按钮
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Programmatically fire the hidden "real" submit button.
async function click_real_submit_btn() {
    const realBtn = document.getElementById("elem_submit");
    realBtn.click();
}
// Route the multiplexed submit action: plain chat goes to the real submit
// button; multi-model chat is dispatched as a plugin call.
async function multiplex_function_begin(multiplex_sel) {
    switch (multiplex_sel) {
        case "常规对话":
            click_real_submit_btn();
            return;
        case "多模型对话": {
            // Must match the plugin name declared in crazy_functional.py.
            let _align_name_in_crazy_function_py = "询问多个GPT模型";
            call_plugin_via_name(_align_name_in_crazy_function_py);
            return;
        }
    }
}
// Relabel the visible submit button to reflect the selected multiplex mode.
async function run_multiplex_shift(multiplex_sel) {
    const key = (multiplex_sel === "常规对话")
        ? "提交"
        : "提交 (" + multiplex_sel + ")";
    push_data_to_gradio_component({
        value: key,
        __type__: 'update'
    }, "elem_submit_visible", "obj");
}

View File

@@ -1,4 +1,4 @@
from functools import cache
from functools import lru_cache
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
@@ -24,13 +24,15 @@ def minimize_js(common_js_path):
except:
return common_js_path
@cache
@lru_cache
def get_common_html_javascript_code():
js = "\n"
common_js_path_list = [
"themes/common.js",
"themes/theme.js",
"themes/tts.js",
"themes/init.js",
"themes/welcome.js",
]
if ADD_WAIFU: # 添加Live2D
@@ -42,10 +44,10 @@ def get_common_html_javascript_code():
for common_js_path in common_js_path_list:
if '.min.' not in common_js_path:
minimized_js_path = minimize_js(common_js_path)
for jsf in [
f"file={minimized_js_path}",
]:
js += f"""<script src="{jsf}"></script>\n"""
else:
minimized_js_path = common_js_path
jsf = f"file={minimized_js_path}"
js += f"""<script src="{jsf}"></script>\n"""
if not ADD_WAIFU:
js += """<script>window.loadLive2D = function(){};</script>\n"""

View File

@@ -1,7 +1,7 @@
:root {
--chatbot-color-light: #000000;
--chatbot-color-dark: #FFFFFF;
--chatbot-background-color-light: #F3F3F3;
--chatbot-background-color-light: #FFFFFF;
--chatbot-background-color-dark: #121111;
--message-user-background-color-light: #95EC69;
--message-user-background-color-dark: #26B561;
@@ -196,7 +196,7 @@ footer {
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
background: #e7e6e6 !important;
background: #f1f1f1 !important;
width: 100% !important;
}

View File

@@ -62,11 +62,11 @@ def adjust_theme():
button_primary_text_color="white",
button_primary_text_color_dark="white",
button_secondary_background_fill="*neutral_100",
button_secondary_background_fill_hover="*neutral_50",
button_secondary_background_fill_hover="#FEFEFE",
button_secondary_background_fill_dark="*neutral_900",
button_secondary_text_color="*neutral_800",
button_secondary_text_color_dark="white",
background_fill_primary="*neutral_50",
background_fill_primary="#FEFEFE",
background_fill_primary_dark="#1F1F1F",
block_title_text_color="*primary_500",
block_title_background_fill_dark="*primary_900",

View File

@@ -8,8 +8,10 @@ def define_gui_floating_menu(customize_btns, functional, predefined_btns, cookie
with gr.Column(scale=10):
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.",
elem_id='user_input_float', lines=8, label="输入区2").style(container=False)
txt2.submit(None, None, None, _js="""click_real_submit_btn""")
with gr.Column(scale=1, min_width=40):
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
submitBtn2.click(None, None, None, _js="""click_real_submit_btn""")
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")

View File

@@ -29,6 +29,10 @@ def define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAI
checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
open_new_tab = gr.Button("打开新对话", variant="secondary").style(size="sm")
open_new_tab.click(None, None, None, _js=f"""()=>duplicate_in_new_window()""")
with gr.Tab("帮助", elem_id="interact-panel"):
gr.Markdown(help_menu_description)
return checkboxes, checkboxes_2, max_length_sl, theme_dropdown, system_prompt, file_upload_2, md_dropdown, top_p, temperature

View File

@@ -2,11 +2,18 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
// 第一部分,布局初始化
audio_fn_init();
minor_ui_adjustment();
ButtonWithDropdown_init();
// 加载欢迎页面
const welcomeMessage = new WelcomeMessage();
welcomeMessage.begin_render();
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
var chatbotObserver = new MutationObserver(() => {
chatbotContentChanged(1);
welcomeMessage.update();
});
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
if (layout === "LEFT-RIGHT") { limit_scroll_position(); }
@@ -122,4 +129,5 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
// 主题加载(恢复到上次)
change_theme("", "")
}

View File

@@ -1 +0,0 @@
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0

View File

@@ -1 +0,0 @@
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0

View File

@@ -1 +0,0 @@
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0

View File

@@ -1 +0,0 @@
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0

View File

7
themes/svg/arxiv.svg Normal file
View File

@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m140,188l584,0l0,164l76,0l0,-208c0,-17.7 -14.3,-32 -32,-32l-672,0c-17.7,0 -32,14.3 -32,32l0,736c0,17.7 14.3,32 32,32l544,0l0,-76l-500,0l0,-648zm274.3,68l-60.6,0c-3.4,0 -6.4,2.2 -7.6,5.4l-127.1,368c-0.3,0.8 -0.4,1.7 -0.4,2.6c0,4.4 3.6,8 8,8l55.1,0c3.4,0 6.4,-2.2 7.6,-5.4l32.7,-94.6l196.2,0l-96.2,-278.6c-1.3,-3.2 -4.3,-5.4 -7.7,-5.4zm12.4,228l-85.5,0l42.8,-123.8l42.7,123.8zm509.3,44l-136,0l0,-93c0,-4.4 -3.6,-8 -8,-8l-56,0c-4.4,0 -8,3.6 -8,8l0,93l-136,0c-13.3,0 -24,10.7 -24,24l0,176c0,13.3 10.7,24 24,24l136,0l0,152c0,4.4 3.6,8 8,8l56,0c4.4,0 8,-3.6 8,-8l0,-152l136,0c13.3,0 24,-10.7 24,-24l0,-176c0,-13.3 -10.7,-24 -24,-24zm-208,152l-88,0l0,-80l88,0l0,80zm160,0l-88,0l0,-80l88,0l0,80z" fill="#00aeff" id="svg_1"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 940 B

8
themes/svg/brain.svg Normal file
View File

@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m832,96a96,96 0 1 1 -90.72,127.52l-1.54,0.26l-3.74,0.22l-256,0l0,256l256,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 1 1 0,63.07l-1.6,0.26l-3.71,0.22l-256,0l0,256l256,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 1 1 0,63.07l-1.6,0.26l-3.71,0.22l-288,0a32,32 0 0 1 -31.78,-28.26l-0.22,-3.74l0,-288l-68.03,0a128.06,128.06 0 0 1 -117.57,95.87l-6.4,0.13a128,128 0 1 1 123.97,-160l68.03,0l0,-288a32,32 0 0 1 28.26,-31.78l3.74,-0.22l288,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 0 1 90.69,-64.45zm0,704a32,32 0 1 0 0,64a32,32 0 0 0 0,-64zm-608,-352a64,64 0 1 0 0,128a64,64 0 0 0 0,-128zm608,32a32,32 0 1 0 0,64a32,32 0 0 0 0,-64zm0,-320a32,32 0 1 0 0,64a32,32 0 0 0 0,-64z" fill="#00aeff" id="svg_1"/>
<path d="m224,384a128,128 0 1 1 0,256a128,128 0 0 1 0,-256zm0,64a64,64 0 1 0 0,128a64,64 0 0 0 0,-128z" fill="#00aeff" id="svg_2"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.0 KiB

8
themes/svg/conf.svg Normal file
View File

@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m373.83,194.49a44.92,44.92 0 1 1 0,-89.84a44.96,44.96 0 1 1 0,89.84m165.43,-163.51l-63.23,0s-189.11,-1.28 -204.17,99.33l0,122.19l241.47,0l0,34.95l-358.3,-0.66s-136.03,9.94 -138.51,206.74l0,57.75s-3.96,190.03 130.12,216.55l92.32,0l0,-128.09a132,132 0 0 1 132.06,-132.03l256.43,0a123.11,123.11 0 0 0 123.18,-123.12l0,-238.62c0,-2.25 -1.19,-103.13 -211.37,-115.02" fill="#00aeff" id="svg_1"/>
<path d="m647.01,853.16c24.84,0 44.96,20.01 44.96,44.85a44.96,44.96 0 1 1 -44.96,-44.85m-165.43,163.51l63.23,0s189.14,1.22 204.17,-99.36l0,-122.22l-241.47,0l0,-34.95l358.27,0.66s136.06,-9.88 138.54,-206.65l0,-57.74s3.96,-190.04 -130.12,-216.56l-92.32,0l0,128.03a132.06,132.06 0 0 1 -132.06,132.1l-256.47,0a123.11,123.11 0 0 0 -123.14,123.08l0,238.59c0,2.24 1.19,103.16 211.37,115.02" fill="#00aeff" id="svg_2"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.0 KiB

1
themes/svg/default.svg Normal file
View File

@@ -0,0 +1 @@
<svg t="1721122982934" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1823" width="200" height="200"><path d="M512 512m-512 0a512 512 0 1 0 1024 0 512 512 0 1 0-1024 0Z" fill="#04BEE8" p-id="1824"></path><path d="M324.408781 655.018925C505.290126 655.018925 651.918244 508.387706 651.918244 327.509463c0-152.138029-103.733293-280.047334-244.329811-316.853972C205.813923 52.463528 47.497011 213.017581 8.987325 415.981977 47.587706 553.880127 174.183098 655.018925 324.408781 655.018925z" fill="#FFFFFF" fill-opacity=".2" p-id="1825"></path><path d="M512 1024c282.766631 0 512-229.233369 512-512 0-31.765705-2.891385-62.853911-8.433853-93.018889C928.057169 336.0999 809.874701 285.26268 679.824375 285.26268c-269.711213 0-488.357305 218.645317-488.357305 488.357305 0 54.959576 9.084221 107.802937 25.822474 157.10377C300.626556 989.489417 402.283167 1024 512 1024z" fill="#FFFFFF" fill-opacity=".15" p-id="1826"></path><path d="M732.535958 756.566238c36.389596 0 65.889478-29.499882 65.889477-65.889478 0 36.389596 29.502983 65.889478 65.889478 65.889478-17.053747 0-65.889478 29.502983-65.889478 65.889477 0-36.386495-29.499882-65.889478-65.889477-65.889477zM159.685087 247.279334c25.686819 0 46.51022-20.8234 46.51022-46.51022 0 25.686819 20.8234 46.51022 46.510219 46.51022-12.03607 0-46.51022 20.8234-46.510219 46.510219 0-25.686819-20.8234-46.51022-46.51022-46.510219z" fill="#FFFFFF" fill-opacity=".5" p-id="1827"></path><path d="M206.195307 333.32324c8.562531 0 15.503407-6.940875 15.503406-15.503407 0 8.562531 6.940875 15.503407 15.503407 15.503407-4.012282 0-15.503407 6.940875-15.503407 15.503406 0-8.562531-6.940875-15.503407-15.503406-15.503406z" fill="#FFFFFF" fill-opacity=".3" p-id="1828"></path><path d="M282.161998 248.054504m80.617714 0l299.215746 0q80.617714 0 80.617714 80.617714l0 366.380379q0 80.617714-80.617714 80.617713l-299.215746 0q-80.617714 0-80.617714-80.617713l0-366.380379q0-80.617714 80.617714-80.617714Z" fill="#FFFFFF" 
p-id="1829"></path><path d="M530.216503 280.611658h113.433774v146.467658c0 10.89967-13.049992 16.498725-20.948978 8.9881l-35.767909-34.009048-35.767909 34.009048C543.266495 443.578041 530.216503 437.978986 530.216503 427.079316V280.611658z" fill="#29C8EB" p-id="1830"></path><path d="M365.105223 280.611658m14.728237 0l0 0q14.728236 0 14.728236 14.728236l0 417.041635q0 14.728236-14.728236 14.728236l0 0q-14.728236 0-14.728237-14.728236l0-417.041635q0-14.728236 14.728237-14.728236Z" fill="#29C8EB" p-id="1831"></path></svg>

After

Width:  |  Height:  |  Size: 2.5 KiB

9
themes/svg/doc.svg Normal file
View File

@@ -0,0 +1,9 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m298.67,96a32,32 0 0 1 0,64a138.67,138.67 0 0 0 -138.67,138.67l0,42.66a32,32 0 0 1 -64,0l0,-42.66a202.67,202.67 0 0 1 202.67,-202.67zm597.33,554.67a32,32 0 0 1 32,32l0,42.66a202.67,202.67 0 0 1 -202.67,202.67l-42.66,0a32,32 0 0 1 0,-64l42.66,0a138.67,138.67 0 0 0 138.67,-138.67l0,-42.66a32,32 0 0 1 32,-32zm-128,-405.34a32,32 0 0 1 0,64l-213.33,0a32,32 0 0 1 0,-64l213.33,0zm0,128a32,32 0 0 1 0,64l-128,0a32,32 0 0 1 0,-64l128,0z" fill="#00aeff" id="svg_1"/>
<path d="m780.8,96a138.67,138.67 0 0 1 138.67,138.67l0,213.33a138.67,138.67 0 0 1 -138.67,138.67l-98.13,0a32,32 0 0 1 0,-64l98.13,0a74.67,74.67 0 0 0 74.67,-74.67l0,-213.33a74.67,74.67 0 0 0 -74.67,-74.67l-247.47,0a74.67,74.67 0 0 0 -74.66,74.67l0,106.66a32,32 0 0 1 -64,0l0,-106.66a138.67,138.67 0 0 1 138.66,-138.67l247.47,0zm-487.68,500.05a32,32 0 0 1 45.23,0l64,64a32,32 0 0 1 0,45.23l-64,64a32,32 0 0 1 -45.23,-45.23l41.34,-41.38l-41.38,-41.39a32,32 0 0 1 -3.07,-41.64l3.11,-3.59z" fill="#00aeff" id="svg_2"/>
<path d="m448,437.33a138.67,138.67 0 0 1 138.67,138.67l0,213.33a138.67,138.67 0 0 1 -138.67,138.67l-213.33,0a138.67,138.67 0 0 1 -138.67,-138.67l0,-213.33a138.67,138.67 0 0 1 138.67,-138.67l213.33,0zm0,64l-213.33,0a74.67,74.67 0 0 0 -74.67,74.67l0,213.33c0,41.22 33.45,74.67 74.67,74.67l213.33,0a74.67,74.67 0 0 0 74.67,-74.67l0,-213.33a74.67,74.67 0 0 0 -74.67,-74.67z" fill="#00aeff" id="svg_3"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

16
themes/svg/img.svg Normal file
View File

@@ -0,0 +1,16 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m960,234l0,556c0,5.52 -4.48,10 -10,10l-438,0l0,-576l438,0c5.52,0 10,4.48 10,10z" fill="#F7FEFE" id="svg_1"/>
<path d="m931.93,800l-419.93,0l0,-202.03l114.19,-114.19c5.21,-5.21 13.71,-5 18.66,0.44l287.08,315.78z" fill="#3404FC" id="svg_2"/>
<path d="m512,800l256,0l-256,-275.69l0,275.69z" fill="#0097FF" id="svg_3"/>
<path d="m848,336m-48,0a48,48 0 1 0 96,0a48,48 0 1 0 -96,0z" fill="#EC1C36" id="svg_4"/>
<path d="m305.29,242.44m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_5"/>
<path d="m112,320m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_6"/>
<path d="m96,512m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_7"/>
<path d="m305.29,781.56m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_8"/>
<path d="m112,704m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_9"/>
<path d="m950,816.31l-438,0l0,-32l432,0l0,-544.62l-432,0l0,-32l438,0c14.34,0 26,11.66 26,26l0,556.62c0,14.34 -11.66,26 -26,26zm-630,-192.31c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-38.63,0l-96,96l-11.4,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.71,64 64,64c29.77,0 54.85,-20.43 61.97,-48l24.65,0l96.01,-96l25.37,0zm-208,112c-17.67,0 -32,-14.33 -32,-32s14.33,-32 32,-32s32,14.33 32,32s-14.33,32 -32,32zm288,-304c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l32,0zm-80,80c0,8.84 7.16,16 16,16l32,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16zm32,96c0,8.84 7.16,16 16,16l32,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16zm-240,-224c29.77,0 54.85,-20.43 61.97,-48l11.4,0l96,96l38.63,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-25.37,0l-96,-96l-24.65,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.7,64 63.99,64zm0,-96c17.67,0 32,14.33 32,32s-14.33,32 -32,32s-32,-14.33 -32,-32s14.33,-32 32,-32zm-16,288c29.77,0 54.85,-20.43 61.97,-48l133.43,0c6.96,0 12.59,-7.16 12.59,-16s-5.64,-16 -12.59,-16l-133.43,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.71,64 64,64zm0,-96c17.67,0 32,14.33 32,32s-14.33,32 -32,32s-32,-14.33 -32,-32s14.33,-32 32,-32zm384,-416c-8.84,0 -16,7.16 -16,16l0,224l-73.37,0l-29.8,-29.79a63.62,63.62 0 0 0 8.46,-31.77c0,-35.29 -28.71,-64 -64,-64s-64,28.71 -64,64s28.71,64 64,64c12.16,0 23.53,-3.41 33.21,-9.31l38.87,38.87l86.63,0l0,64l-16,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l16,0l0,64l-48,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l48,0l0,64l-16,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l16,0l0,64l-86.63,0l-38.87,38.87a63.61,63.61 0 0 0 -33.21,-9.31c-35.29,0 -64,28.71 -64,64s28.71,64 64,64s64,-28.71 64,-64c0,-11.55 -3.08,-22.4 -8.46,-31.77l29.8,-29.79l73.37,0l0,224c0,8.84 7.16,16 16,16s16,-7.16 16,-16l0,-864c0,-8.84 -7.16,-16 -16,-16zm-143.57,185.81c-2.62,11.14 -11.07,20.03 -21.95,23.29c-2.91,0.87 -6,1.34 -9.19,1.34c-17.68,0 
-32,-14.33 -32,-32c0,-17.68 14.32,-32 32,-32c17.67,0 32,14.32 32,32c0,2.54 -0.29,5 -0.86,7.37zm-31.14,563.75c-17.68,0 -32,-14.32 -32,-32c0,-17.67 14.32,-32 32,-32c3.19,0 6.28,0.47 9.19,1.34c10.88,3.26 19.33,12.15 21.95,23.29c0.57,2.37 0.86,4.83 0.86,7.37c0,17.68 -14.33,32 -32,32z" fill="#00aeff" id="svg_10"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 3.3 KiB

10
themes/svg/mm.svg Normal file
View File

@@ -0,0 +1,10 @@
<?xml version="1.0"?>
<svg width="1066" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m443.9,511.45a96,96 0 1 0 192,0a96,96 0 0 0 -192,0zm96,32a32,32 0 1 1 0,-64a32,32 0 0 1 0,64z" fill="#00aeff" id="svg_1"/>
<path d="m74.67,512c0,80.17 65.87,142.08 147.54,181.67c84.26,40.84 198.06,65.07 321.7,65.07c123.74,0 237.49,-24.23 321.75,-65.07c81.71,-39.63 147.59,-101.54 147.59,-181.71c0,-80.22 -65.88,-142.08 -147.59,-181.68c-84.26,-40.87 -198.05,-65.11 -321.7,-65.11c-123.69,0 -237.49,24.24 -321.71,65.11c-81.71,39.6 -147.58,101.51 -147.58,181.68l0,0.04zm469.29,172.07c-114.9,0 -217.09,-22.61 -289.15,-57.6c-74.67,-36.18 -105.48,-79.01 -105.48,-114.47c0,-35.46 30.85,-78.29 105.48,-114.52c72.06,-34.94 174.25,-57.6 289.15,-57.6c114.9,0 217.09,22.66 289.15,57.6c74.67,36.23 105.47,79.06 105.47,114.52c0,35.5 -30.85,78.34 -105.47,114.52c-72.11,34.98 -174.25,57.6 -289.15,57.6l0,-0.05z" fill="#00aeff" id="svg_2"/>
<path d="m300.2,705.19c-5.97,82.74 15.7,130.86 46.42,148.57c30.72,17.75 83.25,12.46 151.9,-34.09c66.3,-44.93 137.04,-122.07 194.47,-221.57c57.47,-99.5 88.92,-199.34 94.72,-279.21c5.98,-82.77 -15.74,-130.86 -46.46,-148.61c-30.72,-17.75 -83.2,-12.46 -151.9,34.09c-66.3,44.93 -137.04,122.12 -194.47,221.61c-57.43,99.5 -88.92,199.3 -94.72,279.21l0.04,0zm-74.49,-5.37c6.78,-93.44 42.66,-204.08 104.53,-311.17c61.82,-107.09 139.69,-193.54 217.17,-246.1c75.18,-50.94 161.71,-77.01 231.17,-36.95c69.46,40.11 90.11,128.09 83.59,218.67c-6.75,93.39 -42.67,204.07 -104.54,311.16c-61.82,107.1 -139.69,193.5 -217.17,246.06c-75.18,50.95 -161.71,77.06 -231.17,36.95c-69.46,-40.1 -90.11,-128.08 -83.58,-218.62z" fill="#00aeff" id="svg_3"/>
<path d="m300.2,318.85c5.76,79.87 37.21,179.71 94.68,279.21c57.43,99.5 128.17,176.64 194.43,221.61c68.7,46.51 121.18,51.84 151.9,34.09c30.72,-17.75 52.43,-65.88 46.46,-148.61c-5.76,-79.87 -37.25,-179.71 -94.72,-279.21c-57.43,-99.5 -128.13,-176.64 -194.43,-221.61c-68.7,-46.51 -121.18,-51.8 -151.9,-34.09c-30.72,17.75 -52.43,65.88 -46.42,148.61zm-74.49,5.37c-6.53,-90.53 14.12,-178.51 83.58,-218.62c69.46,-40.11 155.99,-13.99 231.13,36.95c77.52,52.52 155.39,138.96 217.21,246.06c61.87,107.09 97.75,217.77 104.54,311.17c6.52,90.53 -14.17,178.51 -83.63,218.62c-69.42,40.11 -155.95,13.99 -231.08,-36.95c-77.53,-52.52 -155.44,-138.96 -217.26,-246.06c-61.83,-107.09 -97.71,-217.77 -104.49,-311.17z" fill="#00aeff" id="svg_4"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.4 KiB

7
themes/svg/polish.svg Normal file
View File

@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m671.27,337.36l2.05,1.92a174.18,174.18 0 0 1 0.19,244.18l-383.3,390.22a168.04,168.04 0 0 1 -237.73,1.79l-2.04,-2.05a174.18,174.18 0 0 1 -0.26,-244.12l383.3,-390.15a168.04,168.04 0 0 1 237.73,-1.79l0.06,0zm-165.09,73.2l-383.31,390.22a72.31,72.31 0 0 0 0.07,101.36l0.77,0.83a66.29,66.29 0 0 0 93.87,-0.7l383.3,-390.22a72.31,72.31 0 0 0 0,-101.42l-0.83,-0.77a66.36,66.36 0 0 0 -93.87,0.7zm282.32,209.7a47.35,47.35 0 0 1 0.64,0.45l122.48,72.05c23.04,13.57 30.91,43.07 17.79,66.29a47.35,47.35 0 0 1 -64.63,17.92l-0.58,-0.45l-122.48,-72.05a48.95,48.95 0 0 1 -17.78,-66.29a47.35,47.35 0 0 1 64.63,-17.92l-0.07,0zm187.43,-191.84a48.38,48.38 0 0 1 0,96.69l-140.52,0a48.38,48.38 0 0 1 0,-96.69l140.52,0zm-49.27,-292.63l0.64,0.64a48.82,48.82 0 0 1 0,68.4l-100.66,102.58a46.97,46.97 0 0 1 -66.42,0.64l-0.64,-0.64a48.82,48.82 0 0 1 0,-68.41l100.66,-102.57a46.97,46.97 0 0 1 66.42,-0.64zm-632.55,-35.64a46.97,46.97 0 0 1 0.58,0.63l100.65,102.52a48.82,48.82 0 0 1 0,68.47a46.97,46.97 0 0 1 -66.42,0.64l-0.64,-0.64l-100.65,-102.58a48.82,48.82 0 0 1 0,-68.47a46.97,46.97 0 0 1 66.48,-0.57zm284.57,-100.15c26.23,0 47.48,21.24 47.48,47.48l0,146.92a47.42,47.42 0 1 1 -94.9,0l0,-146.92c0,-26.24 21.31,-47.48 47.42,-47.48z" fill="#00aeff" id="svg_1"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.4 KiB

7
themes/svg/tts.svg Normal file
View File

@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<svg width="1092" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m1010.55,424.93c0,86.01 -65.36,155.88 -147.04,166.63l-43.63,0c-81.68,-10.75 -147.04,-80.62 -147.04,-166.63l-92.57,0c0,123.63 92.6,231.11 212.41,252.62l0,107.52l87.17,0l0,-107.52c119.81,-21.51 212.42,-123.63 212.42,-252.59l-81.72,0l0,-0.03zm-76.25,-231.16c0,-53.76 -43.56,-91.37 -92.57,-91.37a91.2,91.2 0 0 0 -92.61,91.37l0,91.38l190.64,0l0,-91.38l-5.46,0zm-190.64,231.16c0,53.76 43.59,91.37 92.61,91.37a91.2,91.2 0 0 0 92.6,-91.37l0,-91.38l-185.21,0l0,91.38zm-279.45,-274.23l-139.94,-140.46l-6.83,-6.83l-3.41,-3.41l-3.42,3.41l-6.82,6.86l-20.48,20.55l-3.42,3.42l3.42,6.86l13.65,13.68l75.09,75.37l-153.6,0c-122.88,6.83 -218.45,109.57 -218.45,229.44l0,10.28l51.2,0l0,-10.24c0,-92.5 75.09,-171.28 167.25,-178.11l153.6,0l-75.09,75.33l-10.24,10.28l-3.41,6.82l-3.42,3.45l3.42,3.41l27.3,27.41l3.42,3.42l3.41,-3.42l30.72,-30.82l116.05,-116.43l3.42,-3.41l-3.42,-6.86zm-406.18,383.55l0,130.16l64.85,0l0,-65.06l129.71,0l0,359.59l-64.86,0l0,65.06l194.56,0l0,-65.06l-64.85,0l0,-359.59l129.71,0l0,65.06l64.85,0l0,-130.16l-453.97,0z" fill="#00aeff" id="svg_1"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.2 KiB

8
themes/svg/vt.svg Normal file
View File

@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
<g class="layer">
<title>Layer 1</title>
<path d="m256,64a192,192 0 0 0 -192,192l0,512a192,192 0 0 0 192,192l512,0a192,192 0 0 0 192,-192l0,-512a192,192 0 0 0 -192,-192l-512,0zm92.16,416l-158.08,-158.08l67.84,-67.9l169.41,169.34a80,80 0 0 1 0,113.15l-169.41,169.35l-67.84,-67.78l158.08,-158.08zm131.84,160l288,0l0,96l-288,0l0,-96z" fill="#00aeff" id="svg_1"/>
<path d="m190.08,638.02l158.08,-158.08l-158.08,-158.08l67.84,-67.84l169.41,169.34a80,80 0 0 1 0,113.15l-169.41,169.35l-67.84,-67.78l0,-0.06zm577.92,1.92l-288,0l0,96l288,0l0,-95.94l0,-0.06z" fill="#2951E0" id="svg_2" opacity="0.2"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 767 B

351
themes/tts.js Normal file
View File

@@ -0,0 +1,351 @@
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// TTS audio generation / playback helpers
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Global debug switch for the TTS pipeline's console logging.
audio_debug = false;

// Plays decoded TTS waveforms sequentially through the Web Audio API.
// Buffers are appended via enqueueAudio() and played strictly one at a
// time; all playback is gated on the global `allow_auto_read_tts_flag`
// (defined elsewhere in the app).
class AudioPlayer {
    constructor() {
        // Shared audio context (webkit prefix kept for older Safari).
        this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
        this.queue = [];           // FIFO of pending waveform ArrayBuffers
        this.isPlaying = false;    // true while a source node is playing
        this.currentSource = null; // the AudioBufferSourceNode currently playing, if any
    }
    // Convert a Base64-encoded string into an ArrayBuffer.
    // NOTE(review): currently unused — play_wave() receives raw buffers
    // (see the commented-out line there); kept for compatibility.
    base64ToArrayBuffer(base64) {
        const binaryString = window.atob(base64);
        const len = binaryString.length;
        const bytes = new Uint8Array(len);
        for (let i = 0; i < len; i++) {
            bytes[i] = binaryString.charCodeAt(i);
        }
        return bytes.buffer;
    }
    // If idle and audio is queued, start playing the next buffer.
    checkQueue() {
        if (!this.isPlaying && this.queue.length > 0) {
            this.isPlaying = true;
            const nextAudio = this.queue.shift();
            this.play_wave(nextAudio);
        }
    }
    // Append a waveform to the play queue (no-op when auto-read TTS is off).
    enqueueAudio(audio_buf_wave) {
        if (allow_auto_read_tts_flag) {
            this.queue.push(audio_buf_wave);
            this.checkQueue();
        }
    }
    // Decode and play one waveform; on end (or error) chain to the next
    // queued item via checkQueue().
    async play_wave(encodedAudio) {
        //const audioData = this.base64ToArrayBuffer(encodedAudio);
        const audioData = encodedAudio;
        try {
            const buffer = await this.audioCtx.decodeAudioData(audioData);
            const source = this.audioCtx.createBufferSource();
            source.buffer = buffer;
            source.connect(this.audioCtx.destination);
            source.onended = () => {
                if (allow_auto_read_tts_flag) {
                    this.isPlaying = false;
                    this.currentSource = null; // clear the finished source
                    this.checkQueue();
                }
            };
            this.currentSource = source; // remember it so stop() can cancel
            source.start();
        } catch (e) {
            console.log("Audio error!", e);
            this.isPlaying = false;
            this.currentSource = null; // also clear on decode/play failure
            this.checkQueue();
        }
    }
    // Immediately stop playback and drop all queued audio.
    stop() {
        if (this.currentSource) {
            this.queue = [];           // drop pending audio
            this.currentSource.stop(); // stop the active source
            this.currentSource = null;
            this.isPlaying = false;
            // Closing the AudioContext would prevent any future playback,
            // so only the current source is stopped.
            // this.audioCtx.close(); // optional
        }
    }
}
// Singleton player shared by the TTS pipeline below.
const audioPlayer = new AudioPlayer();
// Promise-based FIFO mutex: callers `await lock()` to enter the critical
// section and must call `unlock()` when done. Waiters are released
// strictly in arrival order.
class FIFOLock {
    constructor() {
        this.queue = [];                  // pending resolvers, oldest first
        this.currentTaskExecuting = false; // true while a holder is active
    }
    // Enqueue the caller; returns a promise that resolves when the lock
    // is acquired. If the lock is idle, the caller is released at once.
    lock() {
        let release;
        const waiter = new Promise((resolve) => { release = resolve; });
        this.queue.push(release);
        if (!this.currentTaskExecuting) this._dequeueNext();
        return waiter;
    }
    // Release the oldest waiter, or go idle when none remain.
    _dequeueNext() {
        const next = this.queue.shift();
        if (next === undefined) {
            this.currentTaskExecuting = false;
            return;
        }
        this.currentTaskExecuting = true;
        next();
    }
    // Hand the lock to the next waiter (or mark it idle).
    unlock() {
        this.currentTaskExecuting = false;
        this._dequeueNext();
    }
}
// Awaitable sleep: resolves after `ms` milliseconds.
function delay(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
// Trailing-edge debounce: returns a wrapper that invokes `fire` with the
// most recent arguments once calls have been quiet for T milliseconds.
function trigger(T, fire) {
    let pendingTimer = null; // active timer id, if any
    let latestArgs = null;   // arguments from the most recent call
    return function (...args) {
        latestArgs = args;
        // A new call resets the countdown.
        if (pendingTimer !== null) {
            clearTimeout(pendingTimer);
        }
        pendingTimer = setTimeout(() => {
            fire(...latestArgs);
        }, T);
    };
}
// --- Module-level state for live-stream TTS ---
prev_text = ""; // previous text, this is used to check chat changes
prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check where we should continue to play audio
prev_chatbot_index = -1; // chatbot message index observed on the last update
// Debounced commit: fires 3s after the stream stops changing, flushing any
// trailing text that never received a '\n' / '。' separator.
const delay_live_text_update = trigger(3000, on_live_stream_terminate);
// Debounce target: once the stream has gone quiet, push any text after the
// already-spoken prefix to the TTS queue and mark it as pushed.
// Fix: `remaining_text` was an implicit global shared with other functions
// in this file; it is now a proper local.
function on_live_stream_terminate(latest_text) {
    // remove `prev_text_already_pushed` from `latest_text`
    if (audio_debug) console.log("on_live_stream_terminate", latest_text);
    const remaining_text = latest_text.slice(prev_text_already_pushed.length);
    if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
        prev_text_already_pushed = latest_text;
        push_text_to_audio(remaining_text);
    }
}
// Heuristic: does `text` look like a streaming continuation of `prev_text`?
// Tolerates the tail of `prev_text` being rewritten by up to `abl` chars.
// Fix: `abl` was an implicit global; it is now a local constant.
function is_continue_from_prev(text, prev_text) {
    const abl = 5; // allowed backtrack length
    if (text.length < prev_text.length - abl) {
        // Text shrank by more than the tolerance: not a continuation.
        return false;
    }
    if (prev_text.length > 10) {
        // Compare against a stable prefix of prev_text (capped at 100 chars).
        return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100)));
    } else {
        return text.startsWith(prev_text);
    }
}
// True when `remaining_text` contains nothing but whitespace, newlines,
// and the Chinese full stop '。'.
function isEmptyOrWhitespaceOnly(remaining_text) {
    const stripped = remaining_text.replace(/[\n。]/g, '').trim();
    return stripped.length === 0;
}
// Consume newly streamed text: leading separators are absorbed into the
// already-pushed prefix; then everything up to the LAST '\n'/'。' is pushed
// to audio. The unterminated tail stays pending (committed later by more
// text or by the debounce timer).
// Fix: `index_of_last_sep` and `tobe_pushed` were implicit globals; they
// are now proper locals.
function process_increased_text(remaining_text) {
    // If remaining_text starts with '\n' or '。', move those separator
    // chars into prev_text_already_pushed.
    while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) {
        prev_text_already_pushed = prev_text_already_pushed + remaining_text[0];
        remaining_text = remaining_text.slice(1);
    }
    if (remaining_text.includes('\n') || remaining_text.includes('。')) {
        // At least one complete sentence has arrived.
        const index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。'));
        // Split at the last separator; push the completed part.
        const tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1);
        prev_text_already_pushed = prev_text_already_pushed + tobe_pushed;
        if (!isEmptyOrWhitespaceOnly(tobe_pushed)) {
            push_text_to_audio(tobe_pushed);
        }
    }
}
// Main dispatcher, called on every chatbot DOM mutation with the latest
// message text. Decides whether the text grew in place, was rewritten
// mid-message, or belongs to a brand-new message, and feeds only the new
// delta into the TTS pipeline. Mutates the module-level state
// `prev_text`, `prev_text_already_pushed`, `prev_chatbot_index`.
function process_latest_text_output(text, chatbot_index) {
    if (text.length == 0) {
        prev_text = text;
        // NOTE(review): `prev_text_mask` is an implicit global and is never
        // read anywhere else in this file — looks like leftover code.
        prev_text_mask = text;
        // console.log('empty text')
        return;
    }
    if (text == prev_text) {
        // Nothing changed since the last mutation event.
        // console.log('[nothing changed]')
        return;
    }
    var is_continue = is_continue_from_prev(text, prev_text_already_pushed);
    if (chatbot_index == prev_chatbot_index && is_continue) {
        // Same message, text grew monotonically: push only the delta.
        // NOTE(review): `remaining_text` here is an implicit global.
        remaining_text = text.slice(prev_text_already_pushed.length);
        process_increased_text(remaining_text);
        delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
    }
    else if (chatbot_index == prev_chatbot_index && !is_continue) {
        // Same message index, but earlier text was rewritten: reset the
        // pushed prefix and let the debounce timer re-commit.
        if (audio_debug) console.log('---------------------');
        if (audio_debug) console.log('text twisting!');
        if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
        if (audio_debug) console.log('---------------------');
        prev_text_already_pushed = "";
        delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
    }
    else {
        // New message: clear `prev_text_already_pushed` and process the
        // whole text from scratch.
        if (audio_debug) console.log('---------------------');
        if (audio_debug) console.log('new message begin!');
        if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
        if (audio_debug) console.log('---------------------');
        prev_text_already_pushed = "";
        process_increased_text(text);
        delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
    }
    prev_text = text;
    prev_chatbot_index = chatbot_index;
}
// Serializes concurrent push_text_to_audio() calls so request indices and
// pacing stay in order.
const audio_push_lock = new FIFOLock();
// Split `text` into sentences ('\n' / '。' separated) and POST each one to
// the /vits TTS endpoint, pacing requests ~3s apart. Holds the FIFO lock
// for the whole batch so interleaved calls cannot reorder `send_index`.
async function push_text_to_audio(text) {
    if (!allow_auto_read_tts_flag) {
        return;
    }
    await audio_push_lock.lock();
    var lines = text.split(/[\n。]/);
    for (const audio_buf_text of lines) {
        if (audio_buf_text) {
            // Append '/vits' to the current URL to form the target endpoint
            const url = `${window.location.href}vits`;
            // Define the payload to be sent in the POST request
            const payload = {
                text: audio_buf_text, // Ensure 'audio_buf_text' is defined with valid data
                text_language: "zh"
            };
            // Fire the request; responses are re-ordered by UpdatePlayQueue.
            post_text(url, payload, send_index);
            send_index = send_index + 1;
            if (audio_debug) console.log(send_index, audio_buf_text);
            // Pace requests (skip the wait if auto-read was switched off).
            if (allow_auto_read_tts_flag) {
                await delay(3000);
            }
        }
    }
    audio_push_lock.unlock();
}
send_index = 0;       // index assigned to the next outgoing TTS request
recv_index = 0;       // next response index expected for in-order playback
to_be_processed = []; // out-of-order responses cached as [index, waveform]
// Re-order asynchronous TTS responses: response `cnt` is enqueued for
// playback only once all earlier indices have been handled; responses
// arriving early are cached in `to_be_processed`.
// Fixes:
//  - `to_be_processed.pop(i)` removed the LAST element regardless of `i`
//    (Array.prototype.pop takes no arguments), so the wrong cached
//    response could be dropped; replaced with `splice(i, 1)`.
//  - `find_any` and `i` were implicit globals; now proper locals.
async function UpdatePlayQueue(cnt, audio_buf_wave) {
    if (cnt != recv_index) {
        // Arrived out of order: cache until its turn comes.
        to_be_processed.push([cnt, audio_buf_wave]);
        if (audio_debug) console.log('cache', cnt);
    }
    else {
        if (audio_debug) console.log('processing', cnt);
        recv_index = recv_index + 1;
        if (audio_buf_wave) {
            audioPlayer.enqueueAudio(audio_buf_wave);
        }
        // Drain any cached responses that are now in order.
        let find_any = true;
        while (find_any) {
            find_any = false;
            // Iterate downward so splicing does not skip entries.
            for (let i = to_be_processed.length - 1; i >= 0; i--) {
                if (to_be_processed[i][0] == recv_index) {
                    if (audio_debug) console.log('processing cached', recv_index);
                    if (to_be_processed[i][1]) {
                        audioPlayer.enqueueAudio(to_be_processed[i][1]);
                    }
                    to_be_processed.splice(i, 1); // remove exactly entry i
                    find_any = true;
                    recv_index = recv_index + 1;
                }
            }
        }
    }
}
// Fire one TTS request. When auto-read is disabled, immediately advance
// the ordered play queue with a null placeholder so later indices are not
// blocked waiting for this one.
function post_text(url, payload, cnt) {
    if (!allow_auto_read_tts_flag) {
        UpdatePlayQueue(cnt, null);
        return;
    }
    postData(url, payload, cnt).then((data) => {
        UpdatePlayQueue(cnt, data);
    });
}
// Flag reserved for one-time user-facing error alerts (alert calls are
// currently commented out below).
notify_user_error = false
// POST `data` as JSON to `url`; resolve to the response body as an
// ArrayBuffer, or null on any HTTP or network error (errors are logged,
// never thrown, so the TTS queue keeps moving).
// NOTE(review): no 'Content-Type: application/json' header is set — the
// /vits endpoint apparently tolerates this; confirm before reusing.
async function postData(url = '', data = {}) {
    try {
        // Use the Fetch API with await
        const response = await fetch(url, {
            method: 'POST', // Specify the request method
            body: JSON.stringify(data), // Convert the JavaScript object to a JSON string
        });
        // Check if the response is ok (status in the range 200-299)
        if (!response.ok) {
            // If not OK, log and fall through to the null result
            console.info('There was a problem during audio generation requests:', response.status);
            // if (!notify_user_error){
            //     notify_user_error = true;
            //     alert('There was a problem during audio generation requests:', response.status);
            // }
            return null;
        }
        // If OK, return the raw audio bytes
        return await response.arrayBuffer();
    } catch (error) {
        // Network / CORS / abort errors end up here
        console.info('There was a problem during audio generation requests:', error);
        // if (!notify_user_error){
        //     notify_user_error = true;
        //     alert('There was a problem during audio generation requests:', error);
        // }
        return null;
    }
}

317
themes/welcome.js Normal file
View File

@@ -0,0 +1,317 @@
class WelcomeMessage {
    // Initializes the static card catalogue, render state, and a page-focus
    // hook that clears any stuck animation classes when the tab regains focus.
    constructor() {
        // Static catalogue of welcome cards: title, body text, icon path,
        // and the link opened when the title is clicked.
        this.static_welcome_message = [
            {
                title: "环境配置教程",
                content: "配置模型和插件,释放大语言模型的学术应用潜力。",
                svg: "file=themes/svg/conf.svg",
                url: "https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E",
            },
            {
                title: "Arxiv论文一键翻译",
                content: "无缝切换学术阅读语言,最优英文转中文的学术论文阅读体验。",
                svg: "file=themes/svg/arxiv.svg",
                url: "https://www.bilibili.com/video/BV1dz4y1v77A/",
            },
            {
                title: "多模态模型",
                content: "试试将截屏直接粘贴到输入框中,随后使用多模态模型提问。",
                svg: "file=themes/svg/mm.svg",
                url: "https://github.com/binary-husky/gpt_academic",
            },
            {
                title: "文档与源码批处理",
                content: "您可以将任意文件拖入「此处」,随后调用对应插件功能。",
                svg: "file=themes/svg/doc.svg",
                url: "https://github.com/binary-husky/gpt_academic",
            },
            {
                title: "图表与脑图绘制",
                content: "试试输入一段语料,然后点击「总结绘制脑图」。",
                svg: "file=themes/svg/brain.svg",
                url: "https://www.bilibili.com/video/BV18c41147H9/",
            },
            {
                title: "虚空终端",
                content: "点击右侧插件区的「虚空终端」插件,然后直接输入您的想法。",
                svg: "file=themes/svg/vt.svg",
                url: "https://github.com/binary-husky/gpt_academic",
            },
            {
                title: "DALLE图像生成",
                content: "接入DALLE生成插画或者项目Logo辅助头脑风暴并激发灵感。",
                svg: "file=themes/svg/img.svg",
                url: "https://github.com/binary-husky/gpt_academic",
            },
            {
                title: "TTS语音克隆",
                content: "借助SoVits以您喜爱的角色的声音回答问题。",
                svg: "file=themes/svg/tts.svg",
                url: "https://www.bilibili.com/video/BV1Rp421S7tF/",
            },
            {
                title: "实时语音对话",
                content: "配置实时语音对话功能,无须任何激活词,我将一直倾听。",
                svg: "file=themes/svg/default.svg",
                url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md",
            },
            {
                title: "Latex全文润色",
                content: "上传需要润色的latex论文让大语言模型帮您改论文。",
                svg: "file=themes/svg/polish.svg",
                url: "https://github.com/binary-husky/gpt_academic",
            }
        ];
        this.visible = false;                      // whether the welcome panel is currently shown
        this.max_welcome_card_num = 6;             // at most this many cards rendered at once
        this.card_array = [];                      // DOM nodes of the rendered cards
        this.static_welcome_message_previous = []; // ordering used by the last rotation
        this.reflesh_time_interval = 15*1000;      // card rotation period, ms
        // When the page regains focus, strip any leftover 'hide'/'show'
        // animation classes so cards are not stuck mid-transition.
        const reflesh_render_status = () => {
            for (let index = 0; index < this.card_array.length; index++) {
                const card = this.card_array[index];
                card.classList.remove('hide');
                card.classList.remove('show');
            }
        };
        // NOTE(review): PageFocusHandler is defined elsewhere in the project.
        const pageFocusHandler = new PageFocusHandler();
        pageFocusHandler.addFocusCallback(reflesh_render_status);
    }
    // Entry point called once after construction: runs the first
    // visibility check / render pass.
    begin_render() {
        this.update();
    }
async startRefleshCards() {
await new Promise(r => setTimeout(r, this.reflesh_time_interval));
await this.reflesh_cards();
if (this.visible){
setTimeout(() => {
this.startRefleshCards.call(this);
}, 1);
}
}
    // Rotate the visible cards: reshuffle the catalogue, prioritize cards
    // not shown last round, then flip each card (hide → swap content →
    // show) with a staggered 200ms delay between cards.
    async reflesh_cards() {
        if (!this.visible){
            return;
        }
        // re-rank this.static_welcome_message randomly
        this.static_welcome_message_temp = this.shuffle(this.static_welcome_message);
        // find items that in this.static_welcome_message_temp but not in this.static_welcome_message_previous
        const not_shown_previously = this.static_welcome_message_temp.filter(item => !this.static_welcome_message_previous.includes(item));
        const already_shown_previously = this.static_welcome_message_temp.filter(item => this.static_welcome_message_previous.includes(item));
        // combine two lists: fresh cards first, repeats last
        this.static_welcome_message_previous = not_shown_previously.concat(already_shown_previously);
        (async () => {
            // for...of-style loop so we can await between card flips
            for (let index = 0; index < this.card_array.length; index++) {
                if (index >= this.max_welcome_card_num) {
                    break;
                }
                const card = this.card_array[index];
                // Card still mid-animation from a previous cycle? Reset its
                // classes and skip it this round.
                if (card.classList.contains('hide') || card.classList.contains('show')) {
                    card.classList.remove('hide');
                    card.classList.remove('show');
                    continue;
                }
                // Wait for the hide transition to finish before swapping content.
                card.addEventListener('transitionend', () => {
                    // Swap in the next message's icon, title, link, and text.
                    const message = this.static_welcome_message_previous[index];
                    const title = card.getElementsByClassName('welcome-card-title')[0];
                    const content = card.getElementsByClassName('welcome-content-c')[0];
                    const svg = card.getElementsByClassName('welcome-svg')[0];
                    const text = card.getElementsByClassName('welcome-title-text')[0];
                    svg.src = message.svg;
                    text.textContent = message.title;
                    text.href = message.url;
                    content.textContent = message.content;
                    card.classList.remove('hide');
                    // Wait for the show transition to finish, then clear it.
                    card.addEventListener('transitionend', () => {
                        card.classList.remove('show');
                    }, { once: true });
                    card.classList.add('show');
                }, { once: true });
                card.classList.add('hide');
                // Stagger the flips by 200 ms.
                await new Promise(r => setTimeout(r, 200));
            }
        })();
    }
shuffle(array) {
var currentIndex = array.length, randomIndex;
// While there remain elements to shuffle...
while (currentIndex != 0) {
// Pick a remaining element...
randomIndex = Math.floor(Math.random() * currentIndex);
currentIndex--;
// And swap it with the current element.
[array[currentIndex], array[randomIndex]] = [
array[randomIndex], array[currentIndex]];
}
return array;
}
async update() {
// console.log('update')
var page_width = document.documentElement.clientWidth;
const width_to_hide_welcome = 1200;
if (!await this.isChatbotEmpty() || page_width < width_to_hide_welcome) {
if (this.visible) {
this.removeWelcome();
this.visible = false;
this.card_array = [];
this.static_welcome_message_previous = [];
}
return;
}
if (this.visible){
return;
}
// console.log("welcome");
this.showWelcome();
this.visible = true;
this.startRefleshCards();
}
showCard(message) {
const card = document.createElement('div');
card.classList.add('welcome-card');
// 创建标题
const title = document.createElement('div');
title.classList.add('welcome-card-title');
// 创建图标
const svg = document.createElement('img');
svg.classList.add('welcome-svg');
svg.src = message.svg;
svg.style.height = '30px';
title.appendChild(svg);
// 创建标题
const text = document.createElement('a');
text.textContent = message.title;
text.classList.add('welcome-title-text');
text.href = message.url;
text.target = "_blank";
title.appendChild(text)
// 创建内容
const content = document.createElement('div');
content.classList.add('welcome-content');
const content_c = document.createElement('div');
content_c.classList.add('welcome-content-c');
content_c.textContent = message.content;
content.appendChild(content_c);
// 将标题和内容添加到卡片 div 中
card.appendChild(title);
card.appendChild(content);
return card;
}
async showWelcome() {
// 首先,找到想要添加子元素的父元素
const elem_chatbot = document.getElementById('gpt-chatbot');
// 创建一个新的div元素
const welcome_card_container = document.createElement('div');
welcome_card_container.classList.add('welcome-card-container');
// 创建主标题
const major_title = document.createElement('div');
major_title.classList.add('welcome-title');
major_title.textContent = "欢迎使用GPT-Academic";
welcome_card_container.appendChild(major_title)
// 创建卡片
this.static_welcome_message.forEach((message, index) => {
if (index >= this.max_welcome_card_num) {
return;
}
this.static_welcome_message_previous.push(message);
const card = this.showCard(message);
this.card_array.push(card);
welcome_card_container.appendChild(card);
});
elem_chatbot.appendChild(welcome_card_container);
// 添加显示动画
requestAnimationFrame(() => {
welcome_card_container.classList.add('show');
});
}
async removeWelcome() {
// remove welcome-card-container
const elem_chatbot = document.getElementById('gpt-chatbot');
const welcome_card_container = document.getElementsByClassName('welcome-card-container')[0];
// 添加隐藏动画
welcome_card_container.classList.add('hide');
// 等待动画结束后再移除元素
welcome_card_container.addEventListener('transitionend', () => {
elem_chatbot.removeChild(welcome_card_container);
}, { once: true });
}
async isChatbotEmpty() {
return (await get_data_from_gradio_component("gpt-chatbot")).length == 0;
}
}
class PageFocusHandler {
    // Runs registered callbacks when the user RETURNS to the page.
    // NOTE: 'visibilitychange' fires on both hide and show; the original
    // implementation invoked callbacks on every event after the first,
    // including when the page was being hidden. handleFocus now filters
    // on document.visibilityState so callbacks only fire on return.
    constructor() {
        // Set once the page has been hidden at least once; suppresses
        // callbacks on the initial page load.
        this.hasReturned = false;
        this.focusCallbacks = [];
        // Bind the visibility-change handler once for this instance.
        window.addEventListener('visibilitychange', this.handleFocus.bind(this));
    }
    // Handle a visibility change: arm on hide, fire callbacks on show.
    handleFocus() {
        if (document.visibilityState !== 'visible') {
            // Page just became hidden: a future 'visible' event is a return.
            this.hasReturned = true;
            return;
        }
        if (this.hasReturned) {
            this.focusCallbacks.forEach(callback => callback());
        }
    }
    // Register a function to run each time the user returns to the page.
    addFocusCallback(callback) {
        if (typeof callback === 'function') {
            this.focusCallbacks.push(callback);
        } else {
            throw new Error('Callback must be a function');
        }
    }
}

View File

@@ -220,9 +220,10 @@ def CatchException(f):
try:
yield from f(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, *args, **kwargs)
except FriendlyException as e:
tb_str = '```\n' + trimmed_format_exc() + '```'
if len(chatbot_with_cookie) == 0:
chatbot_with_cookie.clear()
chatbot_with_cookie.append(["插件调度异常", None])
chatbot_with_cookie.append(["插件调度异常:\n" + tb_str, None])
chatbot_with_cookie[-1] = [chatbot_with_cookie[-1][0], e.generate_error_html()]
yield from update_ui(chatbot=chatbot_with_cookie, history=history, msg=f'异常') # 刷新界面
except Exception as e:
@@ -566,8 +567,6 @@ def generate_file_link(report_files:List[str]):
return file_links
def on_report_generated(cookies:dict, files:List[str], chatbot:ChatBotWithCookies):
if "files_to_promote" in cookies:
report_files = cookies["files_to_promote"]

View File

@@ -1,5 +1,5 @@
{
"version": 3.81,
"version": 3.83,
"show_feature": true,
"new_feature": "支持更复杂的插件框架 <-> 上传文件时显示进度条 <-> 添加TTS语音输出EdgeTTS和SoVits语音克隆 <-> Doc2x PDF翻译 <-> 添加回溯对话按钮"
"new_feature": "增加欢迎页面 <-> 优化图像生成插件 <-> 添加紫东太初大模型支持 <-> 保留主题选择 <-> 支持更复杂的插件框架 <-> 上传文件时显示进度条"
}