Compare commits


86 Commits

Author SHA1 Message Date
binary-husky
3071057e6d h 2023-07-23 22:37:10 +08:00
binary-husky
271379cdee Merge branch 'master' into threejs-app 2023-07-19 00:04:27 +08:00
qingxu fu
eef9e470c9 Latex: tolerate non-UTF-8 encoding errors 2023-07-18 11:00:20 +08:00
binary-husky
3002c6318a Update README.md 2023-07-17 22:21:39 +08:00
binary-husky
6d0bceaebd Remove plugin dependencies 2023-07-17 22:00:29 +08:00
binary-husky
aa51d6fde6 up 2023-07-17 21:54:28 +08:00
binary-husky
136479e218 Update README.md 2023-07-17 10:38:46 +08:00
binary-husky
19a2742354 Merge pull request #957 from 1Haschwalth/patch-1
Update README.md
2023-07-17 10:35:15 +08:00
1Haschwalth
45aac96dd3 Update README.md 2023-07-16 21:50:08 +08:00
binary-husky
6f21ae8939 support claude api 2023-07-16 15:03:05 +08:00
binary-husky
add98f4eeb Fix automatic version-upgrade bug 2023-07-16 13:23:28 +08:00
binary-husky
fe231f72b6 fix theme folder rename problem 2023-07-16 13:15:55 +08:00
binary-husky
b308fde480 update readme 2023-07-15 19:19:39 +08:00
binary-husky
f3e14ff806 Update the Traditional Chinese mapping dictionary 2023-07-15 19:11:00 +08:00
binary-husky
79ef9bdf1c update English projection dictionary 2023-07-15 19:01:49 +08:00
binary-husky
a3e938aee9 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-07-15 18:41:46 +08:00
binary-husky
b19a6155f4 restore jittor support 2023-07-15 18:41:35 +08:00
binary-husky
801f7342b1 Update config.py 2023-07-15 17:58:34 +08:00
binary-husky
4829fa0f35 Update README.md 2023-07-15 17:46:19 +08:00
binary-husky
3671f4208e Update README.md 2023-07-15 17:39:04 +08:00
binary-husky
e8c51181ee Further improve the real-time responsiveness of speech recognition 2023-07-15 17:02:00 +08:00
binary-husky
3ccbb4d6fb Remove Google fonts 2023-07-15 17:01:37 +08:00
binary-husky
93fe457e99 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-07-15 16:41:46 +08:00
binary-husky
afac657aaa Fix watchdog thread leak in the voice assistant 2023-07-15 16:41:11 +08:00
binary-husky
3e5c32860a Update README.md 2023-07-15 14:59:05 +08:00
binary-husky
d577bb38b6 Update use_audio.md 2023-07-15 14:58:27 +08:00
binary-husky
418bc32b39 Update use_audio.md 2023-07-15 14:53:30 +08:00
binary-husky
7148ea0596 Update README 2023-07-15 14:44:07 +08:00
binary-husky
87adb17df4 3.46 2023-07-15 14:38:18 +08:00
binary-husky
3fcee3762d Tweak styles 2023-07-15 14:35:24 +08:00
binary-husky
1f014779e4 Tweak styles 2023-07-15 14:31:38 +08:00
binary-husky
97879e73ef Restore horizontal-resize CSS 2023-07-15 13:35:11 +08:00
binary-husky
13d4cd3237 Audio feature manual 2023-07-15 13:30:12 +08:00
binary-husky
73e835885b Merge branch 'master' into improve_ui_master 2023-07-15 13:01:13 +08:00
binary-husky
2524c908fc Revise prompts 2023-07-15 12:58:38 +08:00
binary-husky
0e71d81bb3 Update README.md 2023-07-14 16:30:03 +08:00
binary-husky
a47864888f Update build-with-latex.yml 2023-07-14 16:25:25 +08:00
binary-husky
9b61ac807c Update build-with-chatglm.yml 2023-07-14 16:25:03 +08:00
binary-husky
bc200dc555 Update build-without-local-llms.yml 2023-07-14 16:24:32 +08:00
binary-husky
2c18b84517 Fix the automatic dependency installer 2023-07-12 22:16:25 +08:00
qingxu fu
fe7b651c56 Update prompts 2023-07-11 15:56:28 +08:00
qingxu fu
9b8f160788 up 2023-07-11 15:52:38 +08:00
binary-husky
801d5e2fc2 audio readme 2023-07-11 11:11:06 +08:00
qingxu fu
0d655f2d18 3D scene generation 2023-07-10 12:58:17 +08:00
binary-husky
cecdd28e04 Update README.md 2023-07-10 03:41:19 +08:00
binary-husky
d364df1cd6 add test instance 2023-07-10 03:33:51 +08:00
binary-husky
f51bc03686 Version 3.45 release notes 2023-07-10 03:24:34 +08:00
binary-husky
c010d50716 Allow adding fine-tuned ChatGLM models 2023-07-10 03:17:09 +08:00
binary-husky
acddb86f3a Small and beautiful 2023-07-10 00:20:14 +08:00
binary-husky
4fde0120ab Improve reminders 2023-07-10 00:08:59 +08:00
binary-husky
592a354eef Improve plugin prompts 2023-07-10 00:06:48 +08:00
binary-husky
bd66cf3d8b Fix chat-history issue 2023-07-10 00:02:22 +08:00
binary-husky
e6e5174734 Rename 2023-07-09 23:47:10 +08:00
binary-husky
13ade82677 Improve the voice assistant 2023-07-09 23:18:06 +08:00
binary-husky
ce9eb8d20a UP 2023-07-09 21:18:04 +08:00
binary-husky
dd47c0a284 merge changes 2023-07-09 20:55:37 +08:00
binary-husky
f725ab1b31 Merge branch 'master' into improve_ui_master 2023-07-09 20:47:53 +08:00
binary-husky
7ce4192c52 add comments 2023-07-09 17:25:50 +08:00
binary-husky
c06aafb642 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-07-09 16:01:15 +08:00
binary-husky
b298c5416c Improve the PDF summarization plugin 2023-07-09 16:01:08 +08:00
505030475
94abf302cb Fix template comments 2023-07-09 12:50:51 +08:00
binary-husky
fcc5534e66 ChatGLM black-box fine-tuning plugin 2023-07-09 03:37:47 +08:00
binary-husky
56c0e4d575 Version 3.44 release notes 2023-07-09 01:21:18 +08:00
binary-husky
8a10db618e Merge branch 'master-interact' 2023-07-09 01:05:04 +08:00
binary-husky
1fe66f0291 Improve the Azure experience 2023-07-09 00:20:58 +08:00
binary-husky
ced977c443 Fix double-dollar formula matching bug 2023-07-08 22:23:29 +08:00
binary-husky
6c2ffbae52 Update README.md 2023-07-08 19:17:35 +08:00
binary-husky
be2f54fac9 Update README.md 2023-07-08 18:21:20 +08:00
binary-husky
87b5e56378 Update requirements.txt 2023-07-08 18:10:33 +08:00
binary-husky
3a5764ed34 Update requirements.txt 2023-07-08 17:59:27 +08:00
qingxu fu
91aee50ea7 Chuanhu theme 2023-07-07 20:12:06 +08:00
qingxu fu
e5ccedf491 Name revision 2023-07-07 20:08:26 +08:00
qingxu fu
f620666a58 Merge branch 'improve_ui_master' of https://github.com/binary-husky/chatgpt_academic into improve_ui_master 2023-07-07 19:51:48 +08:00
qingxu fu
594c63e5d6 Theme fixes 2023-07-07 19:51:09 +08:00
qingxu fu
67d9051890 update error message 2023-07-07 17:41:43 +08:00
binary-husky
be96232127 Merge pull request #933 from binary-husky/master-latex-patch
Latex File Name Bug Patch
2023-07-07 16:57:58 +08:00
binary-husky
3b5bc7a784 Update use_azure.md 2023-07-07 10:55:22 +08:00
binary-husky
5e92f437a1 Update use_azure.md 2023-07-07 10:54:21 +08:00
binary-husky
5c0d34793e Latex File Name Bug Patch 2023-07-07 00:09:50 +08:00
505030475
b082b5eb1b Move the Aliyun TOKEN into config 2023-07-03 23:20:25 +08:00
505030475
9648d78453 Refactor async code for better readability 2023-07-03 22:44:10 +08:00
505030475
2dc8718041 First version of the voice module 2023-07-03 00:13:10 +08:00
505030475
a330d6636e error 2023-07-02 22:54:05 +08:00
qingxu fu
322c4be145 Sync audio input 2023-07-02 14:42:12 +08:00
qingxu fu
a3596ff60d audio 2023-07-02 01:05:20 +08:00
qingxu fu
e11d8132f8 add green theme 2023-07-01 23:02:44 +08:00
51 changed files with 3785 additions and 690 deletions

View File

@@ -1,5 +1,5 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image for ChatGLM support
+name: build-with-chatglm
on:
push:

View File

@@ -1,5 +1,5 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image for ChatGLM support
+name: build-with-jittorllms
on:
push:

View File

@@ -1,5 +1,5 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image for Latex support
+name: build-with-latex
on:
push:

View File

@@ -1,5 +1,5 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image
+name: build-without-local-llms
on:
push:

.gitignore vendored (1 addition)
View File

@@ -150,3 +150,4 @@ request_llm/jittorllms
multi-language
request_llm/moss
media
+flagged

View File

@@ -1,11 +1,11 @@
> **Note**
>
-> 2023.7.5: The Gradio dependency has been adjusted. Please **update the code** promptly. When installing dependencies, be sure to use the versions **pinned** in `requirements.txt`:
+> 2023.7.8: The Gradio and Pydantic dependency adjustments have been written into `requirements.txt`. Please **update the code** promptly. When installing dependencies, be sure to use the versions **pinned** in `requirements.txt`:
>
> `pip install -r requirements.txt`

-# <div align=center><img src="docs/logo.png" width="40" > GPT 学术优化 (GPT Academic)</div>
+# <div align=center><img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)</div>

**If you like this project, please give it a Star; if you have come up with handy shortcuts or function plugins, pull requests are welcome!**
@@ -18,14 +18,14 @@ To translate this project to arbitary language with GPT, read and run [`multi_la
>
> 2. The function of each file in this project is documented in detail in the self-analysis report [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can also click the relevant function plugins at any time to call GPT to regenerate the project's self-analysis report. FAQs are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation](#installation).
>
-> 3. This project is compatible with, and encourages trying, domestic large language models such as ChatGLM and MOSS. Multiple api-keys can coexist: fill them into the config file like `API_KEY="openai-key1,openai-key2,api2d-key3"`. To swap in a temporary `API_KEY`, enter it in the input area and press Enter to submit; it takes effect immediately.
+> 3. This project is compatible with, and encourages trying, domestic large language models such as ChatGLM and MOSS. Multiple api-keys can coexist: fill them into the config file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To swap in a temporary `API_KEY`, enter it in the input area and press Enter to submit; it takes effect immediately.

<div align="center">

-Feature | Description
+Feature (⭐ = recently added) | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click grammar checking of papers
One-click Chinese-English translation | One-click translation between Chinese and English
@@ -41,15 +41,18 @@ Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数
Chat analysis report generation | [Function plugin] Automatically generates a summary report after a run
[Full-text PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title & abstract of a PDF paper and translates the full text (multithreaded)
[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv article URL to translate the abstract and download the PDF in one click
One-click Latex paper proofreading | [Function plugin] Grammarly-style grammar and spelling correction of Latex articles, with a side-by-side PDF for comparison
[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT [write your related works](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] One click to [let GPT fetch information from the Internet](https://www.bilibili.com/video/BV1om4y127ck) before answering, so information never goes stale
+⭐Fine-grained Arxiv paper translation | [Function plugin] One click to [translate arxiv papers at ultra-high quality](https://www.bilibili.com/video/BV1dz4y1v77A/); currently the best paper translation tool
+⭐[Real-time voice conversation input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Function plugin] Asynchronously [listens to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), segments sentences automatically, and automatically picks the moment to answer
Formula/image/table display | Shows formulas in both [TeX and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) simultaneously; supports formula and code highlighting
Multithreaded function plugin support | Invoke chatgpt in parallel to process [huge volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs in one click
Dark [theme](https://github.com/binary-husky/gpt_academic/issues/173) at startup | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-[Multi-LLM](https://www.bilibili.com/video/BV1wT411p7yf) support | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) all at once must feel great, right?
+[Multi-LLM](https://www.bilibili.com/video/BV1wT411p7yf) support | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) all at once must feel great, right?
+⭐ChatGLM2 fine-tuned models | Supports loading ChatGLM2 fine-tuned models; provides a ChatGLM2 fine-tuning helper plugin
More LLM model integrations, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Adds the Newbing interface (New Bing), introduces Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) with support for [LLaMA](https://github.com/facebookresearch/llama) and [PanGu-α](https://openi.org.cn/pangu/)
More new feature demos (image generation etc.) …… | See the end of this document ……

</div>
@@ -113,12 +116,12 @@ python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步
```
-<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as backends, click here to expand</summary>
+<details><summary>If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, click here to expand</summary>
<p>

-[Optional step] To support Tsinghua ChatGLM/Fudan MOSS as backends, additional dependencies need to be installed (prerequisites: familiar with Python + have used Pytorch + the machine is powerful enough):
+[Optional step] To support Tsinghua ChatGLM2/Fudan MOSS as backends, additional dependencies need to be installed (prerequisites: familiar with Python + have used Pytorch + the machine is powerful enough):

```sh
-# [Optional step I] Support Tsinghua ChatGLM. Note on Tsinghua ChatGLM: if you hit the error "Call ChatGLM fail 不能正常加载ChatGLM的参数", refer to the following: 1. The default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded because the machine configuration is insufficient, you can change the model precision in request_llm/bridge_chatglm.py, replacing all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+# [Optional step I] Support Tsinghua ChatGLM2. Note on Tsinghua ChatGLM: if you hit the error "Call ChatGLM fail 不能正常加载ChatGLM的参数", refer to the following: 1. The default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded because the machine configuration is insufficient, you can change the model precision in request_llm/bridge_chatglm.py, replacing all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional step II] Support Fudan MOSS
@@ -142,6 +145,8 @@ python main.py
### Installation method II: Use Docker

1. ChatGPT only (recommended for most people; equivalent to docker-compose scheme 1)
+[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
+[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
@@ -149,14 +154,15 @@ cd gpt_academic # 进入路径
nano config.py                  # with any text editor, edit config.py to configure "Proxy", "API_KEY", "WEB_PORT" (e.g. 50923), etc.
docker build -t gpt-academic .  # install

-# (Final step, option 1) In a Linux environment, using `--net=host` is more convenient and faster
+# (Final step, Linux) Using `--net=host` is more convenient and faster
docker run --rm -it --net=host gpt-academic
-# (Final step, option 2) In a macOS/Windows environment, you can only use the -p option to expose the container's port (e.g. 50923) to a port on the host
+# (Final step, macOS/Windows) You can only use the -p option to expose the container's port (e.g. 50923) to a port on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

+P.S. If you need the Latex-dependent plugin features, see the Wiki. Alternatively, you can get the Latex features directly via docker-compose (edit docker-compose.yml to keep scheme 4 and delete the other schemes).

-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
+2. ChatGPT + ChatGLM2 + MOSS (requires familiarity with Docker)
+[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)

``` sh
# Edit docker-compose.yml: keep scheme 2 and delete the other schemes. Configure scheme 2 in docker-compose.yml by following its comments
@@ -164,6 +170,8 @@ docker-compose up
```
3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
+[![jittorllms](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml)

``` sh
# Edit docker-compose.yml: keep scheme 3 and delete the other schemes. Configure scheme 3 in docker-compose.yml by following its comments
docker-compose up
@@ -282,6 +290,9 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
### II: Version history
- version 3.5 (Todo): call all of this project's function plugins with natural language (high priority)
+- version 3.46: supports fully hands-off real-time voice conversation
+- version 3.45: supports custom fine-tuned ChatGLM2 models
+- version 3.44: officially supports Azure, improves UI usability
- version 3.4: + arxiv paper translation and latex paper proofreading
- version 3.3: + Internet information aggregation
- version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language + querying any combination of LLMs simultaneously)
@@ -302,13 +313,18 @@ gpt_academic开发者QQ群-2610599535
- Some browser translation extensions interfere with the frontend of this software
- The official Gradio currently has many compatibility bugs; be sure to install Gradio with `requirement.txt`

-### III: References and learning
+### III: Themes
+The theme can be changed by modifying the `THEME` option (config.py).
+1. `Chuanhu-Small-and-Beautiful` [URL](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)

+### IV: References and learning

```
The code draws on the designs of many other excellent projects, in no particular order:

# Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

+# Tsinghua ChatGLM2-6B:
+https://github.com/THUDM/ChatGLM2-6B

# Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

cc.json (new file, 39 lines)
View File

@@ -0,0 +1,39 @@
[
    {
        "name": "Box-1",
        "width": 1,
        "height": 1,
        "depth": 1,
        "location_x": 1,
        "location_y": 0,
        "location_z": 0
    },
    {
        "name": "Box-2",
        "width": 1,
        "height": 1,
        "depth": 1,
        "location_x": -1,
        "location_y": 0,
        "location_z": 0
    },
    {
        "name": "Box-3",
        "width": 1,
        "height": 1,
        "depth": 1,
        "location_x": 0,
        "location_y": 1,
        "location_z": 0
    },
    {
        "name": "Box-4",
        "width": 1,
        "height": 1,
        "depth": 1,
        "location_x": 0,
        "location_y": -1,
        "location_z": 0
    }
]
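cc.json looks like a saved sample output of the ThreeJS scene plugin introduced in this compare (presumably: it has the same four-unit-boxes-around-the-origin layout as the plugin's few-shot example further down). A quick way to inspect it:

```python
import json

# Load the sample scene and print each box with its position on the z=0 plane.
with open("cc.json", encoding="utf8") as f:
    boxes = json.load(f)

for b in boxes:
    print(b["name"], (b["location_x"], b["location_y"], b["location_z"]))
# Box-1 (1, 0, 0) / Box-2 (-1, 0, 0) / Box-3 (0, 1, 0) / Box-4 (0, -1, 0)
```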

View File

@@ -117,7 +117,7 @@ def auto_update(raise_error=False):
        with open('./version', 'r', encoding='utf8') as f:
            current_version = f.read()
        current_version = json.loads(current_version)['version']
-        if (remote_version - current_version) >= 0.01:
+        if (remote_version - current_version) >= 0.01-1e-5:
            from colorful import print亮黄
            print亮黄(
                f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}{new_feature}')
@@ -139,7 +139,7 @@ def auto_update(raise_error=False):
        else:
            return
    except:
-        msg = '自动更新程序:已禁用'
+        msg = '自动更新程序:已禁用。建议排查:代理网络配置。'
        if raise_error:
            from toolbox import trimmed_format_exc
            msg += trimmed_format_exc()
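Context for the first change above (this matches the "Fix automatic version-upgrade bug" commit): the version check subtracts two floats parsed from JSON, and a plain `>= 0.01` can miss a 0.01 version bump because the float difference may round to just under 0.01. A quick demonstration, with hypothetical version numbers:

```python
remote_version, current_version = 2.46, 2.45   # hypothetical version pair
diff = remote_version - current_version
print(diff)                  # 0.009999999999999787 on IEEE-754 doubles
print(diff >= 0.01)          # False: the update notice would be skipped
print(diff >= 0.01 - 1e-5)   # True:  the patched comparison adds a tolerance
```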

View File

@@ -8,7 +8,7 @@
"""
# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下还需要填写组织格式如org-123456789abcdefghijklmno的请向下翻找 API_ORG 设置项
-API_KEY = "sk-此处填API密钥"    # 可同时填写多个API-KEY用英文逗号分割例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
+API_KEY = "此处填API密钥"    # 可同时填写多个API-KEY用英文逗号分割例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"

# [step 2]>> 改为True应用代理如果直接在海外服务器部署此处不修改
@@ -71,7 +71,11 @@ MAX_RETRY = 2
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
-# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "claude-1-100k", "claude-2", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

+# ChatGLM(2) Finetune Model Path 如果使用ChatGLM2微调模型需要把"chatglmft"加入AVAIL_LLM_MODELS中
+ChatGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"

# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
@@ -86,6 +90,10 @@ CONCURRENT_COUNT = 100
AUTO_CLEAR_TXT = False

+# 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful"]
+THEME = "Default"

# 加一个live2d装饰
ADD_WAIFU = False
@@ -110,9 +118,8 @@ SLACK_CLAUDE_USER_TOKEN = ''
# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md
AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
-AZURE_API_KEY = "填入azure openai api的密钥"
-AZURE_API_VERSION = "2023-05-15" # 一般不修改
-AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
+AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写该选项即将被弃用
+AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md

# 使用Newbing
@@ -120,3 +127,13 @@ NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """
put your new bing cookies here
"""

+# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
+ENABLE_AUDIO = False
+ALIYUN_TOKEN = ""   # 例如 f37f30e0f9934c34a992f6f64f7eba4f
+ALIYUN_APPKEY = ""  # 例如 RoPlZrM88DnAFkZK

+# Claude API KEY
+ANTHROPIC_API_KEY = ""
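The updated comment documents several key flavors living in one comma-separated `API_KEY` string (OpenAI `sk-…`, API2D `fk…`, plus an Azure key). A minimal sketch of splitting such a string and filtering by provider prefix; the prefix rules below are illustrative assumptions, not the project's actual key validator:

```python
API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"

def pick_keys(api_key_string, prefix):
    # Split on commas and keep the keys whose provider prefix matches.
    return [k.strip() for k in api_key_string.split(",") if k.strip().startswith(prefix)]

print(pick_keys(API_KEY, "sk-"))   # ['sk-openaikey1', 'sk-openaikey2']
print(pick_keys(API_KEY, "fk"))    # ['fkxxxx-api2dkey3']
```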

View File

@@ -352,6 +352,32 @@ def get_crazy_functions():
        })
    except:
        print('Load function plugin failed')

+    try:
+        from crazy_functions.交互功能函数模板 import 交互功能模板函数
+        function_plugins.update({
+            "交互功能模板函数": {
+                "Color": "stop",
+                "AsButton": False,
+                "Function": HotReload(交互功能模板函数)
+            }
+        })
+    except:
+        print('Load function plugin failed')

+    # try:
+    #     from crazy_functions.chatglm微调工具 import 微调数据集生成
+    #     function_plugins.update({
+    #         "黑盒模型学习: 微调数据集生成 (先上传数据集)": {
+    #             "Color": "stop",
+    #             "AsButton": False,
+    #             "AdvancedArgs": True,
+    #             "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示想象一个穿着者对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求100字以内用第二人称。' --system_prompt=''",
+    #             "Function": HotReload(微调数据集生成)
+    #         }
+    #     })
+    # except:
+    #     print('Load function plugin failed')

    try:
        from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
@@ -366,7 +392,7 @@ def get_crazy_functions():
        })
        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
        function_plugins.update({
-            "Arixv翻译输入arxivID[需Latex]": {
+            "Arixv论文精细翻译输入arxivID[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
@@ -377,7 +403,7 @@ def get_crazy_functions():
            }
        })
        function_plugins.update({
-            "本地论文翻译上传Latex压缩包[需Latex]": {
+            "本地Latex论文精细翻译上传Latex项目[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
@@ -390,6 +416,33 @@ def get_crazy_functions():
    except:
        print('Load function plugin failed')

+    try:
+        from crazy_functions.Three场景交互3D import 三维生成
+        function_plugins.update({
+            "ThreeJS 三维交互": {
+                "Color": "stop",
+                "AsButton": False,
+                "Function": HotReload(三维生成)
+            }
+        })
+    except:
+        print('Load function plugin failed')

+    try:
+        from toolbox import get_conf
+        ENABLE_AUDIO, = get_conf('ENABLE_AUDIO')
+        if ENABLE_AUDIO:
+            from crazy_functions.语音助手 import 语音助手
+            function_plugins.update({
+                "实时音频采集": {
+                    "Color": "stop",
+                    "AsButton": True,
+                    "Function": HotReload(语音助手)
+                }
+            })
+    except:
+        print('Load function plugin failed')

    # try:
    #     from crazy_functions.虚空终端 import 终端
    #     function_plugins.update({
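Each of the new blocks above follows the same registration pattern: import inside try/except so a missing optional dependency cannot break the whole plugin menu, then register a dict of UI attributes plus a hot-reloadable callback. A stripped-down sketch of that pattern (the plugin module and function here are hypothetical placeholders):

```python
function_plugins = {}

def HotReload(fn):
    # Stand-in for the project's hot-reload wrapper.
    return fn

try:
    from crazy_functions.my_plugin import my_plugin_fn   # hypothetical plugin module
    function_plugins.update({
        "My plugin": {
            "Color": "stop",       # button color
            "AsButton": False,     # expose in the dropdown instead of as a button
            "Function": HotReload(my_plugin_fn),
        }
    })
except:
    # One broken plugin only logs a message; the rest of the menu still loads.
    print('Load function plugin failed')
```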

View File

@@ -30,7 +30,7 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
    )
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    from .crazy_utils import try_install_deps
-    try_install_deps(['zh_langchain==0.2.1'])
+    try_install_deps(['zh_langchain==0.2.1', 'pypinyin'])

    # < --------------------读取参数--------------- >
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")

View File

@@ -0,0 +1,249 @@
from toolbox import CatchException, update_ui, gen_time_str
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping


def inspect_dependency(chatbot, history):
    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        from VISUALIZE.mcom import mcom
        return True
    except:
        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install vhmap```"])
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return False


def get_code_block(reply):
    try:
        import json
        json.loads(reply)
        return reply
    except:
        pass
    import re
    pattern = r"```([\s\S]*?)```"  # regex pattern to match code blocks
    matches = re.findall(pattern, reply)  # find all code blocks in text
    res = ""
    for match in matches:
        if 'import ' not in match:
            res = match.strip('python').strip('json')
            break
    if len(res) == 0:
        print(reply)
        raise RuntimeError("GPT is not generating proper Json.")
    return res  # code block


def get_json_blocks(reply):
    import re, json
    pattern = r"{([\s\S]*?)}"  # regex pattern to match code blocks
    matches = re.findall(pattern, reply)  # find all code blocks in text
    res = []
    for match in matches:
        if '"name"' in match:
            try:
                res.append(json.loads("{" + f'{match}' + "}"))
            except:
                pass
    return res  # code block


def read_json(code):
    import json
    return json.loads(code)


def parse_partial(vi, gpt_say):
    # 解析Json
    js = get_json_blocks(gpt_say)
    vi.update(js)


@CatchException
def 三维生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数如温度和top_p等一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    from .vhmap_interact.vhmap import vhmp_interface
    vi = vhmp_interface()

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "生成3D, 此插件处于开发阶段, 建议暂时不要使用, 作者: binary-husky, 插件初始化中 ..."
    ])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    # 尝试导入依赖, 如果缺少依赖, 则给出安装建议
    dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history)  # 刷新界面
    if not dep_ok: return

    # 输入
    i_say = prompt(txt)
    # 开始
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=r"You are a Json generator",
        on_reply_update=lambda t: parse_partial(vi, t)
    )
    chatbot.append(["开始生成执行", "..."])
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面 # 界面更新

    # 解析Json
    code = get_code_block(gpt_say)
    js = read_json(code)
    vi.update(js)
    return


def prompt(text):
    return r"""
> Requirements:
1. You can only use square Boxes to build cubes and walls.
2. The space you can work in is a sphere with origin (0,0,0) and radius 100.
3. The ground is z=0.
4. You can only use 100 boxes.
5. Format of each box is json, e.g.
    {
        "name": "Box-1",
        "geometry": "box",  // choose from "box", "octahedron", "sphere", "cylinder"
        "size": 1.0,
        "color": "rgb(255,165,0)",
        "location_x": 1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
6. Only produce json as output. Use markdown code block to wrap the json output.
> Example:
User: Generate 4 different objects around the origin.
You:
```
[
    {
        "name": "Box-1",
        "size": 1.0,
        "geometry": "box",
        "color": "rgb(255,11,10)",
        "location_x": 1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
    {
        "name": "Box-2",
        "size": 1.0,
        "geometry": "octahedron",
        "color": "rgb(255,11,10)",
        "location_x": -1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
    {
        "name": "Box-3",
        "size": 1.0,
        "geometry": "sphere",
        "color": "rgb(255,11,10)",
        "location_x": 0.0,
        "location_y": 1.0,
        "location_z": 0.0
    },
    {
        "name": "Box-4",
        "size": 1.0,
        "geometry": "cylinder",
        "color": "rgb(255,11,10)",
        "location_x": 0.0,
        "location_y": -1.0,
        "location_z": 0.0
    }
]
```
> User: """ + text


"""
Please construct a 3D environment where a girl is sitting under a tree in a garden.
Requirements:
1. List objects in this scene and make a markdown list.
2. The list must contain creative details, give at least 20 objects
"""

"""
Convert the result to json,
Requirements:
1. Format: [
    {
        "name": "object-1",
        "location": [position_x, position_y, position_z]
    }
]
2. Generate relative position of objects
"""

"""
> Requirements:
1. You can use box, octahedron, sphere, cylinder to build objects.
2. The ground is z=0.
3. You can only use 100 boxes.
4. Format of each box is json, e.g.
    {
        "name": "Box-1",
        "geometry": "box",  // choose from "box", "octahedron", "sphere", "cylinder"
        "size": 1.0,
        "color": "rgb(255,165,0)",
        "location_x": 1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
5. Only produce json as output. Use markdown code block to wrap the json output.
> Example:
```
[
    {
        "name": "Box-1",
        "size": 1.0,
        "geometry": "box",
        "color": "rgb(255,11,10)",
        "location_x": 1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
    {
        "name": "Box-2",
        "size": 1.0,
        "geometry": "octahedron",
        "color": "rgb(255,11,10)",
        "location_x": -1.0,
        "location_y": 0.0,
        "location_z": 0.0
    },
    {
        "name": "Box-3",
        "size": 1.0,
        "geometry": "sphere",
        "color": "rgb(255,11,10)",
        "location_x": 0.0,
        "location_y": 1.0,
        "location_z": 0.0
    },
    {
        "name": "Box-4",
        "size": 1.0,
        "geometry": "cylinder",
        "color": "rgb(255,11,10)",
        "location_x": 0.0,
        "location_y": -1.0,
        "location_z": 0.0
    }
]
```
"""

View File

@@ -0,0 +1,141 @@
from toolbox import CatchException, update_ui, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
import datetime, json


def fetch_items(list_of_items, batch_size):
    for i in range(0, len(list_of_items), batch_size):
        yield list_of_items[i:i + batch_size]


def string_to_options(arguments):
    import argparse
    import shlex
    # Create an argparse.ArgumentParser instance
    parser = argparse.ArgumentParser()
    # Add command-line arguments
    parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
    parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
    parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
    parser.add_argument("--batch", type=int, help="System prompt", default=50)
    parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", default=50)
    parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2)
    parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1)
    parser.add_argument("--json_dataset", type=str, help="json_dataset", default="")
    parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="")
    # Parse the arguments
    args = parser.parse_args(shlex.split(arguments))
    return args


@CatchException
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数如温度和top_p等一般原样传递下去就行
    plugin_kwargs   插件模型的参数
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    history = []    # 清空历史,以免输入溢出
    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    args = plugin_kwargs.get("advanced_arg", None)
    if args is None:
        chatbot.append(("没给定指令", "退出"))
        yield from update_ui(chatbot=chatbot, history=history); return
    else:
        arguments = string_to_options(arguments=args)

    dat = []
    with open(txt, 'r', encoding='utf8') as f:
        for line in f.readlines():
            json_dat = json.loads(line)
            dat.append(json_dat["content"])

    llm_kwargs['llm_model'] = arguments.llm_to_learn
    for batch in fetch_items(dat, arguments.batch):
        res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)],
            inputs_show_user_array=[f"Show Nothing" for _ in (batch)],
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history_array=[[] for _ in (batch)],
            sys_prompt_array=[arguments.system_prompt for _ in (batch)],
            max_workers=10  # OpenAI所允许的最大并行过载
        )
        with open(txt + '.generated.json', 'a+', encoding='utf8') as f:
            for b, r in zip(batch, res[1::2]):
                f.write(json.dumps({"content": b, "summary": r}, ensure_ascii=False) + '\n')

    promote_file_to_downloadzone(txt + '.generated.json', rename_file='generated.json', chatbot=chatbot)
    return


@CatchException
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数如温度和top_p等一般原样传递下去就行
    plugin_kwargs   插件模型的参数
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    import subprocess
    history = []    # 清空历史,以免输入溢出
    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    args = plugin_kwargs.get("advanced_arg", None)
    if args is None:
        chatbot.append(("没给定指令", "退出"))
        yield from update_ui(chatbot=chatbot, history=history); return
    else:
        arguments = string_to_options(arguments=args)

    pre_seq_len = arguments.pre_seq_len              # 128
    learning_rate = arguments.learning_rate          # 2e-2
    num_gpus = arguments.num_gpus                    # 1
    json_dataset = arguments.json_dataset            # 't_code.json'
    ptuning_directory = arguments.ptuning_directory  # '/home/hmp/ChatGLM2-6B/ptuning'

    command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \
        --do_train \
        --train_file AdvertiseGen/{json_dataset} \
        --validation_file AdvertiseGen/{json_dataset} \
        --preprocessing_num_workers 20 \
        --prompt_column content \
        --response_column summary \
        --overwrite_cache \
        --model_name_or_path THUDM/chatglm2-6b \
        --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \
        --overwrite_output_dir \
        --max_source_length 256 \
        --max_target_length 256 \
        --per_device_train_batch_size 1 \
        --per_device_eval_batch_size 1 \
        --gradient_accumulation_steps 16 \
        --predict_with_generate \
        --max_steps 100 \
        --logging_steps 10 \
        --save_steps 20 \
        --learning_rate {learning_rate} \
        --pre_seq_len {pre_seq_len} \
        --quantization_bit 4"

    process = subprocess.Popen(command, shell=True, cwd=ptuning_directory)
    try:
        process.communicate(timeout=3600*24)
    except subprocess.TimeoutExpired:
        process.kill()
    return

View File

@@ -17,7 +17,7 @@ validate_path() # validate path so you can run from base directory
# ==============================================================================================================================
from colorful import *
-from toolbox import get_conf, ChatBotWithCookies
+from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
import contextlib
import os
import sys
@@ -32,6 +32,7 @@ llm_kwargs = {
    'max_length': None,
    'temperature': 1.0,
}
llm_kwargs.update(load_chat_cookies())
plugin_kwargs = { }
chatbot = ChatBotWithCookies(llm_kwargs)
history = []
@@ -211,22 +212,45 @@ def test_Latex():
    # # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    #     cli_printer.print(cb) # print(cb)

def test_chatglm_finetune():
    from crazy_functions.chatglm微调工具 import 微调数据集生成, 启动微调
    txt = 'build/dev.json'
    plugin_kwargs = {"advanced_arg": "--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示想象一个穿着者对这个人外貌、身处的环境、内心世界、人设进行描写。要求100字以内用第二人称。' --system_prompt=''"}
    # for cookies, cb, hist, msg in (微调数据集生成)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    #     cli_printer.print(cb)
    plugin_kwargs = {"advanced_arg":
        " --pre_seq_len=128 --learning_rate=2e-2 --num_gpus=1 --json_dataset='t_code.json' --ptuning_directory='/home/hmp/ChatGLM2-6B/ptuning' "}
    for cookies, cb, hist, msg in (启动微调)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb)

def 三维生成():
    from crazy_functions.Three场景交互3D import 三维生成
    txt = "Generate 10 boxes to form a triangle formation with random color."
    plugin_kwargs = {"advanced_arg": ""}
    for cookies, cb, hist, msg in (三维生成)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb)

-# test_解析一个Python项目()
-# test_Latex英文润色()
-# test_Markdown中译英()
-# test_批量翻译PDF文档()
-# test_谷歌检索小助手()
-# test_总结word文档()
-# test_下载arxiv论文并翻译摘要()
-# test_解析一个Cpp项目()
-# test_联网回答问题()
-# test_解析ipynb文件()
-# test_数学动画生成manim()
-# test_Langchain知识库()
-# test_Langchain知识库读取()
if __name__ == "__main__":
-    test_Latex()
+    # test_解析一个Python项目()
+    # test_Latex英文润色()
+    # test_Markdown中译英()
+    # test_批量翻译PDF文档()
+    # test_谷歌检索小助手()
+    # test_总结word文档()
+    # test_下载arxiv论文并翻译摘要()
+    # test_解析一个Cpp项目()
+    # test_联网回答问题()
+    # test_解析ipynb文件()
+    # test_数学动画生成manim()
+    # test_Langchain知识库()
+    # test_Langchain知识库读取()
+    # test_Latex()
+    三维生成()
    input("程序完成,回车退出。")
    print("退出。")

View File

@@ -40,6 +40,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
+        on_reply_update=None
        ):
    """
    Request GPT model请求GPT模型同时维持用户界面活跃。
@@ -123,6 +124,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
        if future.done():
            break
        chatbot[-1] = [chatbot[-1][0], mutable[0]]
+        if on_reply_update: on_reply_update(mutable[0])
        yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面

    final_result = future.result()
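The new `on_reply_update` hook fires with the partial reply each time the UI polling loop wakes up; the ThreeJS plugin above uses it to render geometry while the model is still streaming. A minimal, self-contained sketch of the same polling-plus-callback shape (names and the fake worker are illustrative, not the project's API):

```python
import threading, time

def poll_with_callback(worker, on_reply_update=None, refresh_interval=0.2):
    mutable = [""]                       # shared buffer the worker thread appends to
    th = threading.Thread(target=worker, args=(mutable,))
    th.start()
    while th.is_alive():
        time.sleep(refresh_interval)
        if on_reply_update:
            on_reply_update(mutable[0])  # hand the partial reply to the caller
    th.join()
    return mutable[0]

def fake_llm(mutable):
    # Simulates a streaming model reply, one token at a time.
    for tok in ['{ ', '"name": "Box-1" ', '}']:
        mutable[0] += tok
        time.sleep(0.1)

result = poll_with_callback(fake_llm, on_reply_update=print)
```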

View File

@@ -147,7 +147,7 @@ def 寻找Latex主文件(file_manifest, mode):
    for texf in file_manifest:
        if os.path.basename(texf).startswith('merge'):
            continue
-        with open(texf, 'r', encoding='utf8') as f:
+        with open(texf, 'r', encoding='utf8', errors='ignore') as f:
            file_content = f.read()
        if r'\documentclass' in file_content:
            canidates.append(texf)
@@ -165,7 +165,7 @@ def 寻找Latex主文件(file_manifest, mode):
    expected_words = ['\input', '\ref', '\cite']
    for texf in canidates:
        canidates_score.append(0)
-        with open(texf, 'r', encoding='utf8') as f:
+        with open(texf, 'r', encoding='utf8', errors='ignore') as f:
            file_content = f.read()
        for uw in unexpected_words:
            if uw in file_content:
@@ -189,6 +189,18 @@ def rm_comments(main_file):
    main_file = re.sub(r'(?<!\\)%.*', '', main_file)  # 使用正则表达式查找半行注释, 并替换为空字符串
    return main_file

+def find_tex_file_ignore_case(fp):
+    dir_name = os.path.dirname(fp)
+    base_name = os.path.basename(fp)
+    if not base_name.endswith('.tex'): base_name += '.tex'
+    if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
+    # go case in-sensitive
+    import glob
+    for f in glob.glob(dir_name + '/*.tex'):
+        base_name_s = os.path.basename(fp)
+        if base_name_s.lower() == base_name.lower(): return f
+    return None

def merge_tex_files_(project_foler, main_file, mode):
    """
    Merge Tex project recrusively
@@ -197,15 +209,11 @@ def merge_tex_files_(project_foler, main_file, mode):
    for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
        f = s.group(1)
        fp = os.path.join(project_foler, f)
-        if os.path.exists(fp):
-            # e.g., \input{srcs/07_appendix.tex}
-            with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
-                c = fx.read()
-        else:
-            # e.g., \input{srcs/07_appendix}
-            assert os.path.exists(fp+'.tex'), f'即找不到{fp},也找不到{fp}.texTex源文件缺失'
-            with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
-                c = fx.read()
+        fp = find_tex_file_ignore_case(fp)
+        if fp:
+            with open(fp, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+        else:
+            raise RuntimeError(f'找不到{fp}Tex源文件缺失')
        c = merge_tex_files_(project_foler, c, mode)
        main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
    return main_file
@@ -324,7 +332,7 @@ def split_subprocess(txt, project_folder, return_dict, opts):
    # 吸收在42行以内的begin-end组合
    text, mask = set_forbidden_text_begin_end(text, mask, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=42)
    # 吸收匿名公式
-    text, mask = set_forbidden_text(text, mask, [ r"\$\$(.*?)\$\$", r"\\\[.*?\\\]" ], re.DOTALL)
+    text, mask = set_forbidden_text(text, mask, [ r"\$\$([^$]+)\$\$", r"\\\[.*?\\\]" ], re.DOTALL)
    # 吸收其他杂项
    text, mask = set_forbidden_text(text, mask, [ r"\\section\{(.*?)\}", r"\\section\*\{(.*?)\}", r"\\subsection\{(.*?)\}", r"\\subsubsection\{(.*?)\}" ])
    text, mask = set_forbidden_text(text, mask, [ r"\\bibliography\{(.*?)\}", r"\\bibliographystyle\{(.*?)\}" ])
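The last change above is the "Fix double-dollar formula matching bug" commit. With `re.DOTALL`, the old non-greedy `\$\$(.*?)\$\$` can mistake the `$$` junction between two adjacent inline formulas (`$a$$b$`) for a display-math opener and swallow ordinary text up to the next junction. Requiring the body to contain no `$` prevents that. A quick demonstration:

```python
import re

text = r"inline $a$$b$ words $c$$d$ end"  # two pairs of back-to-back inline formulas

# Old pattern: the '$$' junctions pair up across plain text.
old = re.findall(r"\$\$(.*?)\$\$", text, re.DOTALL)
print(old)   # ['b$ words $c']  -- plain text captured as a display formula

# New pattern: the body may not contain '$', so junctions cannot pair across text.
new = re.findall(r"\$\$([^$]+)\$\$", text, re.DOTALL)
print(new)   # []
```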

View File

@@ -0,0 +1,93 @@
import time, threading, json


class AliyunASR():

    def test_on_sentence_begin(self, message, *args):
        # print("test_on_sentence_begin:{}".format(message))
        pass

    def test_on_sentence_end(self, message, *args):
        # print("test_on_sentence_end:{}".format(message))
        message = json.loads(message)
        self.parsed_sentence = message['payload']['result']
        self.event_on_entence_end.set()
        print(self.parsed_sentence)

    def test_on_start(self, message, *args):
        # print("test_on_start:{}".format(message))
        pass

    def test_on_error(self, message, *args):
        # print("on_error args=>{}".format(args))
        pass

    def test_on_close(self, *args):
        self.aliyun_service_ok = False
        pass

    def test_on_result_chg(self, message, *args):
        # print("test_on_chg:{}".format(message))
        message = json.loads(message)
        self.parsed_text = message['payload']['result']
        self.event_on_result_chg.set()

    def test_on_completed(self, message, *args):
        # print("on_completed:args=>{} message=>{}".format(args, message))
        pass

    def audio_convertion_thread(self, uuid):
        # 在一个异步线程中采集音频
        import nls  # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
        import tempfile
        from scipy import io
        from toolbox import get_conf
        from .audio_io import change_sample_rate
        from .audio_io import RealtimeAudioDistribution
        NEW_SAMPLERATE = 16000
        rad = RealtimeAudioDistribution()
        rad.clean_up()
        temp_folder = tempfile.gettempdir()
        TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
        self.aliyun_service_ok = True
        URL = "wss://nls-gateway.aliyuncs.com/ws/v1"
        sr = nls.NlsSpeechTranscriber(
            url=URL,
            token=TOKEN,
            appkey=APPKEY,
            on_sentence_begin=self.test_on_sentence_begin,
            on_sentence_end=self.test_on_sentence_end,
            on_start=self.test_on_start,
            on_result_changed=self.test_on_result_chg,
            on_completed=self.test_on_completed,
            on_error=self.test_on_error,
            on_close=self.test_on_close,
            callback_args=[uuid.hex]
        )

        r = sr.start(aformat="pcm",
                     enable_intermediate_result=True,
                     enable_punctuation_prediction=True,
                     enable_inverse_text_normalization=True)

        while not self.stop:
            # time.sleep(self.capture_interval)
            audio = rad.read(uuid.hex)
            if audio is not None:
                # convert to pcm file
                temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
                dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE)  # 48000 --> 16000
                io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
                # read pcm binary
                with open(temp_file, "rb") as f: data = f.read()
                # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
                slices = zip(*(iter(data),) * 640)  # 640个字节为一组
                for i in slices: sr.send_audio(bytes(i))
            else:
                time.sleep(0.1)

            if not self.aliyun_service_ok:
                self.stop = True
                self.stop_msg = 'Aliyun音频服务异常请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期。'
        r = sr.stop()
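The transcriber sends PCM in 640-byte slices: at 16 kHz, 16-bit mono, 640 bytes is exactly 20 ms of audio (16000 samples/s × 2 bytes × 0.02 s). The `zip(*(iter(data),) * 640)` idiom groups the byte stream into those frames, silently dropping any tail shorter than one frame:

```python
# Chunk a PCM byte stream into 640-byte frames (20 ms of 16 kHz, 16-bit mono audio).
data = b"\x00\x01" * 1600              # 3200 bytes of fake PCM, about 100 ms
frames = [bytes(chunk) for chunk in zip(*(iter(data),) * 640)]
print(len(frames), len(frames[0]))     # 5 640
```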

View File

@@ -0,0 +1,51 @@
import numpy as np
from scipy import interpolate


def Singleton(cls):
    _instance = {}

    def _singleton(*args, **kargs):
        if cls not in _instance:
            _instance[cls] = cls(*args, **kargs)
        return _instance[cls]

    return _singleton


@Singleton
class RealtimeAudioDistribution():
    def __init__(self) -> None:
        self.data = {}
        self.max_len = 1024*1024
        self.rate = 48000  # 只读,每秒采样数量

    def clean_up(self):
        self.data = {}

    def feed(self, uuid, audio):
        self.rate, audio_ = audio
        # print('feed', len(audio_), audio_[-25:])
        if uuid not in self.data:
            self.data[uuid] = audio_
        else:
            new_arr = np.concatenate((self.data[uuid], audio_))
            if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:]
            self.data[uuid] = new_arr

    def read(self, uuid):
        if uuid in self.data:
            res = self.data.pop(uuid)
            print('\r read-', len(res), '-', max(res), end='', flush=True)
        else:
            res = None
        return res


def change_sample_rate(audio, old_sr, new_sr):
    duration = audio.shape[0] / old_sr
    time_old = np.linspace(0, duration, audio.shape[0])
    time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr))
    interpolator = interpolate.interp1d(time_old, audio.T)
    new_audio = interpolator(time_new).T
    return new_audio.astype(np.int16)
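`change_sample_rate` resamples by linear interpolation over a shared time axis. A quick sanity check, downsampling one second of a 440 Hz tone from 48 kHz to 16 kHz the way the audio thread does (the function is repeated here only to make the sketch self-contained):

```python
import numpy as np
from scipy import interpolate

def change_sample_rate(audio, old_sr, new_sr):
    duration = audio.shape[0] / old_sr
    time_old = np.linspace(0, duration, audio.shape[0])
    time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr))
    interpolator = interpolate.interp1d(time_old, audio.T)
    return interpolator(time_new).T.astype(np.int16)

t = np.linspace(0, 1, 48000)
tone = (10000 * np.sin(2 * np.pi * 440 * t)).astype(np.int16)  # 1 s at 48 kHz
down = change_sample_rate(tone, 48000, 16000)
print(tone.shape, down.shape)   # (48000,) (16000,)
```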

View File

@@ -0,0 +1,38 @@
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading


def Singleton(cls):
    _instance = {}

    def _singleton(*args, **kargs):
        if cls not in _instance:
            _instance[cls] = cls(*args, **kargs)
        return _instance[cls]

    return _singleton


@Singleton
class vhmp_interface():
    def __init__(self) -> None:
        from VISUALIZE.mcom_rt import mcom
        self.vis3d = mcom(path='TEMP/v2d_logger/', draw_mode='Threejs')
        self.vis3d.v2d_init()
        self.vis3d.设置样式('star')
        # vis3d.设置样式('star')  # 布置星空
        self.vis3d.其他几何体之旋转缩放和平移('box', 'BoxGeometry(1,1,1)', 0,0,0, 1,1,1, 0,0,0)
        # declare geo 'oct1', init with OctahedronGeometry, then (1)rotate & (2)scale & (3)translate
        self.vis3d.其他几何体之旋转缩放和平移('octahedron', 'OctahedronGeometry(1,0)', 0,0,0, 1,1,1, 0,0,0)  # 八面体
        # 需要换成其他几何体,请把'OctahedronGeometry(1,0)'替换,参考网址 https://threejs.org/docs/index.html?q=Geometry
        self.vis3d.其他几何体之旋转缩放和平移('sphere', 'SphereGeometry(1)', 0,0,0, 1,1,1, 0,0,0)  # 球体
        self.vis3d.其他几何体之旋转缩放和平移('cylinder', 'CylinderGeometry(1,1,5,32)', 0,0,0, 1,1,1, 0,0,0)  # 圆柱体

    def update(self, json):
        for obj in json:
            self.vis3d.发送几何体(
                f'{obj["geometry"]}|{obj["name"]}|{obj["color"]}|{obj["size"]}',  # 填入 ‘形状|几何体之ID标识|颜色|大小’即可
                obj["location_x"],
                obj["location_y"],
                obj["location_z"],
                ro_x=0, ro_y=0, ro_z=0,  # 三维位置+欧拉旋转变换,六自由度
                track_n_frame=0)  # 显示历史20帧留下的轨迹
        self.vis3d.结束关键帧()
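Putting the two halves of the ThreeJS plugin together: the model's JSON list feeds straight into `vhmp_interface.update`, which forwards each object to the renderer as a `'shape|id|color|size'` string. A usage sketch (not runnable without the optional `vhmap` dependency mentioned in `inspect_dependency`; the import path is taken from the plugin above):

```python
# from crazy_functions.vhmap_interact.vhmap import vhmp_interface  # requires `pip install vhmap`

vi = vhmp_interface()
vi.update([{
    "name": "Box-1", "geometry": "box", "size": 1.0,
    "color": "rgb(255,165,0)",
    "location_x": 1.0, "location_y": 0.0, "location_z": 0.0,
}])  # draws one orange unit box at (1, 0, 0)
```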

View File

@@ -144,11 +144,11 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
-        import pdfminer, bs4
+        import bs4
    except:
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
-                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
+                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return

View File

@@ -0,0 +1,63 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive


@CatchException
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
    plugin_kwargs   插件模型的参数, 如温度和top_p等, 一般原样传递下去就行
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    history = []    # 清空历史,以免输入溢出
    chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    state = chatbot._cookies.get('plugin_state_0001', None)  # 初始化插件状态
    if state is None:
        chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数'  # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
        chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword'  # 赋予插件状态
        chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。"))
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return

    if state == 'wait_user_keyword':
        chatbot._cookies['lock_plugin'] = None        # 解除插件锁定,避免遗忘导致死锁
        chatbot._cookies['plugin_state_0001'] = None  # 解除插件状态,避免遗忘导致死锁

        # 解除插件锁定
        chatbot.append((f"获取关键词:{txt}", ""))
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        page_return = get_image_page_by_keyword(txt)
        inputs = inputs_show_user = f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=inputs, inputs_show_user=inputs_show_user,
            llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
            sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'"
        )
        chatbot[-1] = [chatbot[-1][0], gpt_say]
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return


# ---------------------------------------------------------------------------------


def get_image_page_by_keyword(keyword):
    import requests
    from bs4 import BeautifulSoup
    response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2)
    res = "image urls: \n"
    for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"):
        try:
            res += image_element["data-src"]
            res += "\n"
        except:
            pass
    return res
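The template above introduces a cookie-based state machine: setting `lock_plugin` routes the user's next submission straight back into this plugin, and `plugin_state_0001` records where the conversation left off. A minimal sketch of the same pattern, using a plain dict in place of the real chatbot cookie handle:

```python
def stateful_plugin(txt, cookies):
    state = cookies.get('plugin_state_0001')
    if state is None:
        # First call: lock follow-up input to this plugin and ask for a keyword.
        cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数'
        cookies['plugin_state_0001'] = 'wait_user_keyword'
        return "Please enter a keyword."
    if state == 'wait_user_keyword':
        # Second call: release the lock so a forgotten state cannot deadlock the UI.
        cookies['lock_plugin'] = None
        cookies['plugin_state_0001'] = None
        return f"Searching wallpapers for: {txt}"

cookies = {}
print(stateful_plugin("", cookies))       # first call: asks for a keyword
print(stateful_plugin("ocean", cookies))  # second call: consumes the keyword
```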

View File

@@ -12,7 +12,7 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
        file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
    os.makedirs('./gpt_log/', exist_ok=True)
    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
-        from theme import advanced_css
+        from themes.theme import advanced_css
        f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
    for i, contents in enumerate(chatbot):
        for j, content in enumerate(contents):

View File

@@ -14,17 +14,19 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
            doc = Document(fp)
            file_content = "\n".join([para.text for para in doc.paragraphs])
        else:
-            import win32com.client
-            word = win32com.client.Dispatch("Word.Application")
-            word.visible = False
-            # 打开文件
-            print('fp', os.getcwd())
-            doc = word.Documents.Open(os.getcwd() + '/' + fp)
-            # file_content = doc.Content.Text
-            doc = word.ActiveDocument
-            file_content = doc.Range().Text
-            doc.Close()
-            word.Quit()
+            try:
+                import win32com.client
+                word = win32com.client.Dispatch("Word.Application")
+                word.visible = False
+                # 打开文件
+                doc = word.Documents.Open(os.getcwd() + '/' + fp)
+                # file_content = doc.Content.Text
+                doc = word.ActiveDocument
+                file_content = doc.Range().Text
+                doc.Close()
+                word.Quit()
+            except:
+                raise RuntimeError('请先将.doc文档转换为.docx文档。')
        print(file_content)
        # private_upload里面的文件名在解压zip后容易出现乱码rar和7z格式正常故可以只分析文章内容不输入文件名

View File

@@ -1,121 +1,107 @@
-from toolbox import update_ui
+from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
from toolbox import CatchException, report_execption, write_results_to_file
-import re
-import unicodedata
-fast_debug = False
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from .crazy_utils import read_and_clean_pdf_text
+from .crazy_utils import input_clipping

-def is_paragraph_break(match):
-    """
-    根据给定的匹配结果来判断换行符是否表示段落分隔。
-    如果换行符前为句子结束标志(句号,感叹号,问号),且下一个字符为大写字母,则换行符更有可能表示段落分隔。
-    也可以根据之前的内容长度来判断段落是否已经足够长。
-    """
-    prev_char, next_char = match.groups()
-    # 句子结束标志
-    sentence_endings = ".!?"
-    # 设定一个最小段落长度阈值
-    min_paragraph_length = 140
-    if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length:
-        return "\n\n"
-    else:
-        return " "

-def normalize_text(text):
-    """
-    通过把连字ligatures等文本特殊符号转换为其基本形式来对文本进行归一化处理。
-    例如,将连字 "fi" 转换为 "f""i"
-    """
-    # 对文本进行归一化处理,分解连字
-    normalized_text = unicodedata.normalize("NFKD", text)
-    # 替换其他特殊字符
-    cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text)
-    return cleaned_text

-def clean_text(raw_text):
-    """
-    对从 PDF 提取出的原始文本进行清洗和格式化处理。
-    1. 对原始文本进行归一化处理。
-    2. 替换跨行的连词
-    3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换
-    """
-    # 对文本进行归一化处理
-    normalized_text = normalize_text(raw_text)
-    # 替换跨行的连词
-    text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text)
-    # 根据前后相邻字符的特点,找到原文本中的换行符
-    newlines = re.compile(r'(\S)\n(\S)')
-    # 根据 heuristic 规则,用空格或段落分隔符替换原换行符
-    final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text)
-    return final_text.strip()

def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    import time, glob, os, fitz
-    print('begin analysis on:', file_manifest)
-    for index, fp in enumerate(file_manifest):
-        with fitz.open(fp) as doc:
-            file_content = ""
-            for page in doc:
-                file_content += page.get_text()
-            file_content = clean_text(file_content)
-            print(file_content)
+    file_write_buffer = []
+    for file_name in file_manifest:
+        print('begin analysis on:', file_name)
+        ############################## <第 0 步切割PDF> ##################################
+        # 递归地切割PDF文件每一块尽量是完整的一个section比如introductionexperiment等必要时再进行切割
+        # 的长度必须小于 2500 个 Token
+        file_content, page_one = read_and_clean_pdf_text(file_name)  # 尝试按照章节切割PDF
+        file_content = file_content.encode('utf-8', 'ignore').decode()   # avoid reading non-utf8 chars
+        page_one = str(page_one).encode('utf-8', 'ignore').decode()      # avoid reading non-utf8 chars
+        TOKEN_LIMIT_PER_FRAGMENT = 2500
-        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
-        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
-        i_say_show_user = prefix + f'[{index + 1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+        from request_llm.bridge_all import model_info
+        enc = model_info["gpt-3.5-turbo"]['tokenizer']
+        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
+        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
+            txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
+        page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
+            txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
+        # 为了更好的效果我们剥离Introduction之后的部分如果有
+        paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
+        ############################## <第 1 步从摘要中提取高价值信息放到history中> ##################################
+        final_results = []
+        final_results.append(paper_meta)
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                inputs=i_say,
-                inputs_show_user=i_say_show_user,
-                llm_kwargs=llm_kwargs,
-                chatbot=chatbot,
-                history=[],
-                sys_prompt="总结文章。"
-            )  # 带超时倒计时
+        ############################## <第 2 步,迭代地历遍整个文章,提取精炼信息> ##################################
+        i_say_show_user = f'首先你在中文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。"  # 用户提示
+        chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[])  # 更新UI
-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-            if not fast_debug: time.sleep(2)
+        iteration_results = []
+        last_iteration_result = paper_meta  # 初始值是摘要
+        MAX_WORD_TOTAL = 4096 * 0.7
+        n_fragment = len(paper_fragments)
+        if n_fragment >= 20: print('文章极长,不能达到预期效果')
+        for i in range(n_fragment):
+            NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
+            i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i]}"
+            i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i][:200]}"
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user,  # i_say=真正给chatgpt的提问 i_say_show_user=给用户看的提问
+                                                                               llm_kwargs, chatbot,
+                                                                               history=["The main idea of the previous section is?", last_iteration_result],  # 迭代上一次的结果
+                                                                               sys_prompt="Extract the main idea of this section with Chinese."  # 提示
+                                                                               )
+            iteration_results.append(gpt_say)
+            last_iteration_result = gpt_say
-    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
-    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
-    chatbot.append((i_say, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    if not fast_debug:
-        msg = '正常'
-        # ** gpt request **
+        ############################## <第 3 步整理history提取总结> ##################################
+        final_results.extend(iteration_results)
+        final_results.append(f'Please conclude this paper discussed above。')
+        # This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py
+        NUM_OF_WORD = 1000
+        i_say = """
+1. Mark the title of the paper (with Chinese translation)
+2. list all the authors' names (use English)
+3. mark the first author's affiliation (output Chinese translation only)
+4. mark the keywords of this article (use English)
+5. link to the paper, Github code link (if available, fill in Github:None if not)
+6. summarize according to the following four points.Be sure to use Chinese answers (proper nouns need to be marked in English)
+   - (1):What is the research background of this article?
+   - (2):What are the past methods? What are the problems with them? Is the approach well motivated?
+   - (3):What is the research methodology proposed in this paper?
+   - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
+Follow the format of the output that follows:
+1. Title: xxx\n\n
+2. Authors: xxx\n\n
+3. Affiliation: xxx\n\n
+4. Keywords: xxx\n\n
+5. Urls: xxx or xxx , xxx \n\n
+6. Summary: \n\n
+   - (1):xxx;\n
+   - (2):xxx;\n
+   - (3):xxx;\n
+   - (4):xxx.\n\n
+Be sure to use Chinese answers (proper nouns need to be marked in English), statements as concise and academic as possible,
+do not have too much repetitive information, numerical values using the original numbers.
+        """
+        # This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py
+        file_write_buffer.extend(final_results)
+        i_say, final_results = input_clipping(i_say, final_results, max_token_limit=2000)
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-            inputs=i_say,
-            inputs_show_user=i_say,
-            llm_kwargs=llm_kwargs,
-            chatbot=chatbot,
-            history=history,
-            sys_prompt="总结文章。"
-        )  # 带超时倒计时
+            inputs=i_say, inputs_show_user='开始最终总结',
+            llm_kwargs=llm_kwargs, chatbot=chatbot, history=final_results,
+            sys_prompt= f"Extract the main idea of this paper with less than {NUM_OF_WORD} Chinese characters"
+        )
+        final_results.append(gpt_say)
+        file_write_buffer.extend([i_say, gpt_say])
+        ############################## <第 4 步设置一个token上限> ##################################
+        _, final_results = input_clipping("", final_results, max_token_limit=3200)
+        yield from update_ui(chatbot=chatbot, history=final_results)  # 注意这里的历史记录被替代了
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say); history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-        res = write_results_to_file(history)
-        chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+    res = write_results_to_file(file_write_buffer, file_name=gen_time_str())
+    promote_file_to_downloadzone(res.split('\t')[-1], chatbot=chatbot)
+    yield from update_ui(chatbot=chatbot, history=final_results)  # 刷新界面
@CatchException
@@ -151,10 +137,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
        return

    # 搜索需要处理的文件清单
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
-    #                 [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-    #                 [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-    #                 [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]

    # 如果没找到任何文件
    if len(file_manifest) == 0:

View File

@@ -6,7 +6,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
@@ -35,7 +35,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒

View File

@@ -0,0 +1,195 @@
from toolbox import update_ui
from toolbox import CatchException, get_conf, markdown_convertion
from crazy_functions.crazy_utils import input_clipping
from request_llm.bridge_all import predict_no_ui_long_connection
import threading, time
import numpy as np
from .live_audio.aliyunASR import AliyunASR
import json
class WatchDog():
def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
self.last_feed = None
self.timeout = timeout
self.bark_fn = bark_fn
self.interval = interval
self.msg = msg
self.kill_dog = False
def watch(self):
while True:
if self.kill_dog: break
if time.time() - self.last_feed > self.timeout:
if len(self.msg) > 0: print(self.msg)
self.bark_fn()
break
time.sleep(self.interval)
def begin_watch(self):
self.last_feed = time.time()
th = threading.Thread(target=self.watch)
th.daemon = True
th.start()
def feed(self):
self.last_feed = time.time()
def chatbot2history(chatbot):
history = []
for c in chatbot:
for q in c:
if q not in ["[请讲话]", "[等待GPT响应]", "[正在等您说完问题]"]:
history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
return history
class AsyncGptTask():
def __init__(self) -> None:
self.observe_future = []
self.observe_future_chatbot_index = []
def gpt_thread_worker(self, i_say, llm_kwargs, history, sys_prompt, observe_window, index):
try:
MAX_TOKEN_ALLO = 2560
i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO)
gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
observe_window=observe_window[index], console_slience=True)
except ConnectionAbortedError as token_exceed_err:
print('至少一个线程任务Token溢出而失败', token_exceed_err)
except Exception as e:
print('至少一个线程任务意外失败', e)
def add_async_gpt_task(self, i_say, chatbot_index, llm_kwargs, history, system_prompt):
self.observe_future.append([""])
self.observe_future_chatbot_index.append(chatbot_index)
cur_index = len(self.observe_future)-1
th_new = threading.Thread(target=self.gpt_thread_worker, args=(i_say, llm_kwargs, history, system_prompt, self.observe_future, cur_index))
th_new.daemon = True
th_new.start()
def update_chatbot(self, chatbot):
for of, ofci in zip(self.observe_future, self.observe_future_chatbot_index):
try:
chatbot[ofci] = list(chatbot[ofci])
chatbot[ofci][1] = markdown_convertion(of[0])
except:
self.observe_future = []
self.observe_future_chatbot_index = []
return chatbot
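`AsyncGptTask` hands each request a shared slot (`observe_future[index]`) that the worker thread mutates in place while the UI loop polls `update_chatbot`. A self-contained sketch of that observe-window pattern (the streamed pieces are invented for illustration):

```python
import threading, time

def worker(observe_window, index):
    # Stream partial output into a shared one-element list; mutating the
    # list in place makes every update visible to the polling thread.
    for piece in ["Hel", "Hello, wor", "Hello, world"]:
        observe_window[index][0] = piece
        time.sleep(0.1)

observe_window = [[""]]
th = threading.Thread(target=worker, args=(observe_window, 0), daemon=True)
th.start()
for _ in range(5):
    print("partial:", observe_window[0][0])  # non-blocking poll, like update_chatbot
    time.sleep(0.08)
th.join()
```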
class InterviewAssistant(AliyunASR):
def __init__(self):
self.capture_interval = 0.5 # second
self.stop = False
self.parsed_text = ""
self.parsed_sentence = ""
self.buffered_sentence = ""
self.event_on_result_chg = threading.Event()
self.event_on_entence_end = threading.Event()
self.event_on_commit_question = threading.Event()
def __del__(self):
self.stop = True
self.stop_msg = ""
self.commit_wd.kill_dog = True
self.plugin_wd.kill_dog = True
def init(self, chatbot):
# 初始化音频采集线程
self.captured_audio = np.array([])
self.keep_latest_n_second = 10
self.commit_after_pause_n_second = 1.5
self.ready_audio_flagment = None
self.stop = False
self.plugin_wd = WatchDog(timeout=5, bark_fn=self.__del__, msg="程序终止")
self.aut = threading.Thread(target=self.audio_convertion_thread, args=(chatbot._cookies['uuid'],))
self.aut.daemon = True
self.aut.start()
# th2 = threading.Thread(target=self.audio2txt_thread, args=(chatbot._cookies['uuid'],))
# th2.daemon = True
# th2.start()
def no_audio_for_a_while(self):
if len(self.buffered_sentence) < 7: # 如果一句话小于7个字暂不提交
self.commit_wd.begin_watch()
else:
self.event_on_commit_question.set()
def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
# main plugin function
self.init(chatbot)
chatbot.append(["[请讲话]", "[正在等您说完问题]"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.plugin_wd.begin_watch()
self.agt = AsyncGptTask()
self.commit_wd = WatchDog(timeout=self.commit_after_pause_n_second, bark_fn=self.no_audio_for_a_while, interval=0.2)
self.commit_wd.begin_watch()
while not self.stop:
self.event_on_result_chg.wait(timeout=0.25) # run once every 0.25 second
chatbot = self.agt.update_chatbot(chatbot) # 将子线程的gpt结果写入chatbot
history = chatbot2history(chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.plugin_wd.feed()
if self.event_on_result_chg.is_set():
# update audio decode result
self.event_on_result_chg.clear()
chatbot[-1] = list(chatbot[-1])
chatbot[-1][0] = self.buffered_sentence + self.parsed_text
history = chatbot2history(chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.commit_wd.feed()
if self.event_on_entence_end.is_set():
# called when a sentence has ended
self.event_on_entence_end.clear()
self.parsed_text = self.parsed_sentence
self.buffered_sentence += self.parsed_sentence
if self.event_on_commit_question.is_set():
# called when a question should be commited
self.event_on_commit_question.clear()
if len(self.buffered_sentence) == 0: raise RuntimeError
self.commit_wd.begin_watch()
chatbot[-1] = list(chatbot[-1])
chatbot[-1] = [self.buffered_sentence, "[等待GPT响应]"]
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# add gpt task 创建子线程请求gpt避免线程阻塞
history = chatbot2history(chatbot)
self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
self.buffered_sentence = ""
chatbot.append(["[请讲话]", "[正在等您说完问题]"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
if len(self.stop_msg) != 0:
raise RuntimeError(self.stop_msg)
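The `begin` loop above multiplexes several `threading.Event` flags with a short `wait` timeout, so the UI still refreshes roughly every 0.25 s even when nothing fires. A self-contained sketch of that wait-with-timeout pattern (names hypothetical):

```python
import threading, time

on_result_chg = threading.Event()  # stands in for event_on_result_chg

def fake_asr_callback():
    time.sleep(0.6)
    on_result_chg.set()            # e.g. the ASR thread reporting new text

threading.Thread(target=fake_asr_callback, daemon=True).start()
for _ in range(8):
    on_result_chg.wait(timeout=0.25)    # wakes early once the event fires
    if on_result_chg.is_set():
        on_result_chg.clear()
        print("handle the new ASR result")
    else:
        print("heartbeat: refresh the UI and feed the watchdog")
```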
@CatchException
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# pip install -U openai-whisper
chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
import nls
from scipy import io
except:
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
if TOKEN == "" or APPKEY == "":
chatbot.append(["导入依赖失败", "没有阿里云语音识别APPKEY和TOKEN, 详情见https://help.aliyun.com/document_detail/450255.html"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
ia = InterviewAssistant()
yield from ia.begin(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
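The plugin entry above degrades gracefully when the ASR dependencies are missing: the import failure is caught and an install hint is shown instead of a crash. The same guard, isolated into a sketch (module names as in docs/use_audio.md):

```python
def audio_deps_ready():
    # Probe the optional dependencies without letting an ImportError escape.
    try:
        import nls               # from alibabacloud-nls-python-sdk (probe only)
        from scipy import io     # noqa: F401 (probe only)
        return True
    except ImportError:
        return False

if not audio_deps_ready():
    print("pip install --upgrade pyOpenSSL scipy "
          "git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git")
```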

View File

@@ -6,7 +6,7 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒

View File

@@ -6,7 +6,7 @@
version: '3'
services:
gpt_academic_nolocalllms:
image: ghcr.io/binary-husky/gpt_academic_nolocal:master
image: ghcr.io/binary-husky/gpt_academic_nolocal:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@@ -33,7 +33,7 @@ services:
version: '3'
services:
gpt_academic_with_chatglm:
image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master
image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@@ -63,7 +63,7 @@ services:
version: '3'
services:
gpt_academic_with_rwkv:
image: fuqingxu/gpt_academic:jittorllms # [option 2] 如果需要运行ChatGLM本地模型
image: ghcr.io/binary-husky/gpt_academic_jittorllms:master
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@@ -85,33 +85,18 @@ services:
# 与宿主的网络融合
network_mode: "host"
# 使用代理网络拉取最新代码
# command: >
# bash -c " truncate -s -1 /etc/proxychains.conf &&
# echo \"socks5 127.0.0.1 10880\" >> /etc/proxychains.conf &&
# echo '[gpt-academic] 正在从github拉取最新代码...' &&
# proxychains git pull &&
# echo '[jittorllms] 正在从github拉取最新代码...' &&
# proxychains git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
# python3 -u main.py"
# 不使用代理网络拉取最新代码
command: >
bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' &&
git pull &&
pip install -r requirements.txt &&
echo '[jittorllms] 正在从github拉取最新代码...' &&
git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
python3 -u main.py"
python3 -u main.py
## ===================================================
## 【方案四】 chatgpt + Latex
## 【方案四】 ChatGPT + Latex
## ===================================================
version: '3'
services:
gpt_academic_with_latex:
image: ghcr.io/binary-husky/gpt_academic_with_latex:master
image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '

View File

@@ -26,7 +26,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
# 下载分支
WORKDIR /gpt
RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b jittor
RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git
WORKDIR /gpt/chatgpt_academic
RUN $useProxyNetwork python3 -m pip install -r requirements.txt
RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt

View File

@@ -13,7 +13,7 @@ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/
# 下载分支
WORKDIR /gpt
RUN git clone https://github.com/binary-husky/chatgpt_academic.git -b jittor
RUN git clone https://github.com/binary-husky/chatgpt_academic.git
WORKDIR /gpt/chatgpt_academic
RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt

Binary file not shown.

View File

@@ -1956,5 +1956,134 @@
"填入ENGINE": "Fill in ENGINE",
"填入api版本": "Fill in the API version",
"中文Bing版": "Chinese Bing version",
"当前支持的格式包括": "Currently supported formats include"
"当前支持的格式包括": "Currently supported formats include",
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
"语音助手": "VoiceAssistant",
"微调数据集生成": "FineTuneDatasetGeneration",
"chatglm微调工具": "ChatGLMFineTuningTool",
"启动微调": "StartFineTuning",
"请讲话": "Please speak",
"正在听您讲话": "Listening to you",
"对这个人外貌、身处的环境、内心世界、过去经历进行描写": "Describe the appearance, environment, inner world, and past experiences of this person",
"请向下翻": "Please scroll down",
"实时音频采集": "Real-time audio collection",
"找不到": "Not found",
"在一个异步线程中采集音频": "Collect audio in an asynchronous thread",
"azure和api2d请求源": "Azure and API2D request source",
"等待ChatGLMFT响应中": "Waiting for ChatGLMFT response",
"如果使用ChatGLM2微调模型": "If using ChatGLM2 fine-tuning model",
"把文件复制过去": "Copy the file over",
"可选": "Optional",
"ChatGLMFT响应异常": "ChatGLMFT response exception",
"上传本地文件/压缩包供函数插件调用": "Upload local files/compressed packages for function plugin calls",
"例如 f37f30e0f9934c34a992f6f64f7eba4f": "For example, f37f30e0f9934c34a992f6f64f7eba4f",
"正在等您说完问题": "Waiting for you to finish the question",
"解除插件状态": "Release plugin status",
"详情见https": "See details at https",
"避免线程阻塞": "Avoid thread blocking",
"先上传数据集": "Upload dataset first",
"请直接提交即可": "Submit directly",
"Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数": "Call ChatGLMFT fail, cannot load ChatGLMFT parameters",
"插件可读取“输入区”文本/路径作为参数": "The plugin can read text/path in the input area as parameters",
"给出指令": "Give instructions",
"暂不提交": "Do not submit for now",
"如 绿帽子*深蓝色衬衫*黑色运动裤": "E.g. green hat * dark blue shirt * black sports pants",
"阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "Aliyun real-time speech recognition has high configuration difficulty and is only recommended for advanced users. Refer to https",
"ChatGLMFT尚未加载": "ChatGLMFT has not been loaded yet",
"输入 clear 以清空对话历史": "Enter 'clear' to clear the conversation history",
"可以将自身的状态存储到cookie中": "You can store your own status in cookies",
"填入你亲手写的部署名": "Fill in the deployment name you wrote by yourself",
"该选项即将被弃用": "This option will be deprecated soon",
"代理网络配置": "Proxy network configuration",
"每秒采样数量": "Number of samples per second",
"使用时": "When using",
"想象一个穿着者": "Imagine a wearer",
"如果已经存在": "If it already exists",
"例如您可以将以下命令复制到下方": "For example, you can copy the following command below",
"正在锁定插件": "Locking plugin",
"使用": "Use",
"读 docs\\use_azure.md": "Read docs\\use_azure.md",
"开始最终总结": "Start final summary",
"openai的官方KEY需要伴随组织编码": "Openai's official KEY needs to be accompanied by organizational code",
"将子线程的gpt结果写入chatbot": "Write the GPT result of the sub-thread into the chatbot",
"Arixv论文精细翻译": "Fine translation of Arixv paper",
"开始接收chatglmft的回复": "Start receiving replies from chatglmft",
"请先将.doc文档转换为.docx文档": "Please convert .doc documents to .docx documents first",
"避免多用户干扰": "Avoid multiple user interference",
"清空label": "Clear label",
"解除插件锁定": "Unlock plugin",
"请以以下方式load模型": "Please load the model in the following way!!!",
"没给定指令": "No instruction given",
"100字以内": "Within 100 words",
"获取关键词": "Get keywords",
"欢迎使用 MOSS 人工智能助手!": "Welcome to use MOSS AI assistant!",
"音频助手": "Audio assistant",
"上传Latex项目": "Upload Latex project",
"对话助手函数插件": "Chat assistant function plugin",
"如果一句话小于7个字": "If a sentence is less than 7 words",
"640个字节为一组": "640 bytes per group",
"右下角更换模型菜单中可切换openai": "OpenAI can be switched in the model menu in the lower right corner",
"双手离开鼠标键盘吧": "Take your hands off the mouse and keyboard",
"先删除": "Delete first",
"如果要使用ChatGLMFT": "If you want to use ChatGLMFT",
"例如 RoPlZrM88DnAFkZK": "For example, RoPlZrM88DnAFkZK",
"提取总结": "Extract summary",
"ChatGLMFT消耗大量的内存": "ChatGLMFT consumes a lot of memory",
"格式如org-123456789abcdefghijklmno的": "In the format of org-123456789abcdefghijklmno",
"在执行完成之后": "After execution is complete",
"此处填API密钥": "Fill in the API key here",
"chatglmft 没有 sys_prompt 接口": "ChatGLMFT does not have a sys_prompt interface",
"用第二人称": "Use the second person",
"Chuanhu-Small-and-Beautiful主题": "Chuanhu-Small-and-Beautiful theme",
"请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期": "Please check if ALIYUN_TOKEN and ALIYUN_APPKEY have expired",
"还需要填写组织": "You also need to fill in the organization",
"会直接转到该函数": "Will directly jump to the function",
"初始化插件状态": "Initializing plugin status",
"插件锁定中": "Plugin is locked",
"如果这里报错": "If there is an error here",
"本地Latex论文精细翻译": "Local Latex paper fine translation",
"极少数情况下": "In very few cases",
"首先你在中文语境下通读整篇论文": "First, read the entire paper in a Chinese context",
"点击“停止”键可终止程序": "Click the 'Stop' button to terminate the program",
"建议排查": "Suggested troubleshooting",
"没有阿里云语音识别APPKEY和TOKEN": "No Aliyun voice recognition APPKEY and TOKEN",
"避免遗忘导致死锁": "Avoid forgetting to cause deadlock",
"第一次调用": "First call",
"解决插件锁定时的界面显示问题": "Solve the interface display problem when the plugin is locked",
"初始化音频采集线程": "Initialize audio capture thread",
"找不到微调模型检查点": "Cannot find fine-tuning model checkpoint",
"色彩主体": "Color theme",
"上传文件自动修正路径": "Automatically correct the path when uploading files",
"将文件添加到chatbot cookie中": "Add files to chatbot cookie",
"正常状态": "Normal state",
"建议使用英文单词": "Suggest using English words",
"Aliyun音频服务异常": "Aliyun audio service exception",
"格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "Format like org-xxxxxxxxxxxxxxxxxxxxxxxx",
"GPT 学术优化": "GPT academic optimization",
"要求": "Requirement",
"赋予插件状态": "Assign plugin status",
"等待GPT响应": "Waiting for GPT response",
"MOSS can understand and communicate fluently in the language chosen by the user such as English and 中文. MOSS can perform any language-based tasks.": "MOSS can understand and communicate fluently in the language chosen by the user such as English and Chinese. MOSS can perform any language-based tasks.",
"我将为您查找相关壁纸": "I will search for related wallpapers for you",
"当下一次用户提交时": "When the next user submits",
"赋予插件锁定 锁定插件回调路径": "Assign plugin lock, lock plugin callback path",
"处理个别特殊插件的锁定状态": "Handle the lock status of individual special plugins",
"add gpt task 创建子线程请求gpt": "Add GPT task, create sub-thread to request GPT",
"等待用户的再次调用": "Waiting for the user to call again",
"只读": "Read-only",
"用于灵活调整复杂功能的各种参数": "Various parameters used to flexibly adjust complex functions",
"输入 stop 以终止对话": "Enter stop to terminate the conversation",
"缺少ChatGLMFT的依赖": "Missing dependency of ChatGLMFT",
"找 API_ORG 设置项": "Find API_ORG setting item",
"检查config中的AVAIL_LLM_MODELS选项": "Check the AVAIL_LLM_MODELS option in config",
"对这个人外貌、身处的环境、内心世界、人设进行描写": "Describe the appearance, environment, inner world, and character of this person.",
"请输入关键词": "Please enter a keyword.",
"!!!如果需要运行量化版本": "!!! If you need to run the quantitative version.",
"为每一位访问的用户赋予一个独一无二的uuid编码": "Assign a unique uuid code to each visiting user.",
"由于提问含不合规内容被Azure过滤": "Due to Azure filtering out questions containing non-compliant content.",
"欢迎使用 MOSS 人工智能助手!输入内容即可进行对话": "Welcome to use MOSS AI assistant! Enter the content to start the conversation.",
"记住当前的label": "Remember the current label.",
"不能正常加载ChatGLMFT的参数": "Cannot load ChatGLMFT parameters normally!",
"建议直接在API_KEY处填写": "It is recommended to fill in directly at API_KEY."
}

View File

@@ -150,26 +150,7 @@
"使用中文回答我的问题": "使用中文回答我的問題",
"备份一个文件": "備份一個文件",
"未知": "未知",
"如.md": "#",
"**输入参数说明**": "#",
"如果这裡拋出異常": "#",
"多線程操作已經開始": "#",
"備份和下載": "#",
"新版本可用": "#",
"將要忽略匹配的文件後綴": "#",
"可調節線程池的大小避免openai的流量限制錯誤": "#",
"使用Unsplash API": "#",
"ChatGPT綜合": "#",
"從摘要中提取高價值信息": "#",
"借助此參數": "#",
"知乎": "#",
"其他錯誤": "#",
"退出": "#",
"對話歷史寫入": "#",
"問詢記錄": "#",
"依次訪問網頁": "#",
"NewBing響應異常": "#",
"jittorllms尚未加載": "#",
"其他錯誤": "其他錯誤",
"等待NewBing响应": "等待NewBing回應",
"找不到任何CSharp文件": "找不到任何CSharp檔案",
"插件demo": "插件範例",
@@ -300,12 +281,12 @@
"上傳本地文件可供紅色函數插件調用": "上傳本地文件供紅色函數插件調用",
"生成圖像": "生成圖像",
"追加歷史": "追加歷史",
"網絡代理狀態": "網代理狀態",
"網絡代理狀態": "網代理狀態",
"不需要再次轉化": "不需要再次轉換",
"帶超時倒計時": "帶有超時倒數計時",
"保存當前對話": "儲存目前對話",
"等待響應": "等待回應",
"依賴檢測通過": "依賴檢通過",
"依賴檢測通過": "依賴檢通過",
"如果要使用ChatGLM": "如果要使用ChatGLM",
"對IPynb文件進行解析": "對IPynb檔案進行解析",
"先切換模型到openai或api2d": "先切換模型到openai或api2d",
@@ -411,7 +392,7 @@
"中转网址预览": "中轉網址預覽",
"自动截断": "自動截斷",
"当無法用標點、空行分割時": "當無法用標點、空行分割時",
"意外Json結構": "意外Json結構",
"意外Json結構": "意外Json結構",
"需要讀取和清理文本的pdf文件路徑": "需要讀取和清理文本的pdf文件路徑",
"HotReload的裝飾器函數": "HotReload的裝飾器函數",
"chatGPT 分析報告": "chatGPT 分析報告",
@@ -423,7 +404,7 @@
"這個bug沒找到觸發條件": "這個bug沒找到觸發條件",
"喚起高級參數輸入區": "喚起高級參數輸入區",
"但大部分場合下並不需要修改": "但大部分場合下並不需要修改",
"盡量是完整的一個section": "盡量完整的一個section",
"盡量是完整的一個section": "盡量選擇完整的一個章節",
"如果OpenAI不響應": "如果OpenAI不響應",
"等文本特殊符號轉換為其基本形式來對文本進行歸一化處理": "等文本特殊符號轉換為其基本形式來對文本進行歸一化處理",
"你的回答必須簡單明了": "你的回答必須簡單明了",
@@ -517,7 +498,7 @@
"正在提取摘要並下載PDF文檔……": "正在提取摘要並下載PDF文件……",
"1. 對原始文本進行歸一化處理": "1. 正規化原始文本",
"問題": "問題",
"用於基礎的對話功能": "基本對話功能",
"用於基礎的對話功能": "用於基礎的對話功能",
"獲取設置": "獲取設置",
"如果缺少依賴": "如果缺少依賴項",
"第6步": "第6步",
@@ -1111,26 +1092,9 @@
"清理规则包括": "清理規則包括",
"新版配置": "新版配置",
"如果有": "如果有",
"高級參數輸入區": "#",
"您提供的api-key不滿足要求": "#",
"“喂狗”": "#",
"有線程鎖": "#",
"解析整個CSharp項目": "#",
"上下文管理器必須實現兩個方法": "#",
"Call MOSS fail 不能正常加載MOSS的參數": "#",
"獲取圖片URL": "#",
"輸入部分太自由": "#",
"Not enough point. API2D賬戶點數不足": "#",
"網絡錯誤": "#",
"請開始多線程操作": "#",
"authors獲取失敗": "#",
"、地址": "#",
"根據以上分析": "#",
"1、英文題目2、中文題目翻譯3、作者4、arxiv公開": "#",
"一些普通功能模塊": "#",
"參數簡單": "#",
"具備以下功能": "#",
"優先級2. 獲取config_private中的配置": "#",
"Call MOSS fail 不能正常加載MOSS的參數": "Call MOSS fail 不能正常加載MOSS的參數",
"根據以上分析": "根據以上分析",
"一些普通功能模塊": "一些普通功能模塊",
"汇总报告如何远程获取": "如何遠程獲取匯總報告",
"热更新prompt": "熱更新提示",
"插件调度异常": "插件調度異常",
@@ -1191,26 +1155,9 @@
"函数插件区": "函數插件區",
"*** API_KEY 导入成功": "*** API_KEY 導入成功",
"请对下面的程序文件做一个概述文件名是": "請對下面的程序文件做一個概述文件名是",
"替換跨行的連詞": "#",
"內容太長了都會觸發token數量溢出的錯誤": "#",
"尚未完成全部響應": "#",
"生成帶有段落標籤的HTML代碼": "#",
"函數熱更新是指在不停止程序運行的情況下": "#",
"將Unsplash API中的PUT_YOUR_QUERY_HERE替換成描述該事件的一個最重要的單詞": "#",
"沒有提供高級參數功能說明": "#",
"條": "#",
"請刷新界面重試": "#",
"和openai的連接容易斷掉": "#",
"使用 Unsplash API": "#",
"完成情況": "#",
"迭代上一次的結果": "#",
"每個線程都要“餵狗”": "#",
"最多收納多少個網頁的結果": "#",
"日": "#",
"第4步": "#",
"找不到任何python文件": "#",
"經過充分測試": "#",
"缺少的依賴": "#",
"內容太長了都會觸發token數量溢出的錯誤": "內容太長了都會觸發token數量溢出的錯誤",
"沒有提供高級參數功能說明": "未提供高級參數功能說明",
"和openai的連接容易斷掉": "和openai的連接容易斷掉",
"分组+迭代处理": "分組+迭代處理",
"安装Newbing的依赖": "安裝Newbing的依賴",
"批": "批",
@@ -1511,5 +1458,760 @@
"包括": "包括",
"或者": "或者",
"并执行函数的新版本": "並執行函數的新版本",
"论文": "論文"
"论文": "論文",
"解析一个Golang项目": "ParseAGolangProject",
"Latex英文纠错": "LatexEnglishCorrection",
"连接bing搜索回答问题": "ConnectToBingSearchForAnswer",
"联网的ChatGPT_bing版": "ChatGPT_BingVersionOnline",
"总结音视频": "SummarizeAudioAndVideo",
"动画生成": "GenerateAnimations",
"数学动画生成manim": "GenerateMathematicalAnimationsWithManim",
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
"知识库问答": "KnowledgeBaseQA",
"Langchain知识库": "LangchainKnowledgeBase",
"读取知识库作答": "ReadKnowledgeBaseAndAnswerQuestions",
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
"Latex输出PDF结果": "OutputPDFFromLatex",
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
"语音助手": "VoiceAssistant",
"微调数据集生成": "FineTuneDatasetGeneration",
"chatglm微调工具": "ChatGLM_FineTuningTool",
"启动微调": "StartFineTuning",
"sprint亮靛": "SprintLiangDian",
"寻找Latex主文件": "FindLatexMainFile",
"专业词汇声明": "ProfessionalTerminologyDeclaration",
"Latex精细分解与转化": "LatexFineDecompositionAndConversion",
"编译Latex": "CompileLatex",
"正在等您说完问题": "正在等您說完問題",
"最多同时执行5个": "最多同時執行5個",
"将文件复制一份到下载区": "將檔案複製一份到下載區",
"您接下来不能再使用其他插件了": "您接下來不能再使用其他插件了",
"如 绿帽子*深蓝色衬衫*黑色运动裤": "如 綠帽子*深藍色襯衫*黑色運動褲",
"首先你在中文语境下通读整篇论文": "首先您在中文語境下通讀整篇論文",
"根据给定的切割时长将音频文件切割成多个片段": "根據給定的切割時長將音訊檔切割成多個片段",
"接下来两句话只显示在界面上": "接下來兩句話只顯示在介面上",
"清空label": "清空標籤",
"正在尝试自动安装": "正在嘗試自動安裝",
"MOSS消耗大量的内存": "MOSS消耗大量的記憶體",
"如果这里报错": "如果這裡報錯",
"其他类型文献转化效果未知": "其他類型文獻轉換效果未知",
"ChatGPT综合": "ChatGPT綜合",
"音频文件的路径": "音訊檔案的路徑",
"执行错误": "執行錯誤",
"因此选择GenerateImage函数": "因此選擇GenerateImage函數",
"从摘要中提取高价值信息": "從摘要中提取高價值資訊",
"使用英文": "使用英文",
"是否在提交时自动清空输入框": "是否在提交時自動清空輸入框",
"生成数学动画": "生成數學動畫",
"正在加载Claude组件": "正在載入Claude元件",
"参数说明": "參數說明",
"建议排查": "建議排查",
"将消耗较长时间下载中文向量化模型": "將消耗較長時間下載中文向量化模型",
"test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBase讀取",
"安装Claude的依赖": "安裝Claude的相依性",
"以下所有配置也都支持利用环境变量覆写": "以下所有配置也都支持利用環境變數覆寫",
"需要被切割的音频文件名": "需要被切割的音頻文件名",
"保存当前对话": "保存當前對話",
"功能、贡献者": "功能、貢獻者",
"Chuanhu-Small-and-Beautiful主题": "Chuanhu-小而美主題",
"等待Claude响应": "等待Claude響應",
"其他模型转化效果未知": "其他模型轉換效果未知",
"版权归原文作者所有": "版權歸原文作者所有",
"回答完问题后": "回答完問題後",
"请先上传文件素材": "請先上傳文件素材",
"上传本地文件/压缩包供函数插件调用": "上傳本地文件/壓縮包供函數插件調用",
"P.S. 顺便把Latex的注释去除": "P.S. 順便把Latex的註釋去除",
"您提供的api-key不满足要求": "您提供的api-key不滿足要求",
"切割音频文件": "切割音頻文件",
"对不同latex源文件扣分": "對不同latex源文件扣分",
"以下是一篇学术论文的基础信息": "以下是一篇學術論文的基礎信息",
"问题": "問題",
"待注入的知识库名称id": "待注入的知識庫名稱id",
"”的主要内容": "”的主要內容",
"获取设置": "獲取設置",
"str类型": "str類型",
"多线程": "多線程",
"尝试执行Latex指令失败": "嘗試執行Latex指令失敗",
"然后再写一段英文摘要": "然後再寫一段英文摘要",
"段音频的主要内容": "段音頻的主要內容",
"临时地激活代理网络": "臨時地激活代理網絡",
"网络的远程文件": "網絡的遠程文件",
"不能正常加载ChatGLMFT的参数": "無法正常載入ChatGLMFT的參數",
"正在编译PDF文档": "正在編譯PDF文件",
"等待ChatGLMFT响应中": "等待ChatGLMFT回應中",
"将": "將",
"片段": "片段",
"修复括号": "修復括號",
"条": "條",
"建议直接在API_KEY处填写": "建議直接在API_KEY處填寫",
"根据需要切换prompt": "根據需要切換prompt",
"使用": "使用",
"请输入要翻译成哪种语言": "請輸入要翻譯成哪種語言",
"实际得到格式": "實際得到格式",
"例如 f37f30e0f9934c34a992f6f64f7eba4f": "例如 f37f30e0f9934c34a992f6f64f7eba4f",
"请切换至“KnowledgeBaseQA”插件进行知识库访问": "請切換至“KnowledgeBaseQA”插件進行知識庫訪問",
"用户填3": "用戶填3",
"远程云服务器部署": "遠程雲服務器部署",
"未知指令": "未知指令",
"每个线程都要“喂狗”": "每個線程都要“喂狗”",
"该项目的Latex主文件是": "該項目的Latex主文件是",
"设置OpenAI密钥和模型": "設置OpenAI密鑰和模型",
"填入你亲手写的部署名": "填入你親手寫的部署名",
"仅调试": "僅調試",
"依赖不足": "依賴不足",
"右下角更换模型菜单中可切换openai": "右下角更換模型菜單中可切換openai",
"解析整个CSharp项目": "解析整個CSharp項目",
"唤起高级参数输入区": "喚起高級參數輸入區",
"这个bug没找到触发条件": "這個bug沒找到觸發條件",
"========================================= 插件主程序2 =====================================================": "========================================= 插件主程序2 =====================================================",
"经过充分测试": "經過充分測試",
"该文件中主要包含三个函数": "該文件中主要包含三個函數",
"您可以到Github Issue区": "您可以到Github Issue區",
"避免线程阻塞": "避免線程阻塞",
"吸收iffalse注释": "吸收iffalse註釋",
"from crazy_functions.虚空终端 import 终端": "from crazy_functions.虛空終端 import 終端",
"异步方法": "異步方法",
"块元提取": "塊元提取",
"Your account is not active. OpenAI以账户失效为由": "您的帳戶未啟用。OpenAI以帳戶失效為由",
"还原部分原文": "還原部分原文",
"如果要使用Claude": "如果要使用Claude",
"把文件复制过去": "把文件複製過去",
"解压失败! 需要安装pip install rarfile来解压rar文件": "解壓失敗需要安裝pip install rarfile來解壓rar文件",
"正在锁定插件": "正在鎖定插件",
"输入 clear 以清空对话历史": "輸入 clear 以清空對話歷史",
"P.S. 但愿没人把latex模板放在里面传进来": "P.S. 但願沒人把latex模板放在裡面傳進來",
"实时音频采集": "實時音頻採集",
"开始最终总结": "開始最終總結",
"拒绝服务": "拒絕服務",
"配置教程&视频教程": "配置教程&視頻教程",
"所有音频都总结完成了吗": "所有音頻都總結完成了嗎",
"返回": "返回",
"避免不小心传github被别人看到": "避免不小心傳github被別人看到",
"否则将导致每个人的Claude问询历史互相渗透": "否則將導致每個人的Claude問詢歷史互相滲透",
"提问吧! 但注意": "提問吧!但注意",
"待处理的word文档路径": "待處理的word文檔路徑",
"欢迎加REAME中的QQ联系开发者": "歡迎加REAME中的QQ聯繫開發者",
"建议暂时不要使用": "建議暫時不要使用",
"Latex没有安装": "Latex沒有安裝",
"在这里放一些网上搜集的demo": "在這裡放一些網上搜集的demo",
"实现消息发送、接收等功能": "實現消息發送、接收等功能",
"用于与with语句一起使用": "用於與with語句一起使用",
"解压失败! 需要安装pip install py7zr来解压7z文件": "解壓失敗! 需要安裝pip install py7zr來解壓7z文件",
"借助此参数": "借助此參數",
"判定为数据流的结束": "判定為數據流的結束",
"提取文件扩展名": "提取文件擴展名",
"GPT结果已输出": "GPT結果已輸出",
"读取文件": "讀取文件",
"如果OpenAI不响应": "如果OpenAI不響應",
"输入部分太自由": "輸入部分太自由",
"用于给一小段代码上代理": "用於給一小段代碼上代理",
"输入 stop 以终止对话": "輸入 stop 以終止對話",
"这个paper有个input命令文件名大小写错误": "這個paper有個input命令文件名大小寫錯誤",
"等待Claude回复的片段": "等待Claude回復的片段",
"开始": "開始",
"将根据报错信息修正tex源文件并重试": "將根據報錯信息修正tex源文件並重試",
"建议更换代理协议": "建議更換代理協議",
"递归地切割PDF文件": "遞歸地切割PDF文件",
"读 docs\\use_azure.md": "讀 docs\\use_azure.md",
"参数": "參數",
"屏蔽空行和太短的句子": "屏蔽空行和太短的句子",
"分析上述回答": "分析上述回答",
"因为在同一个频道里存在多人使用时历史消息渗透问题": "因為在同一個頻道裡存在多人使用時歷史消息滲透問題",
"使用latexdiff生成論文轉化前後對比": "使用latexdiff生成論文轉化前後對比",
"檢查結果": "檢查結果",
"請在此處追加更細緻的校錯指令": "請在此處追加更細緻的校錯指令",
"報告如何遠程獲取": "報告如何遠程獲取",
"發現已經存在翻譯好的PDF文檔": "發現已經存在翻譯好的PDF文檔",
"插件鎖定中": "插件鎖定中",
"正在精細切分latex文件": "正在精細切分latex文件",
"數學GenerateAnimations": "數學GenerateAnimations",
"上傳文件自動修正路徑": "上傳文件自動修正路徑",
"請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期": "請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期",
"上傳Latex項目": "上傳LaTeX項目",
"Aliyun音頻服務異常": "Aliyun音頻服務異常",
"為了防止大語言模型的意外謬誤產生擴散影響": "為了防止大語言模型的意外謬誤產生擴散影響",
"調用Claude時": "調用Claude時",
"解除插件鎖定": "解除插件鎖定",
"暗色模式 / 亮色模式": "暗色模式 / 亮色模式",
"只有第二步成功": "只有第二步成功",
"分析结果": "分析結果",
"用第二人称": "使用第二人稱",
"详情见https": "詳情請見https",
"记住当前的label": "記住當前的標籤",
"当无法用标点、空行分割时": "當無法用標點符號、空行分割時",
"如果分析错误": "如果分析錯誤",
"如果有必要": "如果有必要",
"不要修改!! 高危设置!通过修改此设置": "不要修改!! 高危設置!通過修改此設置",
"ChatGLMFT消耗大量的内存": "ChatGLMFT消耗大量的內存",
"摘要生成后的文档路径": "摘要生成後的文件路徑",
"对全文进行概括": "對全文進行概述",
"LLM_MODEL是默认选中的模型": "LLM_MODEL是默認選中的模型",
"640个字节为一组": "640個字節為一組",
"获取关键词": "獲取關鍵詞",
"解析为简体中文": "解析為簡體中文",
"将 \\include 命令转换为 \\input 命令": "將 \\include 命令轉換為 \\input 命令",
"默认值为1000": "默認值為1000",
"手动指定语言": "手動指定語言",
"请登录OpenAI查看详情 https": "請登錄OpenAI查看詳情 https",
"尝试第": "嘗試第",
"每秒采样数量": "每秒採樣數量",
"加载失败!": "加載失敗!",
"方法": "方法",
"对这个人外貌、身处的环境、内心世界、过去经历进行描写": "對這個人外貌、身處的環境、內心世界、過去經歷進行描寫",
"请先将.doc文档转换为.docx文档": "請先將.doc文檔轉換為.docx文檔",
"定位主Latex文件": "定位主Latex文件",
"批量SummarizeAudioAndVideo": "批量摘要音视频",
"终端": "終端",
"即将退出": "即將退出",
"找不到": "找不到",
"正在听您讲话": "正在聆聽您講話",
"请您不要删除或修改这行警告": "請勿刪除或修改此警告",
"没有阿里云语音识别APPKEY和TOKEN": "沒有阿里雲語音識別APPKEY和TOKEN",
"临时地启动代理网络": "臨時啟動代理網絡",
"请尝试把以下指令复制到高级参数区": "請將以下指令複製到高級參數區",
"中文Bing版": "中文Bing版",
"计算文件总时长和切割点": "計算文件總時長和切割點",
"寻找主文件": "尋找主文件",
"jittorllms尚未加载": "jittorllms尚未加載",
"使用正则表达式查找半行注释": "使用正則表達式查找半行註釋",
"文档越长耗时越长": "文檔越長耗時越長",
"生成中文PDF": "生成中文PDF",
"写入文件": "寫入文件",
"第三组插件": "第三組插件",
"开始接收chatglmft的回复": "開始接收chatglmft的回覆",
"由于提问含不合规内容被Azure过滤": "由於提問含不合規內容被Azure過濾",
"安装方法https": "安裝方法https",
"是否自动处理token溢出的情况": "是否自動處理token溢出的情況",
"如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "如果需要使用AZURE 詳情請見額外文檔 docs\\use_azure.md",
"将要忽略匹配的文件后缀": "將要忽略匹配的文件後綴",
"authors获取失败": "authors獲取失敗",
"发送到openai音频解析终端": "發送到openai音頻解析終端",
"请开始多线程操作": "請開始多線程操作",
"对这个人外貌、身处的环境、内心世界、人设进行描写": "對這個人外貌、身處的環境、內心世界、人設進行描寫",
"MOSS can understand and communicate fluently in the language chosen by the user such as English and 中文. MOSS can perform any language-based tasks.": "MOSS可以流利地理解和使用用戶選擇的語言例如英語和中文。MOSS可以執行任何基於語言的任務。",
"work_folder = Latex預處理": "設置工作目錄為Latex預處理",
"然後轉移到指定的另一個路徑中": "然後轉移到指定的另一個路徑中",
"使用Newbing": "使用Newbing",
"詳情信息見requirements.txt": "詳細信息請參閱requirements.txt",
"開始下載": "開始下載",
"多線程翻譯開始": "多線程翻譯開始",
"當前大語言模型": "當前大語言模型",
"格式如org-123456789abcdefghijklmno的": "格式如org-123456789abcdefghijklmno的",
"當下一次用戶提交時": "當下一次用戶提交時",
"需要特殊依賴": "需要特殊依賴",
"次編譯": "次編譯",
"先上傳數據集": "先上傳數據集",
"gpt寫的": "gpt寫的",
"調用緩存": "調用緩存",
"优先级1. 获取环境变量作为配置": "優先級1. 獲取環境變量作為配置",
"检查config中的AVAIL_LLM_MODELS选项": "檢查config中的AVAIL_LLM_MODELS選項",
"并且对于网络上的文件": "並且對於網絡上的文件",
"根据文本使用GPT模型生成相应的图像": "根據文本使用GPT模型生成相應的圖像",
"功能描述": "功能描述",
"翻译结果": "翻譯結果",
"需要预先pip install rarfile": "需要預先pip install rarfile",
"等待响应": "等待響應",
"我们剥离Introduction之后的部分": "我們剝離Introduction之後的部分",
"函数插件-固定按钮区": "函數插件-固定按鈕區",
"临时存储用于调试": "臨時存儲用於調試",
"比正文字体小": "比正文字體小",
"会直接转到该函数": "會直接轉到該函數",
"请以以下方式load模型": "請以以下方式load模型",
"请输入关键词": "請輸入關鍵詞",
"返回找到的第一个": "返回找到的第一個",
"高级参数输入区": "高級參數輸入區",
"精细切分latex文件": "精細切分latex文件",
"赋予插件锁定 锁定插件回调路径": "賦予插件鎖定 鎖定插件回調路徑",
"尝试下载": "嘗試下載",
"包含documentclass关键字": "包含documentclass關鍵字",
"在一个异步线程中采集音频": "在一個異步線程中採集音頻",
"先删除": "先刪除",
"则跳过GPT请求环节": "則跳過GPT請求環節",
"Not enough point. API2D账户点数不足": "Not enough point. API2D帳戶點數不足",
"如果一句话小于7个字": "如果一句話小於7個字",
"具备以下功能": "具備以下功能",
"请查看终端的输出或耐心等待": "請查看終端的輸出或耐心等待",
"对输入的word文档进行摘要生成": "對輸入的word文檔進行摘要生成",
"只读": "只讀",
"文本碎片重组为完整的tex文件": "文本碎片重組為完整的tex文件",
"通过调用conversations_open方法打开一个频道": "通過調用conversations_open方法打開一個頻道",
"对话历史文件损坏!": "對話歷史文件損壞!",
"再失败就没办法了": "再失敗就沒辦法了",
"原始PDF编译是否成功": "原始PDF編譯是否成功",
"不能正常加载jittorllms的参数": "不能正常加載jittorllms的參數",
"正在编译对比PDF": "正在編譯對比PDF",
"找不到微调模型检查点": "找不到微調模型檢查點",
"将生成的报告自动投射到文件上传区": "將生成的報告自動投射到文件上傳區",
"请对这部分内容进行语法矫正": "請對這部分內容進行語法校正",
"编译已经开始": "編譯已經開始",
"需要读取和清理文本的pdf文件路径": "需要讀取和清理文本的pdf文件路徑",
"读取文件内容到内存": "讀取文件內容到內存",
"用&符号分隔": "用&符號分隔",
"输入arxivID": "輸入arxivID",
"找 API_ORG 设置项": "找API_ORG設置項",
"分析用户提供的谷歌学术": "分析用戶提供的谷歌學術",
"欢迎使用 MOSS 人工智能助手!输入内容即可进行对话": "歡迎使用 MOSS 人工智能助手!輸入內容即可進行對話",
"段音频的第": "段音頻的第",
"没有找到任何可读取文件": "沒有找到任何可讀取文件",
"目前仅支持GPT3.5/GPT4": "目前僅支持GPT3.5/GPT4",
"为每一位访问的用户赋予一个独一无二的uuid编码": "為每一位訪問的用戶賦予一個獨一無二的uuid編碼",
"内含已经翻译的Tex文档": "內含已經翻譯的Tex文檔",
"消耗时间的函数": "消耗時間的函數",
"成功啦": "成功啦",
"环境变量配置格式见docker-compose.yml": "環境變量配置格式見docker-compose.yml",
"将每次对话记录写入Markdown格式的文件中": "將每次對話記錄寫入Markdown格式的文件中",
"报告已经添加到右侧“文件上传区”": "報告已經添加到右側“文件上傳區”",
"此处可以输入解析提示": "此處可以輸入解析提示",
"缺少MOSS的依赖": "缺少MOSS的依賴",
"仅在Windows系统进行了测试": "僅在Windows系統進行了測試",
"然后重启程序": "然後重啟程序",
"此处不修改": "此處不修改",
"输出html调试文件": "輸出html調試文件",
"6.25 加入判定latex模板的代码": "6.25 加入判定latex模板的代碼",
"提取总结": "提取總結",
"要求": "要求",
"由于最为关键的转化PDF编译失败": "由於最為關鍵的轉化PDF編譯失敗",
"除非您是论文的原作者": "除非您是論文的原作者",
"输入问题后点击该插件": "輸入問題後點擊該插件",
"该选项即将被弃用": "該選項即將被棄用",
"再列出用户可能提出的三个问题": "再列出用戶可能提出的三個問題",
"所有文件都总结完成了吗": "所有文件都總結完成了嗎",
"请稍候": "請稍候",
"向chatbot中添加简单的意外错误信息": "向chatbot中添加簡單的意外錯誤信息",
"快捷的调试函数": "快捷的調試函數",
"LatexEnglishCorrection+高亮修正位置": "Latex英文校正+高亮修正位置",
"循环监听已打开频道的消息": "循環監聽已打開頻道的消息",
"将指定目录下的PDF文件从英文翻译成中文": "將指定目錄下的PDF文件從英文翻譯成中文",
"请对下面的音频片段做概述": "請對下面的音頻片段做概述",
"openai的官方KEY需要伴隨组织编码": "openai的官方KEY需要伴隨組織編碼",
"表示频道ID": "頻道ID",
"当前支持的格式包括": "目前支援的格式包括",
"只有GenerateImage和生成图像相关": "僅限GenerateImage和生成圖像相關",
"删除中间文件夹": "刪除中間資料夾",
"解除插件状态": "解除插件狀態",
"正在预热文本向量化模组": "正在預熱文本向量化模組",
"100字以内": "限制100字內",
"如果缺少依赖": "如果缺少相依性",
"寻找主tex文件": "尋找主要tex檔案",
"gpt 多线程请求": "gpt 多線程請求",
"已知某些代码的局部作用是": "已知某些程式碼的局部作用是",
"--读取文件": "--讀取檔案",
"前面是中文冒号": "前面是中文冒號",
"*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{警告",
"OpenAI所允许的最大并行过载": "OpenAI所允許的最大並行過載",
"请直接去该路径下取回翻译结果": "請直接前往該路徑取回翻譯結果",
"以免输入溢出": "以免輸入溢出",
"把某个路径下所有文件压缩": "壓縮某個路徑下的所有檔案",
"问询记录": "詢問記錄",
"Tex源文件缺失": "Tex原始檔案遺失",
"当前参数": "目前參數",
"处理markdown文本格式的转变": "處理markdown文本格式的轉換",
"尝试加载": "嘗試載入",
"请在此处给出自定义翻译命令": "請在此處提供自訂翻譯命令",
"这需要一段时间计算": "這需要一段時間計算",
"-构建知识库": "-建立知識庫",
"还需要填写组织": "還需要填寫組織",
"当前知识库内的有效文件": "當前知識庫內的有效文件",
"第一次调用": "第一次調用",
"从一批文件": "從一批文件",
"json等": "json等",
"翻译-": "翻譯-",
"编译文献交叉引用": "編譯文獻交叉引用",
"优先级2. 获取config_private中的配置": "優先級2. 獲取config_private中的配置",
"可选": "可選",
"我们": "我們",
"编译结束": "編譯結束",
"或代理节点": "或代理節點",
"chatGPT 分析报告": "chatGPT 分析報告",
"调用openai api 使用whisper-1模型": "調用openai api 使用whisper-1模型",
"这段代码定义了一个名为TempProxy的空上下文管理器": "這段代碼定義了一個名為TempProxy的空上下文管理器",
"生成的视频文件路径": "生成的視頻文件路徑",
"请直接提交即可": "請直接提交即可",
"=================================== 工具函数 ===============================================": "=================================== 工具函數 ===============================================",
"报错信息如下. 如果是与网络相关的问题": "報錯信息如下. 如果是與網絡相關的問題",
"python 版本建议3.9+": "python 版本建議3.9+",
"多线程函数插件中": "多線程函數插件中",
"对话助手函数插件": "對話助手函數插件",
"或者重启之后再度尝试": "或者重啟之後再度嘗試",
"拆分过长的latex片段": "拆分過長的latex片段",
"调用whisper模型音频转文字": "調用whisper模型音頻轉文字",
"失败啦": "失敗啦",
"正在编译PDF": "正在編譯PDF",
"请刷新界面重试": "請刷新界面重試",
"模型参数": "模型參數",
"写出文件": "寫出文件",
"第二组插件": "第二組插件",
"在多Tex文档中": "在多Tex文檔中",
"有线程锁": "有線程鎖",
"释放线程锁": "釋放線程鎖",
"读取优先级": "讀取優先級",
"Linux下必须使用Docker安装": "Linux下必須使用Docker安裝",
"例如您可以将以下命令复制到下方": "例如您可以將以下命令複製到下方",
"导入依赖失败": "導入依賴失敗",
"给出一些判定模板文档的词作为扣分项": "給出一些判定模板文檔的詞作為扣分項",
"等待Claude响应中": "等待Claude響應中",
"Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数": "Call ChatGLMFT fail 不能正常加載ChatGLMFT的參數",
"但本地存储了以下历史文件": "但本地存儲了以下歷史文件",
"如果存在调试缓存文件": "如果存在調試緩存文件",
"如果这里抛出异常": "如果這裡拋出異常",
"详见项目主README.md": "詳見項目主README.md",
"作者": "作者",
"现在您点击任意“红颜色”标识的函数插件时": "現在您點擊任意“紅顏色”標識的函數插件時",
"上下文管理器必须实现两个方法": "上下文管理器必須實現兩個方法",
"匹配^数字^": "匹配^數字^",
"也是可读的": "也是可讀的",
"将音频解析为简体中文": "將音頻解析為簡體中文",
"依次访问网页": "依次訪問網頁",
"P.S. 顺便把CTEX塞进去以支持中文": "P.S. 順便把CTEX塞進去以支持中文",
"NewBing响应异常": "NewBing響應異常",
"获取已打开频道的最新消息并返回消息列表": "獲取已打開頻道的最新消息並返回消息列表",
"请使用Markdown": "請使用Markdown",
"例如 RoPlZrM88DnAFkZK": "例如 RoPlZrM88DnAFkZK",
"编译BibTex": "編譯BibTex",
"Claude失败": "Claude失敗",
"请更换为API_URL_REDIRECT配置": "請更換為API_URL_REDIRECT配置",
"P.S. 其他可用的模型还包括": "P.S. 其他可用的模型還包括",
"色彩主体": "色彩主體",
"后面是英文逗号": "後面是英文逗號",
"下载pdf文件未成功": "下載pdf文件未成功",
"删除整行的空注释": "刪除整行的空注釋",
"吸收匿名公式": "吸收匿名公式",
"从而更全面地理解项目的整体功能": "從而更全面地理解項目的整體功能",
"不需要再次转化": "不需要再次轉化",
"可以将自身的状态存储到cookie中": "可以將自身的狀態存儲到cookie中",
"1、英文题目2、中文题目翻译3、作者4、arxiv公开": "1、英文題目2、中文題目翻譯3、作者4、arxiv公開",
"GPT 学术优化": "GPT 學術優化",
"解析整个Python项目": "解析整個Python項目",
"吸收其他杂项": "吸收其他雜項",
"-预热文本向量化模组": "-預熱文本向量化模組",
"Claude组件初始化成功": "Claude組件初始化成功",
"此处填API密钥": "此處填API密鑰",
"请继续分析其他源代码": "請繼續分析其他源代碼",
"质能方程式": "質能方程式",
"功能尚不稳定": "功能尚不穩定",
"使用教程详情见 request_llm/README.md": "使用教程詳情見 request_llm/README.md",
"从以上搜索结果中抽取信息": "從以上搜索結果中抽取信息",
"虽然PDF生成失败了": "雖然PDF生成失敗了",
"找图片": "尋找圖片",
"还原原文": "還原原文",
"可调节线程池的大小避免openai的流量限制错误": "可調整線程池大小以避免openai流量限制錯誤",
"正在提取摘要并下载PDF文档……": "正在提取摘要並下載PDF文件......",
"缺少ChatGLMFT的依赖": "缺少ChatGLMFT的依賴",
"不会实时显示在界面上": "不會即時顯示在界面上",
"解决部分词汇翻译不准确的问题": "解決部分詞彙翻譯不準確的問題",
"等待多线程操作": "等待多線程操作",
"吸收title与作者以上的部分": "吸收標題與作者以上的部分",
"如果需要使用Slack Claude": "如果需要使用Slack Claude",
"一、论文概况": "一、論文概況",
"默认为Chinese": "默認為中文",
"图像生成所用到的提示文本": "圖像生成所用到的提示文本",
"向已打开的频道发送一条文本消息": "向已打開的頻道發送一條文本消息",
"如果某个子任务出错": "如果某個子任務出錯",
"chatglmft 没有 sys_prompt 接口": "chatglmft沒有sys_prompt接口",
"对比PDF编译是否成功": "對比PDF編譯是否成功",
"免费": "免費",
"请讲话": "請講話",
"安装ChatGLM的依赖": "安裝ChatGLM的依賴",
"对IPynb文件进行解析": "對IPynb文件進行解析",
"文件路径列表": "文件路徑列表",
"或者使用此插件继续上传更多文件": "或者使用此插件繼續上傳更多文件",
"随机负载均衡": "隨機負載均衡",
"!!!如果需要运行量化版本": "!!!如果需要運行量化版本",
"注意目前不能多人同时调用Claude接口": "注意目前不能多人同時調用Claude接口",
"文件读取完成": "文件讀取完成",
"用于灵活调整复杂功能的各种参数": "用於靈活調整複雜功能的各種參數",
"**函数功能**": "**函數功能**",
"先切换模型到openai或api2d": "先切換模型到openai或api2d",
"You are associated with a deactivated account. OpenAI以账户失效为由": "您的帳戶已停用。OpenAI以帳戶失效為由",
"你的回答必须简单明了": "您的回答必須簡單明了",
"是否丢弃掉 不是正文的内容": "是否丟棄掉 不是正文的內容",
"但请查收结果": "但請查收結果",
"Claude响应缓慢": "Claude響應緩慢",
"需Latex": "需Latex",
"Claude回复的片段": "Claude回復的片段",
"如果要使用ChatGLMFT": "如果要使用ChatGLMFT",
"它*必须*被包含在AVAIL_LLM_MODELS列表中": "它*必須*被包含在AVAIL_LLM_MODELS列表中",
"前面是中文逗号": "前面是中文逗號",
"需要预先pip install py7zr": "需要預先pip install py7zr",
"将前后断行符脱离": "將前後斷行符脫離",
"防止丢失最后一条消息": "防止丟失最後一條消息",
"初始化插件状态": "初始化插件狀態",
"以秒为单位": "以秒為單位",
"中文Latex项目全文润色": "中文Latex項目全文潤色",
"对整个Latex项目进行纠错": "對整個Latex項目進行校對",
"NEWBING_COOKIES未填写或有格式错误": "NEWBING_COOKIES未填寫或有格式錯誤",
"函数插件作者": "函數插件作者",
"结束": "結束",
"追加历史": "追加歷史",
"您需要首先调用构建知识库": "您需要首先調用構建知識庫",
"如果程序停顿5分钟以上": "如果程序停頓5分鐘以上",
"ChatGLMFT响应异常": "ChatGLMFT響應異常",
"根据当前的模型类别": "根據當前的模型類別",
"才能继续下面的步骤": "才能繼續下面的步驟",
"并将返回的频道ID保存在属性CHANNEL_ID中": "並將返回的頻道ID保存在屬性CHANNEL_ID中",
"请查收结果": "請查收結果",
"解决插件锁定时的界面显示问题": "解決插件鎖定時的界面顯示問題",
"待提取的知识库名称id": "待提取的知識庫名稱id",
"Claude响应异常": "Claude響應異常",
"当前代理可用性": "當前代理可用性",
"代理网络配置": "代理網絡配置",
"我将为您查找相关壁纸": "我將為您查找相關壁紙",
"没给定指令": "沒給定指令",
"音频内容是": "音頻內容是",
"用该压缩包+ConversationHistoryArchive进行反馈": "用該壓縮包+ConversationHistoryArchive進行反饋",
"总结音频": "總結音頻",
"等待用户的再次调用": "等待用戶的再次調用",
"永远给定None": "永遠給定None",
"论文概况": "論文概況",
"建议使用英文单词": "建議使用英文單詞",
"刷新Gradio前端界面": "刷新Gradio前端界面",
"列表递归接龙": "列表遞歸接龍",
"赋予插件状态": "賦予插件狀態",
"构建完成": "構建完成",
"避免多用户干扰": "避免多用戶干擾",
"当前工作路径为": "當前工作路徑為",
"用黑色标注转换区": "用黑色標注轉換區",
"压缩包": "壓縮包",
"刷新页面即可以退出KnowledgeBaseQA模式": "刷新頁面即可以退出KnowledgeBaseQA模式",
"拆分过长的Markdown文件": "拆分過長的Markdown文件",
"生成时间戳": "生成時間戳",
"尚未完成全部响应": "尚未完成全部響應",
"HotReload的装饰器函数": "HotReload的裝飾器函數",
"请务必用 pip install -r requirements.txt 指令安装依赖": "請務必用 pip install -r requirements.txt 指令安裝依賴",
"TGUI不支持函数插件的实现": "TGUI不支持函數插件的實現",
"音频文件名": "音頻文件名",
"找不到任何音频或视频文件": "找不到任何音頻或視頻文件",
"音频解析结果": "音頻解析結果",
"如果使用ChatGLM2微调模型": "如果使用ChatGLM2微調模型",
"限制的3/4时": "限制的3/4時",
"获取回复": "獲取回復",
"对话历史写入": "對話歷史寫入",
"记录删除注释后的文本": "記錄刪除註釋後的文本",
"整理结果为压缩包": "整理結果為壓縮包",
"注意事项": "注意事項",
"请耐心等待": "請耐心等待",
"在执行完成之后": "在執行完成之後",
"参数简单": "參數簡單",
"Arixv论文精细翻译": "Arixv論文精細翻譯",
"备份和下载": "備份和下載",
"当前报错的latex代码处于第": "當前報錯的latex代碼處於第",
"Markdown翻译": "Markdown翻譯",
"英文Latex项目全文纠错": "英文Latex項目全文校對",
"获取预处理函数": "獲取預處理函數",
"add gpt task 创建子线程请求gpt": "add gpt task 創建子線程請求gpt",
"一个包含所有切割音频片段文件路径的列表": "一個包含所有切割音頻片段文件路徑的列表",
"解析arxiv网址失败": "解析arxiv網址失敗",
"PDF文件所在的路径": "PDF文件所在路徑",
"取评分最高者返回": "取評分最高者返回",
"此插件处于开发阶段": "此插件處於開發階段",
"如果已经存在": "如果已經存在",
"或者不在环境变量PATH中": "或者不在環境變量PATH中",
"目前支持的格式": "目前支持的格式",
"将多文件tex工程融合为一个巨型tex": "將多文件tex工程融合為一個巨型tex",
"暂不提交": "暫不提交",
"调用函数": "調用函數",
"编译转化后的PDF": "編譯轉化後的PDF",
"将代码转为动画": "將代碼轉為動畫",
"本地Latex论文精细翻译": "本地Latex論文精細翻譯",
"删除或修改歧义文件": "刪除或修改歧義文件",
"其他操作系统表现未知": "其他操作系統表現未知",
"此插件Windows支持最佳": "此插件Windows支持最佳",
"构建知识库": "構建知識庫",
"每个切割音频片段的时长": "每個切割音頻片段的時長",
"用latex编译为PDF对修正处做高亮": "用latex編譯為PDF對修正處做高亮",
"行": "行",
"= 2 通过一些Latex模板中常见": "= 2 通過一些Latex模板中常見",
"如参考文献、脚注、图注等": "如參考文獻、腳註、圖註等",
"期望格式例如": "期望格式例如",
"翻译内容可靠性无保障": "翻譯內容可靠性無保障",
"请用一句话概括这些文件的整体功能": "請用一句話概括這些文件的整體功能",
"段音频完成了吗": "段音頻完成了嗎",
"填入azure openai api的密钥": "填入azure openai api的密鑰",
"文本碎片重组为完整的tex片段": "文本碎片重組為完整的tex片段",
"吸收在42行以內的begin-end組合": "吸收在42行以內的begin-end組合",
"屬性": "屬性",
"必須包含documentclass": "必須包含documentclass",
"等待GPT響應": "等待GPT響應",
"當前語言模型溫度設定": "當前語言模型溫度設定",
"模型選擇是": "選擇的模型為",
"reverse 操作必須放在最後": "reverse 操作必須放在最後",
"將子線程的gpt結果寫入chatbot": "將子線程的gpt結果寫入chatbot",
"默認為default": "默認為default",
"目前對機器學習類文獻轉化效果最好": "目前對機器學習類文獻轉化效果最好",
"主程序即將開始": "主程序即將開始",
"點擊“停止”鍵可終止程序": "點擊“停止”鍵可終止程序",
"正在處理": "正在處理",
"請立即終止程序": "請立即停止程序",
"將 chatglm 直接對齊到 chatglm2": "將 chatglm 直接對齊到 chatglm2",
"音頻助手": "音頻助手",
"正在構建知識庫": "正在構建知識庫",
"請向下翻": "請向下滾動頁面",
"後面是英文冒號": "後面是英文冒號",
"無法找到一個主Tex文件": "無法找到一個主Tex文件",
"使用中文总结音频“": "使用中文總結音頻",
"该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "該PDF由GPT-Academic開源項目調用大語言模型+Latex翻譯插件一鍵生成",
"开始生成动画": "開始生成動畫",
"完成情况": "完成情況",
"然后进行问答": "然後進行問答",
"为啥chatgpt会把cite里面的逗号换成中文逗号呀": "為啥chatgpt會把cite裡面的逗號換成中文逗號呀",
"暂时不支持历史消息": "暫時不支持歷史消息",
"项目Github地址 \\url{https": "項目Github地址 \\url{https",
"Newbing 请求失败": "Newbing 請求失敗",
"根据自然语言执行插件命令": "根據自然語言執行插件命令",
"迭代上一次的结果": "迭代上一次的結果",
"azure和api2d请求源": "azure和api2d請求源",
"格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "格式如org-xxxxxxxxxxxxxxxxxxxxxxxx",
"推荐http": "推薦http",
"将要匹配的模式": "將要匹配的模式",
"代理数据解析失败": "代理數據解析失敗",
"创建存储切割音频的文件夹": "創建存儲切割音頻的文件夾",
"用红色标注处保留区": "用紅色標注處保留區",
"至少一个线程任务Token溢出而失败": "至少一個線程任務Token溢出而失敗",
"获取Slack消息失败": "獲取Slack消息失敗",
"极少数情况下": "極少數情況下",
"辅助gpt生成代码": "輔助gpt生成代碼",
"生成图像": "生成圖像",
"最多收纳多少个网页的结果": "最多收納多少個網頁的結果",
"获取图片URL": "獲取圖片URL",
"正常状态": "正常狀態",
"编译原始PDF": "編譯原始PDF",
"SummarizeAudioAndVideo内容": "音視頻摘要內容",
"Latex文件融合完成": "Latex文件融合完成",
"获取线程锁": "獲取線程鎖",
"SlackClient类用于与Slack API进行交互": "SlackClient類用於與Slack API進行交互",
"检测到arxiv文档连接": "檢測到arxiv文檔連接",
"--读取参数": "--讀取參數",
"如果您是论文原作者": "如果您是論文原作者",
"5刀": "5美元",
"转化PDF编译是否成功": "轉換PDF編譯是否成功",
"生成带有段落标签的HTML代码": "生成帶有段落標籤的HTML代碼",
"目前不支持历史消息查询": "目前不支持歷史消息查詢",
"将文件添加到chatbot cookie中": "將文件添加到chatbot cookie中",
"多线程操作已经开始": "多線程操作已經開始",
"请求子进程": "請求子進程",
"将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词": "將Unsplash API中的PUT_YOUR_QUERY_HERE替換成描述該事件的一個最重要的單詞",
"不能加载Claude组件": "不能加載Claude組件",
"请仔细鉴别并以原文为准": "請仔細鑒別並以原文為準",
"否则结束循环": "否則結束循環",
"插件可读取“输入区”文本/路径作为参数": "插件可讀取“輸入區”文本/路徑作為參數",
"网络错误": "網絡錯誤",
"想象一个穿着者": "想像一個穿著者",
"避免遗忘导致死锁": "避免遺忘導致死鎖",
"保证括号正确": "保證括號正確",
"报错信息": "錯誤信息",
"提取视频中的音频": "提取視頻中的音頻",
"初始化音频采集线程": "初始化音頻採集線程",
"参考文献转Bib": "參考文獻轉Bib",
"阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "阿里云即時語音識別配置難度較高,僅建議高手用戶使用,參考 https",
"使用时": "使用時",
"处理个别特殊插件的锁定状态": "處理個別特殊插件的鎖定狀態",
"但通常不会出现在正文": "但通常不會出現在正文",
"此函数逐渐地搜索最长的条目进行剪辑": "此函數逐漸地搜索最長的條目進行剪輯",
"给出指令": "給出指令",
"读取音频文件": "讀取音頻文件",
"========================================= 插件主程序1 =====================================================": "========================================= 插件主程序1 =====================================================",
"带超时倒计时": "帶超時倒計時",
"禁止移除或修改此警告": "禁止移除或修改此警告",
"ChatGLMFT尚未加载": "ChatGLMFT尚未加載",
"双手离开鼠标键盘吧": "雙手離開鼠標鍵盤吧",
"缺少的依赖": "缺少的依賴",
"的单词": "的單詞",
"中读取数据构建知识库": "中讀取數據構建知識庫",
"函数热更新是指在不停止程序运行的情况下": "函數熱更新是指在不停止程序運行的情況下",
"建议低于1": "建議低於1",
"转化PDF编译已经成功": "轉換PDF編譯已經成功",
"出问题了": "出問題了",
"欢迎使用 MOSS 人工智能助手!": "歡迎使用 MOSS 人工智能助手!",
"正在精细切分latex文件": "正在精細切分LaTeX文件",
"”补上": "”補上",
"网络代理状态": "網路代理狀態",
"依赖检测通过": "依賴檢測通過",
"默认为default": "預設為default",
"Call MOSS fail 不能正常加载MOSS的参数": "呼叫MOSS失敗無法正常載入MOSS參數",
"音频助手": "音頻助手",
"次编译": "次編譯",
"其他错误": "其他錯誤",
"属性": "屬性",
"主程序即将开始": "主程式即將開始",
"Aliyun音频服务异常": "Aliyun音頻服務異常",
"response中会携带traceback报错信息": "response中會攜帶traceback錯誤信息",
"一些普通功能模块": "一些普通功能模組",
"和openai的连接容易断掉": "和openai的連線容易斷掉",
"请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期": "請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期",
"调用Claude时": "呼叫Claude時",
"插件锁定中": "插件鎖定中",
"将子线程的gpt结果写入chatbot": "將子線程的gpt結果寫入chatbot",
"当下一次用户提交时": "當下一次使用者提交時",
"先上传数据集": "先上傳資料集",
"请在此处追加更细致的矫错指令": "請在此處追加更細緻的矯錯指令",
"无法找到一个主Tex文件": "無法找到一個主Tex文件",
"gpt写的": "gpt寫的",
"预处理": "預處理",
"但大部分场合下并不需要修改": "但大部分場合下並不需要修改",
"正在构建知识库": "正在建構知識庫",
"开始请求": "開始請求",
"根据以上分析": "根據以上分析",
"需要特殊依赖": "需要特殊依賴",
"用于基础的对话功能": "用於基礎的對話功能",
"且没有代码段": "且沒有程式碼段",
"取决于": "取決於",
"openai的官方KEY需要伴隨組織編碼": "請填入組織編碼",
"等待newbing回覆的片段": "等待newbing回覆的片段",
"调用缓存": "呼叫快取",
"模型选择是": "模型選擇為",
"当前大语言模型": "當前大語言模型",
"然后转移到指定的另一个路径中": "然後轉移到指定的另一個路徑中",
"请向下翻": "請向下滾動",
"内容太长了都会触发token数量溢出的错误": "內容太長會觸發token數量溢出的錯誤",
"每一块": "每一塊",
"详情信息见requirements.txt": "詳細信息見requirements.txt",
"没有提供高级参数功能说明": "沒有提供高級參數功能說明",
"上传Latex项目": "上傳Latex項目",
"请立即终止程序": "請立即終止程式",
"解除插件锁定": "解除插件鎖定",
"意外Json结构": "意外Json結構",
"必须包含documentclass": "必須包含documentclass",
"10个文件为一组": "10個文件為一組",
"openai的官方KEY需要伴随组织编码": "openai的官方KEY需要伴隨組織編碼",
"重置文件的创建时间": "重置文件的創建時間",
"尽量是完整的一个section": "盡量是完整的一個section",
"报告如何远程获取": "報告如何遠程獲取",
"work_folder = Latex预处理": "work_folder = Latex預處理",
"吸收在42行以内的begin-end组合": "吸收在42行以內的begin-end組合",
"后面是英文冒号": "後面是英文冒號",
"使用latexdiff生成论文转化前后对比": "使用latexdiff生成論文轉化前後對比",
"首先你在英文语境下通读整篇论文": "首先你在英文語境下通讀整篇論文",
"为了防止大语言模型的意外谬误产生扩散影响": "為了防止大語言模型的意外謬誤產生擴散影響",
"发现已经存在翻译好的PDF文档": "發現已經存在翻譯好的PDF文檔",
"点击“停止”键可终止程序": "點擊“停止”鍵可終止程序",
"数学GenerateAnimations": "數學GenerateAnimations",
"随变按钮的回调函数注册": "隨變按鈕的回調函數註冊",
"history至少释放二分之一": "history至少釋放二分之一",
"当前语言模型温度设定": "當前語言模型溫度設定",
"等待GPT响应": "等待GPT響應",
"正在处理": "正在處理",
"多线程翻译开始": "多線程翻譯開始",
"reverse 操作必须放在最后": "reverse 操作必須放在最後",
"等待newbing回复的片段": "等待newbing回覆的片段",
"开始下载": "開始下載",
"将 chatglm 直接对齐到 chatglm2": "將 chatglm 直接對齊到 chatglm2",
"以上材料已经被写入": "以上材料已經被寫入",
"上传文件自动修正路径": "上傳文件自動修正路徑",
"然后请使用Markdown格式封装": "然後請使用Markdown格式封裝",
"目前对机器学习类文献转化效果最好": "目前對機器學習類文獻轉化效果最好",
"检查结果": "檢查結果",
"、地址": "地址",
"如.md": "如.md",
"使用Unsplash API": "使用Unsplash API",
"**输入参数说明**": "**輸入參數說明**",
"新版本可用": "新版本可用",
"找不到任何python文件": "找不到任何python文件",
"知乎": "知乎",
"日": "日",
"“喂狗”": "“喂狗”",
"第4步": "第4步",
"退出": "退出",
"使用 Unsplash API": "使用 Unsplash API"
}
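The projection dictionaries above map source-language UI strings to their localized forms. A minimal sketch of how such a table could be applied (simplified; the project's real localization tooling is more involved):

```python
# Tiny excerpt of a projection table, for illustration only.
trans_map = {"问题": "問題", "获取设置": "獲取設置"}

def project(text, table):
    # Replace longer keys first so overlapping phrases resolve correctly.
    for k in sorted(table, key=len, reverse=True):
        text = text.replace(k, table[k])
    return text

print(project("获取设置失败, 问题待排查", trans_map))  # -> 獲取設置失败, 問題待排查
```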

54
docs/use_audio.md Normal file
View File

@@ -0,0 +1,54 @@
# 使用音频交互功能
## 1. 安装额外依赖
```
pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
```
如果因为特色网络问题导致上述命令无法执行:
1. git clone alibabacloud-nls-python-sdk 这个项目,或者直接前往Github对应网址下载压缩包。
命令行输入: `git clone https://github.com/aliyun/alibabacloud-nls-python-sdk.git`
2. 进入alibabacloud-nls-python-sdk目录,命令行输入:`python setup.py install`
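安装完成后,可用下面的 Python 片段自检(仅为示意,能成功导入即表示依赖就绪):
```
import nls
from scipy import io
print("音频依赖就绪")
```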
## 2. 配置音频功能开关 和 阿里云APPKEY(config.py/config_private.py/环境变量)
- 注册阿里云账号
- 开通 智能语音交互 (有免费白嫖时长)
- 获取token和appkey
- 未来将逐步用其他更廉价的云服务取代阿里云
```
ENABLE_AUDIO = True
ALIYUN_TOKEN = "554a50fcd0bb476c8d07bb630e94d20c" # 此token已经失效
ALIYUN_APPKEY = "RoPlZrM88DnAFkZK" # 此appkey已经失效
```
参考 https://help.aliyun.com/document_detail/450255.html
先注册阿里云开发者账号并登录,开通 智能语音交互 功能(可以免费获得一个token),然后在 全部项目 中创建一个项目(可以获得一个appkey)。
## 3.启动
启动gpt-academic `python main.py`
## 4.点击record from microphone,授权音频采集
I 如果需要监听自己说话(不监听电脑音频),直接在浏览器中选择对应的麦克风即可
II 如果需要监听电脑音频(不监听自己说话),需要安装`VB-Audio VoiceMeeter`,打开声音控制面板(sound control panel)
- 1 `[把电脑的所有外放声音用VoiceMeeter截留]` 在输出区(playback选项卡),把VoiceMeeter Input虚拟设备设为默认播放设备(set as default)。
- 2 `[把截留的声音释放到gpt-academic]` 打开gpt-academic主界面授权音频采集后在浏览器地址栏或者类似的地方会出现一个麦克风图标打开后按照浏览器的提示选择VoiceMeeter虚拟麦克风。然后刷新页面重新授权音频采集。
- 3 `[把截留的声音同时释放到耳机或音响]` 完成第一步之后,您应处于听不到电脑声音的状态。为了在截获音频的同时,避免影响正常使用,请完成这最后一步配置。在声音控制面板(sound control panel)的输入区(recording选项卡),把VoiceMeeter Output虚拟设备设为默认设备(set as default)。双击进入VoiceMeeter Output虚拟设备的设置。
- 3-1 进入VoiceMeeter Output虚拟设备子菜单打开listen选项卡。
- 3-2 勾选Listen to this device。
- 3-3 在playback through this device下拉菜单中选择你的正常耳机或音响。
III `[把特殊软件如腾讯会议的外放声音用VoiceMeeter截留]` 在完成步骤II的基础上,在特殊软件(如腾讯会议)中打开声音菜单,扬声器选择VoiceMeeter Input,麦克风选择正常耳机麦。
IV 两种音频监听模式切换时,需要刷新页面才有效。
## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能

View File

@@ -100,10 +100,12 @@
# 修改 config.py
```
AZURE_ENDPOINT = "填入终结点"
LLM_MODEL = "azure-gpt-3.5" # 指定启动时的默认模型当然事后从下拉菜单选也ok
AZURE_ENDPOINT = "填入终结点" # 见上述图片
AZURE_API_KEY = "填入azure openai api的密钥"
AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
AZURE_ENGINE = "填入部署名" # 见上
AZURE_ENGINE = "填入部署名" # 见上述图片
```
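上述配置最终会在 request_llm/bridge_all.py 中拼接成请求地址,示意如下(终结点与部署名均为假设的占位值):
```
AZURE_ENDPOINT = "https://your-resource-name.openai.azure.com/"  # 假设的占位值
AZURE_ENGINE = "your-deployment-name"                            # 假设的占位值
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
print(azure_endpoint)
```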

68
main.py
View File

@@ -4,22 +4,23 @@ def main():
import gradio as gr
if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖详情信息见requirements.txt"
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
if not AUTHENTICATION: AUTHENTICATION = None
from check_proxy import get_current_version
from themes.theme import adjust_theme, advanced_css, theme_declaration
initial_prompt = "Serve me as a writing and programming assistant."
title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
# 问询记录, python 版本建议3.9+(越新越好)
import logging
import logging, uuid
os.makedirs("gpt_log", exist_ok=True)
try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
@@ -37,7 +38,6 @@ def main():
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
from theme import adjust_theme, advanced_css
set_theme = adjust_theme()
# 代理与自动更新
@@ -45,23 +45,23 @@ def main():
proxy_info = check_proxy(proxies)
gr_L1 = lambda: gr.Row().style()
gr_L2 = lambda scale: gr.Column(scale=scale)
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
if LAYOUT == "TOP-DOWN":
gr_L1 = lambda: DummyWith()
gr_L2 = lambda scale: gr.Row()
gr_L2 = lambda scale, elem_id: gr.Row()
CHATBOT_HEIGHT /= 2
cancel_handles = []
with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
gr.HTML(title_html)
cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
cookies = gr.State(load_chat_cookies())
with gr_L1():
with gr_L2(scale=2):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}")
chatbot.style(height=CHATBOT_HEIGHT)
with gr_L2(scale=2, elem_id="gpt-chat"):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
history = gr.State([])
with gr_L2(scale=1):
with gr.Accordion("输入区", open=True) as area_input_primary:
with gr_L2(scale=1, elem_id="gpt-panel"):
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
with gr.Row():
@@ -70,17 +70,20 @@ def main():
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
with gr.Row():
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
with gr.Accordion("基础功能区", open=True) as area_basic_fn:
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
with gr.Row():
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant)
with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
with gr.Row():
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
@@ -91,16 +94,16 @@ def main():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"):
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -109,7 +112,7 @@ def main():
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
gr.Markdown(description)
with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
with gr.Row():
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
with gr.Row():
@@ -176,16 +179,29 @@ def main():
return {chatbot: gr.update(label="当前模型:"+k)}
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
# 随变按钮的回调函数注册
def route(k, *args, **kwargs):
def route(request: gr.Request, k, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(request, *args, **kwargs)
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 终止按钮的回调函数注册
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()
def deal_audio(audio, cookies):
rad.feed(cookies['uuid'].hex, audio)
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
def init_cookie(cookies, chatbot):
# 为每一位访问的用户赋予一个独一无二的uuid编码
cookies.update({'uuid': uuid.uuid4()})
return cookies
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
# gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
def auto_opentab_delay():
import threading, webbrowser, time

View File

@@ -16,9 +16,6 @@ from toolbox import get_conf, trimmed_format_exc
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_azure_test import predict_no_ui_long_connection as azure_noui
from .bridge_azure_test import predict as azure_ui
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
@@ -48,10 +45,11 @@ class LazyloadTiktoken(object):
return encoder.decode(*args, **kwargs)
# Endpoint 重定向
API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
# 兼容旧版的配置
try:
API_URL, = get_conf("API_URL")
@@ -122,9 +120,9 @@ model_info = {
# azure openai
"azure-gpt-3.5":{
"fn_with_ui": azure_ui,
"fn_without_ui": azure_noui,
"endpoint": get_conf("AZURE_ENDPOINT"),
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": azure_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
@@ -170,7 +168,31 @@ model_info = {
}
AVAIL_LLM_MODELS, = get_conf("AVAIL_LLM_MODELS")
AVAIL_LLM_MODELS, LLM_MODEL = get_conf("AVAIL_LLM_MODELS", "LLM_MODEL")
AVAIL_LLM_MODELS = AVAIL_LLM_MODELS + [LLM_MODEL]
if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
from .bridge_claude import predict_no_ui_long_connection as claude_noui
from .bridge_claude import predict as claude_ui
model_info.update({
"claude-1-100k": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": None,
"max_token": 8196,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
model_info.update({
"claude-2": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": None,
"max_token": 8196,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
from .bridge_jittorllms_rwkv import predict as rwkv_ui
@@ -271,6 +293,24 @@ if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
})
except:
print(trimmed_format_exc())
if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
try:
from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
from .bridge_chatglmft import predict as chatglmft_ui
# chatglmft
model_info.update({
"chatglmft": {
"fn_with_ui": chatglmft_ui,
"fn_without_ui": chatglmft_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
def LLM_CATCH_EXCEPTION(f):
"""
@@ -374,6 +414,6 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
additional_fn代表点击的哪个按钮按钮见functional.py
"""
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错检查config中的AVAIL_LLM_MODELS选项
yield from method(inputs, llm_kwargs, *args, **kwargs)
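The registry above keeps `predict` model-agnostic: optional bridges are imported lazily and merged into `model_info`, and dispatch is a plain dictionary lookup. A stripped-down sketch of the pattern (all names hypothetical):

```python
model_info = {}

def register(name, fn_with_ui, fn_without_ui, max_token):
    model_info[name] = {"fn_with_ui": fn_with_ui,
                        "fn_without_ui": fn_without_ui,
                        "max_token": max_token}

def echo_ui(inputs, **kwargs):          # a toy streaming "model"
    yield f"echo: {inputs}"

register("echo", echo_ui, lambda inputs, **kw: f"echo: {inputs}", 4096)

method = model_info["echo"]["fn_with_ui"]  # the same lookup predict() performs
for piece in method("hello"):
    print(piece)
```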

View File

@@ -1,237 +0,0 @@
"""
This file mainly contains three functions:
Functions without multi-threading capability:
1. predict: used in normal conversation; fully interactive; cannot be multi-threaded
Functions with multi-threading capability:
2. predict_no_ui: called by advanced experimental plugin modules; does not render to the UI in real time; takes simple parameters; can run in parallel threads, which makes complex logic easy to implement
3. predict_no_ui_long_connection: while experimenting we found that calls through predict_no_ui tend to drop the OpenAI connection on long documents; this function solves that with streaming, and it also supports multi-threading
"""
import logging
import traceback
import importlib
import openai
import time
import requests
import json
# Read the AZURE OPENAI API settings from config.py
from toolbox import get_conf, update_ui, clip_history, trimmed_format_exc
TIMEOUT_SECONDS, MAX_RETRY, AZURE_ENGINE, AZURE_ENDPOINT, AZURE_API_VERSION, AZURE_API_KEY = \
get_conf('TIMEOUT_SECONDS', 'MAX_RETRY',"AZURE_ENGINE","AZURE_ENDPOINT", "AZURE_API_VERSION", "AZURE_API_KEY")
def get_full_error(chunk, stream_response):
"""
Retrieve the complete error message returned by OpenAI
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Send the query to the Azure OpenAI API and fetch the output as a stream.
Used for the basic conversation feature.
inputs is the input of the current query
top_p, temperature are internal tuning parameters of chatGPT
history is the list of previous turns; note that if either inputs or history grows too long, a token-overflow error is triggered
chatbot is the conversation list shown in the WebUI; modify it and yield it out to update the dialogue on screen directly
additional_fn indicates which button was clicked; see functional.py for the buttons
"""
if additional_fn is not None:
import core_functional
importlib.reload(core_functional) # hot-reload the prompt
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the pre-processing function (if any)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
raw_input = inputs
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the UI
payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt, stream)
history.append(inputs); history.append("")
retry = 0
while True:
try:
openai.api_type = "azure"
openai.api_version = AZURE_API_VERSION
openai.api_base = AZURE_ENDPOINT
openai.api_key = AZURE_API_KEY
response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload);break
except openai.error.AuthenticationError:
tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = [chatbot[-1][0], tb_str]
yield from update_ui(chatbot=chatbot, history=history, msg="openai返回错误") # refresh the UI
return
except:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
gpt_replying_buffer = ""
is_head_of_the_stream = True
if stream:
stream_response = response
while True:
try:
chunk = next(stream_response)
except StopIteration:
from toolbox import regular_txt_to_markdown; tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 远程返回错误: \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk)}")
yield from update_ui(chatbot=chatbot, history=history, msg="远程返回错误:" + chunk) # refresh the UI
return
if is_head_of_the_stream and (r'"object":"error"' not in chunk):
# the first frame of the data stream carries no content
is_head_of_the_stream = False; continue
if chunk:
#print(chunk)
try:
if "delta" in chunk["choices"][0]:
if chunk["choices"][0]["finish_reason"] == "stop":
logging.info(f'[response] {gpt_replying_buffer}')
break
status_text = f"finish_reason: {chunk['choices'][0]['finish_reason']}"
gpt_replying_buffer = gpt_replying_buffer + chunk["choices"][0]["delta"]["content"]
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
except Exception as e:
traceback.print_exc()
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # refresh the UI
chunk = get_full_error(chunk, stream_response)
error_msg = chunk
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # refresh the UI
return
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
Send the query to the AZURE OPENAI API and wait for the reply, completing in one go without showing intermediate progress. Internally, streaming is used to avoid the connection being cut mid-way.
inputs:
    the input of the current query
sys_prompt:
    the silent system prompt
llm_kwargs:
    internal tuning parameters of chatGPT
history:
    the list of previous turns
observe_window = None:
    used to pass the already-generated output across threads; most of the time it exists purely for a fancy visual effect and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
"""
watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
retry = 0
while True:
try:
openai.api_type = "azure"
openai.api_version = AZURE_API_VERSION
openai.api_base = AZURE_ENDPOINT
openai.api_key = AZURE_API_KEY
response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload);break
except:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
stream_response = response
result = ''
while True:
try: chunk = next(stream_response)
except StopIteration:
break
except:
chunk = next(stream_response) # failed; retry once -- if it fails again, there is nothing more we can do
if len(chunk)==0: continue
json_data = json.loads(str(chunk))['choices'][0]
delta = json_data["delta"]
if len(delta) == 0:
break
if "role" in delta:
continue
if "content" in delta:
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if observe_window is not None:
# observation window: publish the data received so far
if len(observe_window) >= 1: observe_window[0] += delta["content"]
# watchdog: terminate if it has not been fed within the deadline
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else:
raise RuntimeError("意外Json结构"+delta)
if json_data['finish_reason'] == 'content_filter':
raise RuntimeError("由于提问含不合规内容被Azure过滤。")
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束但显示Token不足导致输出不完整请削减单次输入的文本量。")
return result
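A minimal caller-side sketch of the observe_window protocol documented above; everything except predict_no_ui_long_connection itself is hypothetical:

import threading, time

observe_window = ["", time.time()]   # [0]: text streamed so far, [1]: last watchdog feed
def worker():
    predict_no_ui_long_connection(
        inputs="Summarize this paragraph ...",   # hypothetical input
        llm_kwargs={'llm_model': 'azure-gpt-3.5', 'temperature': 1.0, 'top_p': 1.0},
        history=[], sys_prompt="", observe_window=observe_window)
threading.Thread(target=worker, daemon=True).start()
for _ in range(30):
    observe_window[1] = time.time()   # feed the watchdog; stop feeding to cancel the call
    print(observe_window[0])          # partial output accumulated so far
    time.sleep(1)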
def generate_azure_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
Consolidate all information, select the LLM model, and generate the Azure OpenAI API request, ready to be sent
"""
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
payload = {
"model": llm_kwargs['llm_model'],
"messages": messages,
"temperature": llm_kwargs['temperature'], # 1.0,
"top_p": llm_kwargs['top_p'], # 1.0,
"n": 1,
"stream": stream,
"presence_penalty": 0,
"frequency_penalty": 0,
"engine": AZURE_ENGINE
}
try:
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
return payload
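For orientation, the messages list this helper builds for a one-turn history (all content strings hypothetical):

# history = ["Hi", "Hello!"], inputs = "How are you?"   # hypothetical values
# generate_azure_payload(...) then produces:
# messages = [
#     {"role": "system",    "content": system_prompt},
#     {"role": "user",      "content": "Hi"},
#     {"role": "assistant", "content": "Hello!"},
#     {"role": "user",      "content": "How are you?"},
# ]
# plus the sampling fields (temperature, top_p, n=1, stream) and engine=AZURE_ENGINE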

View File

@@ -0,0 +1,210 @@
from transformers import AutoModel, AutoTokenizer
import time
import os
import json
import threading
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
load_message = "ChatGLMFT尚未加载加载需要一段时间。注意取决于`config.py`的配置ChatGLMFT消耗大量的内存CPU或显存GPU也许会导致低配计算机卡死 ……"
def string_to_options(arguments):
import argparse
import shlex
# Create an argparse.ArgumentParser instance
parser = argparse.ArgumentParser()
# Add command-line arguments
parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
parser.add_argument("--batch", type=int, help="System prompt", default=50)
# Parse the arguments
args = parser.parse_args(shlex.split(arguments))
return args
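A quick usage sketch of the helper above (the argument string is hypothetical):

opts = string_to_options('--llm_to_learn gpt-3.5-turbo --batch 10')  # hypothetical arguments
print(opts.llm_to_learn, opts.batch)  # -> gpt-3.5-turbo 10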
#################################################################################
class GetGLMFTHandle(Process):
def __init__(self):
super().__init__(daemon=True)
self.parent, self.child = Pipe()
self.chatglmft_model = None
self.chatglmft_tokenizer = None
self.info = ""
self.success = True
self.check_dependency()
self.start()
self.threadLock = threading.Lock()
def check_dependency(self):
try:
import sentencepiece
self.info = "依赖检测通过"
self.success = True
except:
self.info = "缺少ChatGLMFT的依赖如果要使用ChatGLMFT除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.success = False
def ready(self):
return self.chatglmft_model is not None
def run(self):
# runs in the child process
# first run: load the parameters
retry = 0
while True:
try:
if self.chatglmft_model is None:
from transformers import AutoConfig
import torch
# conf = 'request_llm/current_ptune_model.json'
# if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息')
# with open(conf, 'r', encoding='utf8') as f:
# model_args = json.loads(f.read())
ChatGLM_PTUNING_CHECKPOINT, = get_conf('ChatGLM_PTUNING_CHECKPOINT')
assert os.path.exists(ChatGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点"
conf = os.path.join(ChatGLM_PTUNING_CHECKPOINT, "config.json")
with open(conf, 'r', encoding='utf8') as f:
model_args = json.loads(f.read())
if 'model_name_or_path' not in model_args:
model_args['model_name_or_path'] = model_args['_name_or_path']
self.chatglmft_tokenizer = AutoTokenizer.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config = AutoConfig.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config.pre_seq_len = model_args['pre_seq_len']
config.prefix_projection = model_args['prefix_projection']
print(f"Loading prefix_encoder weight from {ChatGLM_PTUNING_CHECKPOINT}")
model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(ChatGLM_PTUNING_CHECKPOINT, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
if model_args['quantization_bit'] is not None:
print(f"Quantized to {model_args['quantization_bit']} bit")
model = model.quantize(model_args['quantization_bit'])
model = model.cuda()
if model_args['pre_seq_len'] is not None:
# P-tuning v2
model.transformer.prefix_encoder.float()
self.chatglmft_model = model.eval()
break
else:
break
except Exception as e:
retry += 1
if retry > 3:
self.child.send('[Local Message] Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数。')
raise RuntimeError("不能正常加载ChatGLMFT的参数")
while True:
# enter the wait-for-task state
kwargs = self.child.recv()
# message received; start the request
try:
for response, history in self.chatglmft_model.stream_chat(self.chatglmft_tokenizer, **kwargs):
self.child.send(response)
# # receive a possible mid-stream termination command (if any)
# if self.child.poll():
# command = self.child.recv()
# if command == '[Terminate]': break
except:
from toolbox import trimmed_format_exc
self.child.send('[Local Message] Call ChatGLMFT fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
# request handled; move on to the next loop iteration
self.child.send('[Finish]')
def stream_chat(self, **kwargs):
# runs in the parent process
self.threadLock.acquire()
self.parent.send(kwargs)
while True:
res = self.parent.recv()
if res != '[Finish]':
yield res
else:
break
self.threadLock.release()
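Summarizing the Pipe protocol implemented above as a hedged sketch; this assumes a valid ChatGLM_PTUNING_CHECKPOINT is configured, and the query values are hypothetical:

handle = GetGLMFTHandle()   # spawns the child process and loads the weights
for partial in handle.stream_chat(query="Hello", history=[],
                                  max_length=512, top_p=0.8, temperature=0.95):
    print(partial)   # each message is the full response so far; '[Finish]' ends the loop internally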
global glmft_handle
glmft_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
For this function's documentation, see request_llm/bridge_all.py
"""
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glmft_handle.info
if not glmft_handle.success:
error = glmft_handle.info
glmft_handle = None
raise RuntimeError(error)
# chatglmft has no sys_prompt interface, so the prompt is injected into history instead
history_feedin = []
history_feedin.append(["What can I do?", sys_prompt])
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
response = ""
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
if len(observe_window) >= 1: observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return response
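How the flat history list is folded into the pair format chatglmft expects, shown on hypothetical values:

sys_prompt = "You are helpful."      # hypothetical
history = ["Q1", "A1", "Q2", "A2"]   # flat [user, assistant, ...] list
history_feedin = [["What can I do?", sys_prompt]] + \
    [[history[2*i], history[2*i+1]] for i in range(len(history)//2)]
# -> [["What can I do?", "You are helpful."], ["Q1", "A1"], ["Q2", "A2"]]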
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
For this function's documentation, see request_llm/bridge_all.py
"""
chatbot.append((inputs, ""))
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
chatbot[-1] = (inputs, load_message + "\n\n" + glmft_handle.info)
yield from update_ui(chatbot=chatbot, history=[])
if not glmft_handle.success:
glmft_handle = None
return
if additional_fn is not None:
import core_functional
importlib.reload(core_functional) # hot-reload the prompt
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the pre-processing function (if any)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
# process the history
history_feedin = []
history_feedin.append(["What can I do?", system_prompt] )
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
# start receiving chatglmft's reply
response = "[Local Message]: 等待ChatGLMFT响应中 ..."
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
# summarize the output
if response == "[Local Message]: 等待ChatGLMFT响应中 ...":
response = "[Local Message]: ChatGLMFT响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -22,8 +22,8 @@ import importlib
# config_private.py holds your secrets, such as API keys and proxy URLs
# when reading, the code first checks for a private config_private file (not tracked by git); if present, it overrides the original config file
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
@@ -101,6 +101,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else: raise RuntimeError("意外Json结构"+delta)
if json_data['finish_reason'] == 'content_filter':
raise RuntimeError("由于提问含不合规内容被Azure过滤。")
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束但显示Token不足导致输出不完整请削减单次输入的文本量。")
return result
@@ -247,6 +249,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"Authorization": f"Bearer {api_key}"
}
if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG})
if llm_kwargs['llm_model'].startswith('azure-'): headers.update({"api-key": api_key})
conversation_cnt = len(history) // 2
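The effect of the added line, sketched on a hypothetical key (the baseline header set is an assumption about surrounding code not fully visible in this hunk):

# regular OpenAI / api2d request (assumed baseline headers):
#   {"Content-Type": "application/json", "Authorization": "Bearer sk-..."}
# when llm_kwargs['llm_model'] starts with 'azure-', the same key is also sent Azure-style:
#   {"Content-Type": "application/json", "Authorization": "Bearer sk-...", "api-key": "sk-..."}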

View File

@@ -0,0 +1,231 @@
# borrows from the https://github.com/GaiZhenbiao/ChuanhuChatGPT project
"""
This file mainly contains 2 functions:
Functions without multi-threading capability:
1. predict: used in normal conversation; fully interactive; cannot be multi-threaded
Functions with multi-threading capability:
2. predict_no_ui_long_connection: while experimenting we found that calls through predict_no_ui tend to drop the OpenAI connection on long documents; this function solves that with streaming, and it also supports multi-threading
"""
import os
import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
# config_private.py holds your secrets, such as API keys and proxy URLs
# when reading, the code first checks for a private config_private file (not tracked by git); if present, it overrides the original config file
from toolbox import get_conf, update_ui, trimmed_format_exc, ProxyNetworkActivate
proxies, TIMEOUT_SECONDS, MAX_RETRY, ANTHROPIC_API_KEY = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'ANTHROPIC_API_KEY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def get_full_error(chunk, stream_response):
"""
Retrieve the complete error message returned by OpenAI
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
Send the query to Claude and wait for the reply, completing in one go without showing intermediate progress. Internally, streaming is used to avoid the connection being cut mid-way.
inputs:
    the input of the current query
sys_prompt:
    the silent system prompt
llm_kwargs:
    internal tuning parameters of chatGPT
history:
    the list of previous turns
observe_window = None:
    used to pass the already-generated output across threads; most of the time it exists purely for a fancy visual effect and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
"""
from anthropic import Anthropic
watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
retry = 0
if len(ANTHROPIC_API_KEY) == 0:
raise RuntimeError("没有设置ANTHROPIC_API_KEY选项")
while True:
try:
# issue the request to the API endpoint with stream=True
from .bridge_all import model_info
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
# with ProxyNetworkActivate()
stream = anthropic.completions.create(
prompt=prompt,
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
model=llm_kwargs['llm_model'],
stream=True,
temperature = llm_kwargs['temperature']
)
break
except Exception as e:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
result = ''
try:
for completion in stream:
result += completion.completion
if not console_slience: print(completion.completion, end='')
if observe_window is not None:
# observation window: publish the data received so far
if len(observe_window) >= 1: observe_window[0] += completion.completion
# watchdog: terminate if it has not been fed within the deadline
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
except Exception as e:
traceback.print_exc()
return result
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Send the query to Claude and fetch the output as a stream.
Used for the basic conversation feature.
inputs is the input of the current query
top_p, temperature are internal tuning parameters of chatGPT
history is the list of previous turns; note that if either inputs or history grows too long, a token-overflow error is triggered
chatbot is the conversation list shown in the WebUI; modify it and yield it out to update the dialogue on screen directly
additional_fn indicates which button was clicked; see functional.py for the buttons
"""
from anthropic import Anthropic
if len(ANTHROPIC_API_KEY) == 0:
chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY"))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the UI
return
if additional_fn is not None:
import core_functional
importlib.reload(core_functional) # hot-reload the prompt
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the pre-processing function (if any)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
raw_input = inputs
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the UI
try:
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
except RuntimeError as e:
chatbot[-1] = (inputs, f"您提供的api-key不满足要求不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # refresh the UI
return
history.append(inputs); history.append("")
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=True
from .bridge_all import model_info
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
# with ProxyNetworkActivate()
stream = anthropic.completions.create(
prompt=prompt,
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
model=llm_kwargs['llm_model'],
stream=True,
temperature = llm_kwargs['temperature']
)
break
except:
retry += 1
chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # refresh the UI
if retry > MAX_RETRY: raise TimeoutError
gpt_replying_buffer = ""
for completion in stream:
try:
gpt_replying_buffer = gpt_replying_buffer + completion.completion
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history, msg='正常') # refresh the UI
except Exception as e:
from toolbox import regular_txt_to_markdown
tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str}")
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + tb_str) # refresh the UI
return
# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py
def convert_messages_to_prompt(messages):
prompt = ""
role_map = {
"system": "Human",
"user": "Human",
"assistant": "Assistant",
}
for message in messages:
role = message["role"]
content = message["content"]
transformed_role = role_map[role]
prompt += f"\n\n{transformed_role.capitalize()}: {content}"
prompt += "\n\nAssistant: "
return prompt
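What the conversion yields for a tiny message list (content strings hypothetical):

messages = [
    {"role": "system",    "content": "Be concise."},
    {"role": "user",      "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user",      "content": "How are you?"},
]
print(convert_messages_to_prompt(messages))
# -> "\n\nHuman: Be concise.\n\nHuman: Hi\n\nAssistant: Hello!\n\nHuman: How are you?\n\nAssistant: "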
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
Consolidate all information, select the LLM model, and generate the HTTP request, ready to be sent
"""
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
prompt = convert_messages_to_prompt(messages)
return prompt

View File

@@ -10,10 +10,11 @@ def validate_path():
validate_path() # validate path so you can run from base directory
if __name__ == "__main__":
from request_llm.bridge_newbingfree import predict_no_ui_long_connection
# from request_llm.bridge_newbingfree import predict_no_ui_long_connection
# from request_llm.bridge_moss import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
from request_llm.bridge_claude import predict_no_ui_long_connection
llm_kwargs = {
'max_length': 512,
@@ -28,17 +29,6 @@ if __name__ == "__main__":
print('final result:', result)
result = predict_no_ui_long_connection(inputs="what is a hero?",
llm_kwargs=llm_kwargs,
history=["hello world"],
sys_prompt="")
print('final result:', result)
result = predict_no_ui_long_connection(inputs="如何理解传奇?",
llm_kwargs=llm_kwargs,
history=[],
sys_prompt="")
print('final result:', result)
# # print(result)
# from multiprocessing import Process, Pipe
@@ -56,7 +46,6 @@ if __name__ == "__main__":
# os.chdir(root_dir_assume + '/request_llm/jittorllms')
# sys.path.append(root_dir_assume + '/request_llm/jittorllms')
# validate_path() # validate path so you can run from base directory
# jittorllms_model = None
# import types
# try:
@@ -70,7 +59,6 @@ if __name__ == "__main__":
# except:
# # self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
# raise RuntimeError("不能正常加载jittorllms的参数")
# x = GetGLMHandle()
# x.start()

View File

@@ -1,4 +1,5 @@
./docs/gradio-3.32.2-py3-none-any.whl
pydantic==1.10.11
tiktoken>=0.3.3
requests[socks]
transformers
@@ -8,6 +9,7 @@ prompt_toolkit
latex2mathml
python-docx
mdtex2html
anthropic
colorama
Markdown
pygments
@@ -15,4 +17,4 @@ pymupdf
openai
numpy
arxiv
rich
rich

themes/common.js (new file, 47 lines)
View File

@@ -0,0 +1,47 @@
function ChatBotHeight() {
function update_height(){
var { panel_height_target, chatbot_height, chatbot } = get_elements();
if (panel_height_target!=chatbot_height)
{
var pixelString = panel_height_target.toString() + 'px';
chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
}
}
function update_height_slow(){
var { panel_height_target, chatbot_height, chatbot } = get_elements();
if (panel_height_target!=chatbot_height)
{
new_panel_height = (panel_height_target - chatbot_height)*0.5 + chatbot_height;
if (Math.abs(new_panel_height - panel_height_target) < 10){
new_panel_height = panel_height_target;
}
// console.log(chatbot_height, panel_height_target, new_panel_height);
var pixelString = new_panel_height.toString() + 'px';
chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
}
}
update_height();
setInterval(function() {
update_height_slow()
}, 50); // run every 50 ms
}
function get_elements() {
var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
if (!chatbot) {
chatbot = document.querySelector('#gpt-chatbot');
}
const panel1 = document.querySelector('#input-panel');
const panel2 = document.querySelector('#basic-panel');
const panel3 = document.querySelector('#plugin-panel');
const panel4 = document.querySelector('#interact-panel');
const panel5 = document.querySelector('#input-panel2');
const panel_active = document.querySelector('#state-panel');
var panel_height_target = (20-panel_active.offsetHeight) + panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight + 21;
var panel_height_target = parseInt(panel_height_target);
var chatbot_height = chatbot.style.height;
var chatbot_height = parseInt(chatbot_height);
return { panel_height_target, chatbot_height, chatbot };
}

View File

@@ -1,108 +1,3 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
# list of colors available in gradio:
# gr.themes.utils.colors.slate
# gr.themes.utils.colors.gray
# gr.themes.utils.colors.zinc
# gr.themes.utils.colors.neutral
# gr.themes.utils.colors.stone
# gr.themes.utils.colors.red
# gr.themes.utils.colors.orange
# gr.themes.utils.colors.amber
# gr.themes.utils.colors.yellow
# gr.themes.utils.colors.lime
# gr.themes.utils.colors.green
# gr.themes.utils.colors.emerald
# gr.themes.utils.colors.teal
# gr.themes.utils.colors.cyan
# gr.themes.utils.colors.sky
# gr.themes.utils.colors.blue
# gr.themes.utils.colors.indigo
# gr.themes.utils.colors.violet
# gr.themes.utils.colors.purple
# gr.themes.utils.colors.fuchsia
# gr.themes.utils.colors.pink
# gr.themes.utils.colors.rose
def adjust_theme():
try:
color_er = gr.themes.utils.colors.fuchsia
set_theme = gr.themes.Default(
primary_hue=gr.themes.utils.colors.orange,
neutral_hue=gr.themes.utils.colors.gray,
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
set_theme.set(
# Colors
input_background_fill_dark="*neutral_800",
# Transition
button_transition="none",
# Shadows
button_shadow="*shadow_drop",
button_shadow_hover="*shadow_drop_lg",
button_shadow_active="*shadow_inset",
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
checkbox_label_shadow="*shadow_drop",
block_shadow="*shadow_drop",
form_gap_width="1px",
# Button borders
input_border_width="1px",
input_background_fill="white",
# Gradients
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
error_background_fill_dark="*background_fill_primary",
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
button_primary_border_color_dark="*primary_500",
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
button_cancel_border_color=color_er.c200,
button_cancel_border_color_dark=color_er.c600,
button_cancel_text_color=color_er.c600,
button_cancel_text_color_dark="white",
)
# add a cute Live2D mascot
if ADD_WAIFU:
js = """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
advanced_css = """
.markdown-body table {
margin: 1em 0;
border-collapse: collapse;
@@ -187,10 +82,15 @@ advanced_css = """
margin: 1em 2em 1em 0.5em;
}
"""
/* .mic-wrap.svelte-1thnwz {
if CODE_HIGHLIGHT:
advanced_css += """
} */
.block.svelte-mppz8v > .mic-wrap.svelte-1thnwz{
justify-content: center;
display: flex;
padding: 0;
}
.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
@@ -350,4 +250,3 @@ if CODE_HIGHLIGHT:
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
"""

themes/default.py (new file, 86 lines)
View File

@@ -0,0 +1,86 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
def adjust_theme():
try:
color_er = gr.themes.utils.colors.fuchsia
set_theme = gr.themes.Default(
primary_hue=gr.themes.utils.colors.orange,
neutral_hue=gr.themes.utils.colors.gray,
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui"],
font_mono=["ui-monospace", "Consolas", "monospace"])
set_theme.set(
# Colors
input_background_fill_dark="*neutral_800",
# Transition
button_transition="none",
# Shadows
button_shadow="*shadow_drop",
button_shadow_hover="*shadow_drop_lg",
button_shadow_active="*shadow_inset",
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
checkbox_label_shadow="*shadow_drop",
block_shadow="*shadow_drop",
form_gap_width="1px",
# Button borders
input_border_width="1px",
input_background_fill="white",
# Gradients
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
error_background_fill_dark="*background_fill_primary",
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
button_primary_border_color_dark="*primary_500",
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
button_cancel_border_color=color_er.c200,
button_cancel_border_color_dark=color_er.c600,
button_cancel_text_color=color_er.c600,
button_cancel_text_color_dark="white",
)
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('themes/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# add a cute Live2D mascot
if ADD_WAIFU:
js += """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
with open("themes/default.css", "r", encoding="utf-8") as f:
advanced_css = f.read()
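For context, a hedged sketch of how a theme module like this is typically consumed when the UI is built; the Blocks code here is hypothetical, not taken from this diff:

import gradio as gr
from themes.default import adjust_theme, advanced_css

set_theme = adjust_theme()                       # may return None on old gradio versions
with gr.Blocks(theme=set_theme, css=advanced_css) as demo:
    chatbot = gr.Chatbot(elem_id="gpt-chatbot")  # the id that themes/common.js looks up
demo.launch()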

themes/green.css (new file, 806 lines)
View File

@@ -0,0 +1,806 @@
:root {
--chatbot-color-light: #000000;
--chatbot-color-dark: #FFFFFF;
--chatbot-background-color-light: #F3F3F3;
--chatbot-background-color-dark: #121111;
--message-user-background-color-light: #95EC69;
--message-user-background-color-dark: #26B561;
--message-bot-background-color-light: #FFFFFF;
--message-bot-background-color-dark: #2C2C2C;
}
mspace {
display: block;
}
@media only screen and (max-width: 767px) {
#column_1 {
display: none !important;
}
}
@keyframes highlight {
0%, 100% {
border: 2px solid transparent;
}
50% {
border-color: yellow;
}
}
#highlight_update {
animation-name: highlight;
animation-duration: 0.75s;
animation-iteration-count: 3;
}
.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
border: 0px solid var(--border-color-primary) !important;
}
#examples_col {
z-index: 2;
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30% !important;
}
#hide_examples {
z-index: 0;
}
#debug_mes {
position: absolute;
display: flex;
bottom: 0;
left: 0;
z-index: 1; /* use a higher z-index */
margin-bottom: -4px !important;
align-self: flex-end;
}
#chat_box {
display: flex;
flex-direction: column;
overflow-y: visible !important;
z-index: 3;
flex-grow: 1; /* automatically fill the remaining space */
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30px !important;
border: 1px solid var(--border-color-primary);
}
.toast-body {
z-index: 5 !important;
}
.chat_input {
}
.sm_btn {
position: relative;
bottom: 5px;
height: 10%;
border-radius: 20px!important;
min-width: min(10%,100%) !important;
overflow: hidden;
}
.sm_select {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
min-width: min(20%,100%) !important;
border-radius: 20px!important;
}
.sm_checkbox {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
padding: 0 !important;
}
.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
padding: 0 !important;
}
.sm_select .block.svelte-mppz8v {
width: 10% !important;
}
/* usage_display */
.insert_block {
position: relative;
bottom: 2px;
min-width: min(55px,100%) !important;
}
.submit_btn {
flex-direction: column-reverse;
overflow-y: auto !important;
position: absolute;
bottom: 0;
right: 10px;
margin-bottom: 10px !important;
min-width: min(50px,100%) !important;
}
textarea {
resize: none;
height: 100%; /* fill the parent element's height */
}
#main_chatbot {
height: 75vh !important;
max-height: 75vh !important;
/* overflow: auto !important; */
z-index: 2;
transform: translateZ(0) !important;
backface-visibility: hidden !important;
will-change: transform !important;
}
#prompt_result{
height: 60vh !important;
max-height: 60vh !important;
}
#app_title {
font-weight: var(--prose-header-text-weight);
font-size: var(--text-xxl);
line-height: 1.3;
text-align: left;
margin-top: 6px;
white-space: nowrap;
}
#description {
text-align: center;
margin: 32px 0 4px 0;
}
/* gradio footer info */
footer {
/* display: none !important; */
margin-top: .2em !important;
font-size: 85%;
}
#footer {
text-align: center;
}
#footer div {
display: inline-block;
}
#footer .versions{
font-size: 85%;
opacity: 0.60;
}
/* user_info */
#float_display {
position: absolute;
max-height: 30px;
}
/* user_info */
#user_info {
white-space: nowrap;
position: absolute; left: 8em; top: .2em;
z-index: var(--layer-2);
box-shadow: var(--block-shadow);
border: none; border-radius: var(--block-label-radius);
background: var(--color-accent);
padding: var(--block-label-padding);
font-size: var(--block-label-text-size); line-height: var(--line-sm);
width: auto; min-height: 30px !important;
opacity: 1;
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
background: #e7e6e6 !important;
width: 96% !important;
}
.dark textarea.svelte-1pie7s6 {
background: var(--input-background-fill) !important;
width: 96% !important;
}
.dark input[type=number].svelte-1cl284s {
background: #393939 !important;
border: var(--input-border-width) solid var(--input-border-color) !important;
}
.dark input[type="range"] {
background: #393939 !important;
}
#user_info .wrap {
opacity: 0;
}
#user_info p {
color: white;
font-weight: var(--block-label-text-weight);
}
#user_info.hideK {
opacity: 0;
transition: opacity 1s ease-in-out;
}
[class *= "message"] {
gap: 7px !important;
border-radius: var(--radius-xl) !important
}
/* debug_mes */
#debug_mes {
min-height: 2em;
align-items: flex-end;
justify-content: flex-end;
}
#debug_mes p {
font-size: .85em;
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
/* On Windows, monospace Chinese text falls back to NSimSun, which looks terrible; Microsoft YaHei is a reasonable compromise */
color: #000000;
}
.dark #debug_mes p {
color: #ee65ed;
}
#debug_mes {
transition: all 0.6s;
}
#main_chatbot {
transition: height 0.3s ease;
}
/* .wrap.svelte-18telvq.svelte-18telvq {
padding: var(--block-padding) !important;
height: 100% !important;
max-height: 95% !important;
overflow-y: auto !important;
}*/
.app.svelte-1mya07g.svelte-1mya07g {
max-width: 100%;
position: relative;
padding: var(--size-4);
width: 100%;
height: 100%;
}
.gradio-container-3-32-2 h1 {
font-weight: 700 !important;
font-size: 28px !important;
}
.gradio-container-3-32-2 h2 {
font-weight: 600 !important;
font-size: 24px !important;
}
.gradio-container-3-32-2 h3 {
font-weight: 500 !important;
font-size: 20px !important;
}
.gradio-container-3-32-2 h4 {
font-weight: 400 !important;
font-size: 16px !important;
}
.gradio-container-3-32-2 h5 {
font-weight: 300 !important;
font-size: 14px !important;
}
.gradio-container-3-32-2 h6 {
font-weight: 200 !important;
font-size: 12px !important;
}
#usage_display p, #usage_display span {
margin: 0;
font-size: .85em;
color: var(--body-text-color-subdued);
}
.progress-bar {
background-color: var(--input-background-fill);
margin: .5em 0 !important;
height: 20px;
border-radius: 10px;
overflow: hidden;
}
.progress {
background-color: var(--block-title-background-fill);
height: 100%;
border-radius: 10px;
text-align: right;
transition: width 0.5s ease-in-out;
}
.progress-text {
/* color: white; */
color: var(--color-accent) !important;
font-size: 1em !important;
font-weight: bold;
padding-right: 10px;
line-height: 20px;
}
.apSwitch {
top: 2px;
display: inline-block;
height: 24px;
position: relative;
width: 48px;
border-radius: 12px;
}
.apSwitch input {
display: none !important;
}
.apSlider {
background-color: var(--neutral-200);
bottom: 0;
cursor: pointer;
left: 0;
position: absolute;
right: 0;
top: 0;
transition: .4s;
font-size: 18px;
border-radius: 7px;
}
.apSlider::before {
bottom: -1.5px;
left: 1px;
position: absolute;
transition: .4s;
content: "🌞";
}
hr.append-display {
margin: 8px 0;
border: none;
height: 1px;
border-top-width: 0;
background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
}
.source-a {
font-size: 0.8em;
max-width: 100%;
margin: 0;
display: flex;
flex-direction: row;
flex-wrap: wrap;
align-items: center;
/* background-color: #dddddd88; */
border-radius: 1.5rem;
padding: 0.2em;
}
.source-a a {
display: inline-block;
background-color: #aaaaaa50;
border-radius: 1rem;
padding: 0.5em;
text-align: center;
text-overflow: ellipsis;
overflow: hidden;
min-width: 20%;
white-space: nowrap;
margin: 0.2rem 0.1rem;
text-decoration: none !important;
flex: 1;
transition: flex 0.5s;
}
.source-a a:hover {
background-color: #aaaaaa20;
flex: 2;
}
input:checked + .apSlider {
background-color: var(--primary-600);
}
input:checked + .apSlider::before {
transform: translateX(23px);
content:"🌚";
}
/* Override Slider Styles (for webkit browsers like Safari and Chrome)
* hoping this proposal lands soon: https://github.com/w3c/csswg-drafts/issues/4410
* range sliders are still far too inconsistent across platforms
*/
input[type="range"] {
-webkit-appearance: none;
height: 4px;
background: var(--input-background-fill);
border-radius: 5px;
background-image: linear-gradient(var(--primary-500),var(--primary-500));
background-size: 0% 100%;
background-repeat: no-repeat;
}
input[type="range"]::-webkit-slider-thumb {
-webkit-appearance: none;
height: 20px;
width: 20px;
border-radius: 50%;
border: solid 0.5px #ddd;
background-color: white;
cursor: ew-resize;
box-shadow: var(--input-shadow);
transition: background-color .1s ease;
}
input[type="range"]::-webkit-slider-thumb:hover {
background: var(--neutral-50);
}
input[type=range]::-webkit-slider-runnable-track {
-webkit-appearance: none;
box-shadow: none;
border: none;
background: transparent;
}
.submit_btn, #cancel_btn {
height: 42px !important;
}
.submit_btn::before {
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
#cancel_btn::before {
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
/* list */
ol:not(.options), ul:not(.options) {
padding-inline-start: 2em !important;
}
/* light mode (default) */
#main_chatbot {
background-color: var(--chatbot-background-color-light) !important;
color: var(--chatbot-color-light) !important;
}
/* dark mode */
.dark #main_chatbot {
background-color: var(--block-background-fill) !important;
color: var(--chatbot-color-dark) !important;
}
/* devices with screen width >= 500px */
/* update 2023-04-08: fine-grained height adjustment now lives in JavaScript */
@media screen and (min-width: 500px) {
#main_chatbot {
height: calc(100vh - 200px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
}
/* devices with screen width < 500px */
@media screen and (max-width: 499px) {
#main_chatbot {
height: calc(100vh - 140px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
[data-testid = "bot"] {
max-width: 95% !important;
}
#app_title h1{
letter-spacing: -1px; font-size: 22px;
}
}
#main_chatbot .wrap {
overflow-x: hidden
}
/* chat bubbles */
.message {
border-radius: var(--radius-xl) !important;
border: none;
padding: var(--spacing-xl) !important;
font-size: 15px !important;
line-height: var(--line-md) !important;
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
}
[data-testid = "bot"] {
max-width: 85%;
border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
max-width: 85%;
width: auto !important;
border-bottom-right-radius: 0 !important;
}
.message p {
margin-top: 0.6em !important;
margin-bottom: 0.6em !important;
}
.message p:first-child { margin-top: 0 !important; }
.message p:last-of-type { margin-bottom: 0 !important; }
.message .md-message {
display: block;
padding: 0 !important;
}
.message .raw-message {
display: block;
padding: 0 !important;
white-space: pre-wrap;
}
.raw-message.hideM, .md-message.hideM {
display: none;
}
/* custom buttons */
.chuanhu-btn {
border-radius: 5px;
/* background-color: #E6E6E6 !important; */
color: rgba(120, 120, 120, 0.64) !important;
padding: 4px !important;
position: absolute;
right: -22px;
cursor: pointer !important;
transition: color .2s ease, background-color .2s ease;
}
.chuanhu-btn:hover {
background-color: rgba(167, 167, 167, 0.25) !important;
color: unset !important;
}
.chuanhu-btn:active {
background-color: rgba(167, 167, 167, 0.5) !important;
}
.chuanhu-btn:focus {
outline: none;
}
.copy-bot-btn {
/* top: 18px; */
bottom: 0;
}
.toggle-md-btn {
/* top: 0; */
bottom: 20px;
}
.copy-code-btn {
position: relative;
float: right;
font-size: 1em;
cursor: pointer;
}
.message-wrap>div img{
border-radius: 10px !important;
}
/* history message */
.wrap>.history-message {
padding: 10px !important;
}
.history-message {
/* padding: 0 !important; */
opacity: 80%;
display: flex;
flex-direction: column;
}
.history-message>.history-message {
padding: 0 !important;
}
.history-message>.message-wrap {
padding: 0 !important;
margin-bottom: 16px;
}
.history-message>.message {
margin-bottom: 16px;
}
.wrap>.history-message::after {
content: "";
display: block;
height: 2px;
background-color: var(--body-text-color-subdued);
margin-bottom: 10px;
margin-top: -10px;
clear: both;
}
.wrap>.history-message>:last-child::after {
content: "仅供查看";
display: block;
text-align: center;
color: var(--body-text-color-subdued);
font-size: 0.8em;
}
/* tables */
table {
margin: 1em 0;
border-collapse: collapse;
empty-cells: show;
}
td,th {
border: 1.2px solid var(--border-color-primary) !important;
padding: 0.2em;
}
thead {
background-color: rgba(175,184,193,0.2);
}
thead th {
padding: .5em .2em;
}
/* inline code */
.message :not(pre) code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: .2em .4em .1em .4em;
background-color: rgba(175,184,193,0.2);
}
/* code blocks */
.message pre code {
display: block;
overflow: auto;
white-space: pre;
background-color: hsla(0, 0%, 7%, 70%)!important;
border-radius: 10px;
padding: 1.2em 1em 0em .5em;
margin: 0.6em 2em 1em 0.2em;
color: #FFF;
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
.dark .message pre code {
background-color: hsla(0, 0%, 20%, 300%)!important;
}
.message pre {
padding: 0 !important;
}
.message pre code div.highlight {
background-color: unset !important;
}
button.copy-button {
display: none;
}
/* code highlight styles */
.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
.codehilite .err { color: #f85149 } /* Error */
.codehilite .esc { color: #c9d1d9 } /* Escape */
.codehilite .g { color: #c9d1d9 } /* Generic */
.codehilite .k { color: #ff7b72 } /* Keyword */
.codehilite .l { color: #a5d6ff } /* Literal */
.codehilite .n { color: #c9d1d9 } /* Name */
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
.codehilite .x { color: #c9d1d9 } /* Other */
.codehilite .p { color: #c9d1d9 } /* Punctuation */
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #ffa198 } /* Generic.Error */
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
.codehilite .go { color: #8b949e } /* Generic.Output */
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
.codehilite .ld { color: #79c0ff } /* Literal.Date */
.codehilite .m { color: #a5d6ff } /* Literal.Number */
.codehilite .s { color: #a5d6ff } /* Literal.String */
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #ffa657 } /* Name.Entity */
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
.codehilite .py { color: #79c0ff } /* Name.Property */
.codehilite .nt { color: #7ee787 } /* Name.Tag */
.codehilite .nv { color: #79c0ff } /* Name.Variable */
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */
.dark .codehilite .hll { background-color: #2C3B41 }
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
.dark .codehilite .err { color: #FF5370 } /* Error */
.dark .codehilite .esc { color: #89DDFF } /* Escape */
.dark .codehilite .g { color: #EEFFFF } /* Generic */
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
.dark .codehilite .l { color: #C3E88D } /* Literal */
.dark .codehilite .n { color: #EEFFFF } /* Name */
.dark .codehilite .o { color: #89DDFF } /* Operator */
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
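
The class names above (.k, .nf, .s1, and so on) are the short token names Pygments emits when it renders source code to HTML, so this stylesheet pairs directly with Pygments output. A minimal sketch of that pairing (the sample snippet is illustrative):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Pygments wraps its output in <div class="codehilite"> and tags each token
# with the short class names this stylesheet targets (.k, .nf, .s1, ...).
html = highlight('def greet():\n    return "hi"', PythonLexer(),
                 HtmlFormatter(cssclass="codehilite"))
print(html)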

themes/green.py (Normal file, 104 lines added)

@@ -0,0 +1,104 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')

def adjust_theme():
    try:
        set_theme = gr.themes.Soft(
            primary_hue=gr.themes.Color(
                c50="#EBFAF2",
                c100="#CFF3E1",
                c200="#A8EAC8",
                c300="#77DEA9",
                c400="#3FD086",
                c500="#02C160",
                c600="#06AE56",
                c700="#05974E",
                c800="#057F45",
                c900="#04673D",
                c950="#2E5541",
                name="small_and_beautiful",
            ),
            secondary_hue=gr.themes.Color(
                c50="#576b95",
                c100="#576b95",
                c200="#576b95",
                c300="#576b95",
                c400="#576b95",
                c500="#576b95",
                c600="#576b95",
                c700="#576b95",
                c800="#576b95",
                c900="#576b95",
                c950="#576b95",
            ),
            neutral_hue=gr.themes.Color(
                name="gray",
                c50="#f6f7f8",
                # c100="#f3f4f6",
                c100="#F2F2F2",
                c200="#e5e7eb",
                c300="#d1d5db",
                c400="#B2B2B2",
                c500="#808080",
                c600="#636363",
                c700="#515151",
                c800="#393939",
                # c900="#272727",
                c900="#2B2B2B",
                c950="#171717",
            ),
            radius_size=gr.themes.sizes.radius_sm,
        ).set(
            button_primary_background_fill="*primary_500",
            button_primary_background_fill_dark="*primary_600",
            button_primary_background_fill_hover="*primary_400",
            button_primary_border_color="*primary_500",
            button_primary_border_color_dark="*primary_600",
            button_primary_text_color="white",
            button_primary_text_color_dark="white",
            button_secondary_background_fill="*neutral_100",
            button_secondary_background_fill_hover="*neutral_50",
            button_secondary_background_fill_dark="*neutral_900",
            button_secondary_text_color="*neutral_800",
            button_secondary_text_color_dark="white",
            background_fill_primary="#F7F7F7",
            background_fill_primary_dark="#1F1F1F",
            block_title_text_color="*primary_500",
            block_title_background_fill_dark="*primary_900",
            block_label_background_fill_dark="*primary_900",
            input_background_fill="#F6F6F6",
            chatbot_code_background_color="*neutral_950",
            chatbot_code_background_color_dark="*neutral_950",
        )
        js = ''
        if LAYOUT == "TOP-DOWN":
            js = ""
        else:
            with open('themes/common.js', 'r', encoding='utf8') as f:
                js = f"<script>{f.read()}</script>"
        # add the cute Live2D mascot overlay
        if ADD_WAIFU:
            js += """
            <script src="file=docs/waifu_plugin/jquery.min.js"></script>
            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
        # inject the extra <script> tags by wrapping gradio's template renderer
        gradio_original_template_fn = gr.routes.templates.TemplateResponse
        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template
    except Exception:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')  # older gradio: fonts and colors cannot be customized
    return set_theme

with open("themes/green.css", "r", encoding="utf-8") as f:
    advanced_css = f.read()
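
For orientation, a minimal sketch of how a Gradio app would consume this module; the Blocks layout below is illustrative, not the repository's actual entry point:

import gradio as gr
from themes.green import adjust_theme, advanced_css

# adjust_theme() returns a gr.themes.Soft instance (or None on very old gradio);
# advanced_css carries the stylesheet shown above.
with gr.Blocks(theme=adjust_theme(), css=advanced_css) as demo:
    gr.Chatbot()
demo.launch()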

themes/theme.py (Normal file, 12 lines added)

@@ -0,0 +1,12 @@
import gradio as gr
from toolbox import get_conf
THEME, = get_conf('THEME')

if THEME == 'Chuanhu-Small-and-Beautiful':
    from .green import adjust_theme, advanced_css
    theme_declaration = "<h2 align=\"center\" class=\"small\">[Chuanhu-Small-and-Beautiful主题]</h2>"
else:
    from .default import adjust_theme, advanced_css
    theme_declaration = ""
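
Because theme.py re-exports whichever implementation the THEME option selects, downstream code stays theme-agnostic; a hedged sketch of the consuming side (the import path is the one defined above, the call site is illustrative):

from themes.theme import adjust_theme, advanced_css, theme_declaration

# one import path, regardless of which theme config.py picked
theme = adjust_theme()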

View File

@@ -4,6 +4,7 @@ import time
 import inspect
 import re
 import os
+import gradio
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
 pj = os.path.join
@@ -40,7 +41,7 @@ def ArgsGeneralWrapper(f):
     """
     Decorator that restructures the input arguments, changing their order and shape.
     """
-    def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
+    def decorated(request: gradio.Request, cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
         txt_passon = txt
         if txt == "" and txt2 != "": txt_passon = txt2
         # bring in a chatbot that carries cookies
@@ -54,13 +55,21 @@ def ArgsGeneralWrapper(f):
             'top_p':top_p,
             'max_length': max_length,
             'temperature':temperature,
+            'client_ip': request.client.host,
         }
         plugin_kwargs = {
             "advanced_arg": plugin_advanced_arg,
         }
         chatbot_with_cookie = ChatBotWithCookies(cookies)
         chatbot_with_cookie.write_list(chatbot)
-        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
+        if cookies.get('lock_plugin', None) is None:
+            # normal state
+            yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
+        else:
+            # handle the locked state used by a few special plugins
+            module, fn_name = cookies['lock_plugin'].split('->')
+            f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
+            yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
     return decorated
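
The 'lock_plugin' cookie packs a module path and a function name into one string joined by '->'; a minimal sketch of the resolution step in isolation (the spec value below is hypothetical):

import importlib

def resolve_locked_plugin(lock_spec: str):
    # e.g. lock_spec = 'some_package.some_module->some_fn' (hypothetical)
    module, fn_name = lock_spec.split('->')
    return getattr(importlib.import_module(module), fn_name)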
@@ -68,8 +77,21 @@ def update_ui(chatbot, history, msg='正常', **kwargs): # refresh the UI
     """
     Refresh the user interface.
     """
-    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时可用clear将其清空然后用for+append循环重新赋值。"
-    yield chatbot.get_cookies(), chatbot, history, msg
+    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时, 可用clear将其清空, 然后用for+append循环重新赋值。"
+    cookies = chatbot.get_cookies()
+    # fix what the chat window shows while a plugin holds the lock
+    if cookies.get('lock_plugin', None):
+        label = cookies.get('llm_model', "") + " | " + "正在锁定插件" + cookies.get('lock_plugin', None)
+        chatbot_gr = gradio.update(value=chatbot, label=label)
+        if cookies.get('label', "") != label: cookies['label'] = label  # remember the current label
+    elif cookies.get('label', None):
+        chatbot_gr = gradio.update(value=chatbot, label=cookies.get('llm_model', ""))
+        cookies['label'] = None  # clear the label
+    else:
+        chatbot_gr = chatbot
+    yield cookies, chatbot_gr, history, msg

 def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1): # refresh the UI
     """
@@ -192,7 +214,7 @@ def write_results_to_file(history, file_name=None):
             # remove everything that cannot be handled by utf8
             f.write(content.encode('utf-8', 'ignore').decode())
             f.write('\n\n')
-    res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
+    res = '以上材料已经被写入:\t' + os.path.abspath(f'./gpt_log/{file_name}')
     print(res)
     return res
@@ -445,8 +467,11 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
     import shutil
     if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
     new_path = os.path.join(f'./gpt_log/', rename_file)
+    # if the target already exists, remove it first
+    if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
+    # copy the file over
     if not os.path.exists(new_path): shutil.copyfile(file, new_path)
     # register the file in the chatbot cookie to avoid interference between users
     if chatbot:
         if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote']
         else: current = []
@@ -505,16 +530,24 @@ def on_report_generated(cookies, files, chatbot):
     chatbot.append(['报告如何远程获取?', f'报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。{file_links}'])
     return cookies, report_files, chatbot

+def load_chat_cookies():
+    API_KEY, LLM_MODEL, AZURE_API_KEY = get_conf('API_KEY', 'LLM_MODEL', 'AZURE_API_KEY')
+    if is_any_api_key(AZURE_API_KEY):
+        if is_any_api_key(API_KEY): API_KEY = API_KEY + ',' + AZURE_API_KEY
+        else: API_KEY = AZURE_API_KEY
+    return {'api_key': API_KEY, 'llm_model': LLM_MODEL}
+
 def is_openai_api_key(key):
     API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
+    return bool(API_MATCH_ORIGINAL)
+
+def is_azure_api_key(key):
     API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{32}$", key)
-    return bool(API_MATCH_ORIGINAL) or bool(API_MATCH_AZURE)
+    return bool(API_MATCH_AZURE)

 def is_api2d_key(key):
-    if key.startswith('fk') and len(key) == 41:
-        return True
-    else:
-        return False
+    API_MATCH_API2D = re.match(r"fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}$", key)
+    return bool(API_MATCH_API2D)

 def is_any_api_key(key):
     if ',' in key:
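
The three validators are pure shape checks, so they are easy to sanity-test; the keys below are fake placeholders that merely match the expected patterns (note the new API2D regex still yields the 41-character length the old startswith/len check enforced):

import re

openai_like = "sk-" + "a" * 48                 # 'sk-' plus 48 alphanumerics
azure_like = "0" * 32                          # 32 alphanumerics
api2d_like = "fk" + "a" * 6 + "-" + "b" * 32   # 2 + 6 + 1 + 32 = 41 chars

assert re.match(r"sk-[a-zA-Z0-9]{48}$", openai_like)
assert re.match(r"[a-zA-Z0-9]{32}$", azure_like)
assert re.match(r"fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}$", api2d_like)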
@@ -523,10 +556,10 @@ def is_any_api_key(key):
             if is_any_api_key(k): return True
         return False
     else:
-        return is_openai_api_key(key) or is_api2d_key(key)
+        return is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key)

 def what_keys(keys):
-    avail_key_list = {'OpenAI Key':0, "API2D Key":0}
+    avail_key_list = {'OpenAI Key':0, "Azure Key":0, "API2D Key":0}
     key_list = keys.split(',')

     for k in key_list:
@@ -537,7 +570,11 @@ def what_keys(keys):
         if is_api2d_key(k):
            avail_key_list['API2D Key'] += 1

-    return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个, API2D Key {avail_key_list['API2D Key']} 个"
+    for k in key_list:
+        if is_azure_api_key(k):
+            avail_key_list['Azure Key'] += 1
+
+    return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个, Azure Key {avail_key_list['Azure Key']} 个, API2D Key {avail_key_list['API2D Key']} 个"

 def select_api_key(keys, llm_model):
     import random
@@ -552,8 +589,12 @@ def select_api_key(keys, llm_model):
     for k in key_list:
         if is_api2d_key(k): avail_key_list.append(k)

+    if llm_model.startswith('azure-'):
+        for k in key_list:
+            if is_azure_api_key(k): avail_key_list.append(k)
+
     if len(avail_key_list) == 0:
-        raise RuntimeError(f"您提供的api-key不满足要求, 不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源。")
+        raise RuntimeError(f"您提供的api-key不满足要求, 不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai, azure和api2d请求源)。")

     api_key = random.choice(avail_key_list)  # random load balancing across available keys
     return api_key
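
A hedged usage sketch tying the helpers together; the keys are fake shape-only placeholders, the model name is illustrative, and the non-Azure branches of select_api_key are assumed from context since this hunk only shows the 'azure-' case:

keys = "sk-" + "a" * 48 + "," + "0" * 32           # one OpenAI-shaped key, one Azure-shaped key
print(what_keys(keys))                             # reports one OpenAI Key and one Azure Key
api_key = select_api_key(keys, "azure-gpt-3.5")    # the 'azure-' prefix draws from the Azure pool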

View File

@@ -1,5 +1,5 @@
 {
-    "version": 3.43,
+    "version": 3.46,
     "show_feature": true,
-    "new_feature": "修复Azure接口的BUG <-> 完善多语言模块 <-> 完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件"
-}
+    "new_feature": "临时修复theme的文件丢失问题 <-> 新增实时语音对话插件(自动断句, 脱手对话) <-> 支持加载自定义的ChatGLM2微调模型 <-> 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块 <-> 完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件"
+}