Compare commits


115 Commits

Author SHA1 Message Date
binary-husky
05186a2ef0 Update requirements.txt 2024-02-09 13:24:42 +08:00
binary-husky
3025d5be45 remove jsdelivr (#1547) 2024-02-09 13:17:14 +08:00
binary-husky
6c13bb7b46 patch issue #1538 2024-02-06 17:59:09 +08:00
binary-husky
c27e559f10 match sess-* key 2024-02-06 17:51:47 +08:00
binary-husky
cdb5288f49 fix issue #1532 2024-02-02 17:47:35 +08:00
hongyi-zhao
49c6fcfe97 Update config.py: supports gpt-4-turbo-preview (#1516)
* Update config.py: supports gpt-4-turbo-preview

supports gpt-4-turbo-preview

* Update config.py

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
2024-01-26 16:44:32 +08:00
hongyi-zhao
45fa0404eb Update bridge_all.py: supports gpt-4-turbo-preview (#1517)
* Update bridge_all.py: supports gpt-4-turbo-preview

supports gpt-4-turbo-preview

* Update bridge_all.py

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
2024-01-26 16:36:23 +08:00
binary-husky
f889ef7625 resolve issue #1510 2024-01-25 22:42:08 +08:00
binary-husky
a93bf4410d version 3.71 2024-01-25 22:18:43 +08:00
binary-husky
1c0764753a Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-01-25 22:05:13 +08:00
Menghuan1918
c847209ac9 Update "Generate multiple Mermaid charts" plugin with md file read (#1506)
* Update crazy_functional.py with new functionality to deal with PDFs

* Update crazy_functional.py and Mermaid.py for plugin_kwargs

* Update crazy_functional.py with new chart type: mind map

* Update SELECT_PROMPT and i_say_show_user messages

* Update ArgsReminder message in get_crazy_functions() function

* Update with read md file and update PROMPTS

* Revert the PROMPTS, as testing found the initial version worked best

* Update Mermaid chart generation function
2024-01-24 17:44:54 +08:00
binary-husky
4f9d40c14f remove redundant code 2024-01-24 01:42:31 +08:00
binary-husky
91926d24b7 handle a mermaid rendering edge case in core_functional.py 2024-01-24 01:38:06 +08:00
binary-husky
ef311c4859 localize mjs scripts 2024-01-24 01:06:58 +08:00
binary-husky
82795d3817 remove mask string feature for now (still buggy) 2024-01-24 00:44:27 +08:00
binary-husky
49e28a5a00 Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-01-23 15:48:49 +08:00
binary-husky
01def2e329 Merge branch 'master' into frontier 2024-01-23 15:48:06 +08:00
Menghuan1918
2291be2b28 Update "Generate multiple Mermaid charts" plugin (#1503)
* Update crazy_functional.py with new functionality to deal with PDFs

* Update crazy_functional.py and Mermaid.py for plugin_kwargs
2024-01-23 15:45:34 +08:00
binary-husky
c89ec7969f fix test import err 2024-01-23 09:52:58 +08:00
Menghuan1918
1506c19834 Update crazy_functional.py with new functionality to deal with PDFs (#1500) 2024-01-22 14:55:39 +08:00
binary-husky
a6fdc493b7 autogen plugin bug fix 2024-01-22 00:08:04 +08:00
binary-husky
113067c6ab Merge branch 'master' into frontier 2024-01-21 23:49:20 +08:00
Menghuan1918
7b6828ab07 plugin to generate Mermaid charts from the current conversation history (#1497)
* Add functionality to generate multiple types of Mermaid charts

* Update conditional statement in 解析历史输入 function
2024-01-21 23:41:39 +08:00
binary-husky
d818c38dfe better theme 2024-01-21 19:41:18 +08:00
binary-husky
08b4e9796e Update README.md (#1499)
* Update README.md

* Update README.md
2024-01-21 19:08:48 +08:00
binary-husky
b55d573819 auto prompt lang 2024-01-21 13:47:11 +08:00
binary-husky
06b0e800a2 fix a minor rendering bug 2024-01-21 12:19:04 +08:00
binary-husky
7bbaf05961 Merge branch 'master' into frontier 2024-01-20 22:33:41 +08:00
binary-husky
3b83279855 anim generation bug fix #896 2024-01-20 22:17:51 +08:00
binary-husky
37164a826e GengKanghua #896 2024-01-20 22:14:13 +08:00
binary-husky
dd2a97e7a9 draw project struct with mermaid 2024-01-20 21:23:56 +08:00
binary-husky
e579006c4a add set_multi_conf 2024-01-20 18:33:35 +08:00
binary-husky
031f19b6dd replace an incorrect variable name 2024-01-20 18:28:54 +08:00
binary-husky
142b516749 gpt_academic text mask imp 2024-01-20 18:00:06 +08:00
binary-husky
f2e73aa580 hotfix for a sudden Zhipu API breakage 2024-01-19 21:09:27 +08:00
binary-husky
8565a35cf7 readme update 2024-01-18 23:21:11 +08:00
binary-husky
72d78eb150 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2024-01-18 23:07:05 +08:00
binary-husky
7aeda537ac remove debug btn 2024-01-18 23:05:47 +08:00
binary-husky
6cea17d4b7 remove debug btn 2024-01-18 22:33:49 +08:00
binary-husky
20bc51d747 Merge branch 'master' into frontier 2024-01-18 22:23:26 +08:00
XIao
b8ebefa427 Google gemini fix (#1473)
* adapt google gemini: extract files from the user input

* Update README.md (#1477)

* Update README.md

* Update README.md

* Update requirements.txt (#1480)

* welcome glm4 from Zhipu!

* Update README.md (#1484)

* Update README.md (#1485)

* update zhipu

* Fix translation task name in core_functional.py

* zhipuai version problem

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>
2024-01-18 18:06:07 +08:00
binary-husky
dcc9326f0b zhipuai version problem 2024-01-18 17:51:20 +08:00
binary-husky
94fc396eb9 Fix translation task name in core_functional.py 2024-01-18 15:32:17 +08:00
binary-husky
e594e1b928 update zhipu 2024-01-18 00:32:51 +08:00
binary-husky
8fe545d97b update zhipu 2024-01-18 00:31:53 +08:00
binary-husky
6f978fa72e Merge branch 'master' into frontier 2024-01-17 12:37:07 +08:00
binary-husky
19be471aa8 Refactor core_functional.py 2024-01-17 12:34:42 +08:00
binary-husky
38956934fd Update README.md (#1485) 2024-01-17 11:45:49 +08:00
binary-husky
32439e14b5 Update README.md (#1484) 2024-01-17 11:30:09 +08:00
binary-husky
317389bf4b Merge branch 'master' into frontier 2024-01-16 21:53:53 +08:00
binary-husky
2c740fc641 welcome glm4 from Zhipu! 2024-01-16 21:51:14 +08:00
binary-husky
96832a8228 Update requirements.txt (#1480) 2024-01-16 20:08:32 +08:00
binary-husky
361557da3c roll version 2024-01-16 02:15:35 +08:00
binary-husky
5f18d4a1af Update README.md (#1477)
* Update README.md

* Update README.md
2024-01-16 02:14:08 +08:00
binary-husky
0d10bc570f bug fix 2024-01-16 01:22:50 +08:00
binary-husky
3ce7d9347d dark support 2024-01-16 00:33:11 +08:00
Keldos
8a78d7b89f adapt mermaid to dark mode (#1476)
Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
2024-01-16 00:32:12 +08:00
binary-husky
0e43b08837 sync 2024-01-16 00:29:46 +08:00
binary-husky
74bced2d35 add mind map edit button 2024-01-15 13:41:21 +08:00
binary-husky
961a24846f remove console log 2024-01-15 11:50:37 +08:00
binary-husky
b7e4744f28 apply to other themes 2024-01-15 11:49:04 +08:00
binary-husky
71adc40901 support diagram plotting via mermaid ! 2024-01-15 02:49:21 +08:00
binary-husky
a2099f1622 fix code highlight problem 2024-01-15 00:07:07 +08:00
binary-husky
c0a697f6c8 publish gradio via jsdelivr 2024-01-14 16:46:10 +08:00
binary-husky
bdde1d2fd7 format code 2024-01-14 04:18:38 +08:00
binary-husky
63373ab3b6 Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-01-14 03:41:47 +08:00
binary-husky
fb6566adde add todo 2024-01-14 03:41:23 +08:00
binary-husky
9f2ef9ec49 limit scroll 2024-01-14 02:11:07 +08:00
binary-husky
35c1aa21e4 limit scroll 2024-01-14 01:55:59 +08:00
binary-husky
627d739720 add interface code for Volcano Engine large models 2024-01-13 22:33:08 +08:00
binary-husky
37f15185b6 Merge branch 'master' into frontier 2024-01-13 18:23:55 +08:00
binary-husky
9643e1c25f code demo fix 2024-01-13 18:23:06 +08:00
binary-husky
28eae2f80e Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-01-13 18:04:21 +08:00
binary-husky
7ab379688e format source code 2024-01-13 18:04:09 +08:00
binary-husky
3d4c6f54f1 format source code 2024-01-13 18:00:52 +08:00
binary-husky
1714116a89 break toolbox.py down into multiple files 2024-01-13 16:10:46 +08:00
hongyi-zhao
2bc65a99ca Update bridge_all.py (#1472)
Remove the "chatgpt_website" function, dropping support for interfaces based on reverse engineering. The corresponding implementation project, https://github.com/acheong08/ChatGPT-to-API/, has been archived by its developer, and by the nature of its approach it can never be robust or reliable, so it is not sustainable to maintain.
2024-01-13 14:35:04 +08:00
binary-husky
0a2805513e better gui interaction (#1459) 2024-01-07 19:13:12 +08:00
binary-husky
d698b96209 Merge branch 'master' into frontier 2024-01-07 18:49:56 +08:00
binary-husky
6b1c6f0bf7 better gui interaction 2024-01-07 18:49:08 +08:00
binary-husky
c22867b74c Merge pull request #1458 from binary-husky/frontier
introduce Gemini & Format code
2024-01-07 16:24:33 +08:00
binary-husky
2abe665521 Merge branch 'master' into frontier 2024-01-05 16:12:41 +08:00
binary-husky
b0e6c4d365 change ui prompt 2024-01-05 16:11:30 +08:00
fzcqc
d883c7f34b fix: add a backslash to expected_words (#1442) 2024-01-03 19:57:10 +08:00
Menghuan1918
aba871342f fix an incorrect variable used in the splitting function (#1443)
* Fix force_breakdown function parameter name

* Add handling for PDFs with lowercase starting paragraphs

* Change first lowercase word in meta_txt to uppercase
2024-01-03 19:49:17 +08:00
qingxu fu
37744a9cb1 jpeg type align for gemini 2023-12-31 20:28:39 +08:00
qingxu fu
480516380d re-format code with pre-commit 2023-12-31 19:30:32 +08:00
qingxu fu
60ba712131 use legacy image io for gemini 2023-12-31 19:02:40 +08:00
XIao
a7c960dcb0 adapt google gemini: extract files from the user input (#1419)
adapt google gemini: extract files from the user input
2023-12-31 18:05:55 +08:00
binary-husky
a96f842b3a minor ui change 2023-12-30 02:57:42 +08:00
binary-husky
417ca91e23 minor css change 2023-12-30 00:55:52 +08:00
binary-husky
ef8fadfa18 fix ui element padding 2023-12-29 15:16:33 +08:00
binary-husky
865c4ca993 Update README.md 2023-12-26 22:51:56 +08:00
binary-husky
31304f481a remove console log 2023-12-25 22:57:09 +08:00
binary-husky
1bd3637d32 modify image gen plugin user interaction 2023-12-25 22:24:12 +08:00
binary-husky
160a683667 smart input panel swap 2023-12-25 22:05:14 +08:00
binary-husky
49ca03ca06 Merge branch 'master' into frontier 2023-12-25 21:43:33 +08:00
binary-husky
c625348ce1 smarter chatbot height adjustment 2023-12-25 21:26:24 +08:00
binary-husky
6d4a74893a Merge pull request #1415 from binary-husky/frontier
Merge Frontier Branch
2023-12-25 20:18:56 +08:00
qingxu fu
5c7499cada compat with some third party api 2023-12-25 17:21:35 +08:00
binary-husky
f522691529 Merge pull request #1398 from leike0813/frontier
add support for the Tongyi Qianwen online model series & add plugins
2023-12-24 18:21:45 +08:00
binary-husky
ca85573ec1 Update README.md 2023-12-24 18:14:57 +08:00
binary-husky
2c7bba5c63 change dash scope api key check behavior 2023-12-23 21:35:42 +08:00
binary-husky
e22f0226d5 Merge branch 'master' into leike0813-frontier 2023-12-23 21:00:38 +08:00
binary-husky
0f250305b4 add urllib3 version limit 2023-12-23 20:59:32 +08:00
binary-husky
7606f5c130 name fix 2023-12-23 20:55:58 +08:00
binary-husky
4f0dcc431c Merge branch 'frontier' of https://github.com/leike0813/gpt_academic into leike0813-frontier 2023-12-23 20:42:43 +08:00
binary-husky
6ca0dd2f9e Merge pull request #1410 from binary-husky/frontier
fix spark image understanding api
2023-12-23 17:49:35 +08:00
binary-husky
e3e9921f6b correct the misuse of spark image understanding 2023-12-23 17:46:25 +08:00
binary-husky
867ddd355e adjust green theme layout 2023-12-22 21:59:18 +08:00
leike0813
c60a7452bf Improve NOUGAT pdf plugin
Add an API version of NOUGAT plugin
Add advanced argument support to NOUGAT plugin

Adapt new text breakdown function

bugfix
2023-12-20 08:57:27 +08:00
leike0813
68a49d3758 Add 2 plugins
This effectively splits the 批量总结PDF文档 plugin into two parts: a cheap model does the rough work, and the crucial final summary is handed to GPT-4, reducing usage cost.
批量总结PDF文档_初步: preliminary PDF summarization, producing one md file per PDF.
批量总结Markdown文档_进阶: condenses and merges all md files into a single highly distilled md file; the output of 批量总结PDF文档_初步 can be used directly as its input.
2023-12-20 07:44:53 +08:00
leike0813
ac3d4cf073 Add support for Aliyun Qwen online models.
Rename model tag "qwen" to "qwen-local"
Add model tag "qwen-turbo", "qwen-plus", "qwen-max"
Add corresponding model interfaces in request_llms/bridge_all.py
Add configuration variable "DASHSCOPE_API_KEY"
Rename request_llms/bridge_qwen.py to bridge_qwen_local.py to distinguish it from the online model interface
2023-12-20 07:37:26 +08:00
binary-husky
9479dd984c avoid adding the same file multiple times
to the chatbot's files_to_promote list
2023-12-19 19:43:03 +08:00
binary-husky
3c271302cc improve long text breakdown performance 2023-12-19 19:30:44 +08:00
134 changed files with 13675 additions and 2064 deletions


@@ -34,7 +34,7 @@ body:
- Others | 非最新版
validations:
required: true
- type: dropdown
id: os
attributes:
@@ -47,7 +47,7 @@ body:
- Docker
validations:
required: true
- type: textarea
id: describe
attributes:
@@ -55,7 +55,7 @@ body:
description: Describe the bug | 简述
validations:
required: true
- type: textarea
id: screenshot
attributes:
@@ -63,15 +63,9 @@ body:
description: Screen Shot | 有帮助的截图
validations:
required: true
- type: textarea
id: traceback
attributes:
label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback如有 + 帮助我们复现的测试材料样本(如有)
description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback如有 + 帮助我们复现的测试材料样本(如有)


@@ -21,8 +21,3 @@ body:
attributes:
label: Feature Request | 功能请求
description: Feature Request | 功能请求


@@ -15,7 +15,7 @@ jobs:
permissions:
issues: write
pull-requests: read
steps:
- uses: actions/stale@v8
with:

.gitignore

@@ -152,3 +152,4 @@ request_llms/moss
media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml

Dockerfile

@@ -18,7 +18,6 @@ WORKDIR /gpt
# 安装大部分依赖利用Docker缓存加速以后的构建 (以下三行,可以删除)
COPY requirements.txt ./
COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
RUN pip3 install -r requirements.txt

README.md

@@ -1,8 +1,8 @@
> **Caution**
>
> 2023.11.12: 某些依赖包尚不兼容python 3.12推荐python 3.11。
>
> 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗
> [!IMPORTANT]
> 2024.1.18: 更新3.70版本支持Mermaid绘图库让大模型绘制脑图
> 2024.1.17: 恭迎GLM4全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型
> 2024.1.17: 某些依赖包尚不兼容python 3.12推荐python 3.11。
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展
<br>
@@ -42,13 +42,11 @@ If you like this project, please give it a Star.
Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
<br>
> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR
>
> 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代您也可以随时自行点击相关函数插件调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
> [!NOTE]
> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代您也可以随时自行点击相关函数插件调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki
> [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
>
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
>
> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
<br><br>
@@ -56,7 +54,12 @@ Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanes
功能(⭐= 近期新增功能) | 描述
--- | ---
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary)上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/)[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)[智谱API](https://open.bigmodel.cn/)DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary)上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/)[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)[智谱GLM4](https://open.bigmodel.cn/)DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等3.7版本)
⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen探索多Agent的智能涌现可能
⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
@@ -65,22 +68,16 @@ Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanes
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
批量注释生成 | [插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
chat分析报告生成 | [插件] 运行后自动生成总结汇报
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen探索多Agent的智能涌现可能
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型提供ChatGLM2微调辅助插件
更多LLM模型接入支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI在Python中直接调用本项目的所有函数插件开发中
⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……
</div>
@@ -111,7 +108,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>
- 多种大语言模型混合调用ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4
- 多种大语言模型混合调用ChatGLM + OpenAI-GPT3.5 + GPT4
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>
@@ -119,7 +116,26 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<br><br>
# Installation
### 安装方法I直接运行 (Windows, Linux or MacOS)
```mermaid
flowchart TD
A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)")
W1 --> W11["1. Python pip包管理依赖"]
W1 --> W12["2. Anaconda包管理依赖推荐⭐"]
A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"]
W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"]
W2 --> k2["2. 仅在线模型GPT, GLM4等镜像"]
W2 --> k3["3. 在线模型 + Latex的大镜像"]
A --> W4["IV. 🚀其他部署方法"]
W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"]
W4 --> C2["2. Huggingface, Sealos远程部署"]
W4 --> C4["3. ... 其他 ..."]
```
### 安装方法I直接运行 (Windows, Linux or MacOS)
1. 下载项目
@@ -132,7 +148,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
在`config.py`中配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,以确保更新或其他用户无法轻易查看您的私有配置 」。
「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,从而确保自动更新时不会丢失配置 」。
「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。
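The priority chain described above (`环境变量` > `config_private.py` > `config.py`) can be sketched in a few lines of Python; this is an illustrative approximation, not the project's actual reader (which lives behind `toolbox.get_conf`):

```python
import importlib
import os

def get_conf_sketch(key, default=None):
    # 1. Environment variables override everything
    if key in os.environ:
        return os.environ[key]
    # 2. Then config_private.py, if the user created one
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, key):
            return getattr(private, key)
    except ImportError:
        pass
    # 3. Finally the shipped defaults in config.py
    config = importlib.import_module("config")
    return getattr(config, key, default)
```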
@@ -152,11 +168,11 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端请点击展开此处</summary>
<p>
【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端需要额外安装更多依赖前提条件熟悉Python + 用过Pytorch + 电脑配置够强):
【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端需要额外安装更多依赖前提条件熟悉Python + 用过Pytorch + 电脑配置够强):
```sh
# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1以上默认安装的为torch+cpu版使用cuda需要卸载torch重新安装torch+cuda 2如因本机配置不够无法加载模型可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1以上默认安装的为torch+cpu版使用cuda需要卸载torch重新安装torch+cuda 2如因本机配置不够无法加载模型可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【可选步骤II】支持复旦MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -197,7 +213,7 @@ pip install peft
docker-compose up
```
1. 仅ChatGPT+文心一言+spark等在线模型推荐大多数人选择
1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型推荐大多数人选择
[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
@@ -209,7 +225,7 @@ pip install peft
P.S. 如果需要依赖Latex的插件功能请见Wiki。另外您也可以直接使用方案4或者方案0获取Latex功能。
2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
``` sh
@@ -243,8 +259,8 @@ P.S. 如果需要依赖Latex的插件功能请见Wiki。另外您也可以
```python
"超级英译中": {
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
"Prefix": "请翻译把下面一段内容成中文然后用一个markdown表格逐一解释文中出现的专有名词\n\n",
"Prefix": "请翻译把下面一段内容成中文然后用一个markdown表格逐一解释文中出现的专有名词\n\n",
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。
"Suffix": "",
},
@@ -308,9 +324,9 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>
8. OpenAI音频解析与总结
8. 基于mermaid的流图、脑图绘制
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
</div>
9. Latex全文校对纠错
@@ -327,8 +343,8 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
### II版本:
- version 3.70todo: 优化AutoGen插件主题并设计一系列衍生插件
- version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件
- version 3.70: 引入Mermaid绘图实现GPT画脑图等功能
- version 3.60: 引入AutoGen作为新一代插件的基石
- version 3.57: 支持GLM3星火v3文心一言v4修复本地模型的并发BUG
- version 3.56: 支持动态追加基础功能按钮新汇报PDF汇总页面
@@ -361,6 +377,32 @@ GPT Academic开发者QQ群`610599535`
- 某些浏览器翻译插件干扰此软件前端的运行
- 官方Gradio目前有很多兼容性问题请**务必使用`requirement.txt`安装Gradio**
```mermaid
timeline LR
title GPT-Academic项目发展历程
section 2.x
1.0~2.2: 基础功能: 引入模块化函数插件: 可折叠式布局: 函数插件支持热重载
2.3~2.5: 增强多线程交互性: 新增PDF全文翻译功能: 新增输入区切换位置的功能: 自更新
2.6: 重构了插件结构: 提高了交互性: 加入更多插件
section 3.x
3.0~3.1: 对chatglm支持: 对其他小型llm支持: 支持同时问询多个gpt模型: 支持多个apikey负载均衡
3.2~3.3: 函数插件支持更多参数接口: 保存对话功能: 解读任意语言代码: 同时询问任意的LLM组合: 互联网信息综合功能
3.4: 加入arxiv论文翻译: 加入latex论文批改功能
3.44: 正式支持Azure: 优化界面易用性
3.46: 自定义ChatGLM2微调模型: 实时语音对话
3.49: 支持阿里达摩院通义千问: 上海AI-Lab书生: 讯飞星火: 支持百度千帆平台 & 文心一言
3.50: 虚空终端: 支持插件分类: 改进UI: 设计新主题
3.53: 动态选择不同界面主题: 提高稳定性: 解决多用户冲突问题
3.55: 动态代码解释器: 重构前端界面: 引入悬浮窗口与菜单栏
3.56: 动态追加基础功能按钮: 新汇报PDF汇总页面
3.57: GLM3, 星火v3: 支持文心一言v4: 修复本地模型的并发BUG
3.60: 引入AutoGen
3.70: 引入Mermaid绘图: 实现GPT画脑图等功能
3.80(TODO): 优化AutoGen插件主题: 设计衍生插件
```
### III主题
可以通过修改`THEME`选项config.py变更主题
1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
@@ -370,8 +412,8 @@ GPT Academic开发者QQ群`610599535`
1. `master` 分支: 主分支,稳定版
2. `frontier` 分支: 开发分支,测试版
3. 如何接入其他大模型:[接入其他大模型](request_llms/README.md)
3. 如何[接入其他大模型](request_llms/README.md)
4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)
### V参考与学习

config.py

@@ -86,14 +86,17 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
"chatglm3", "moss", "claude-2"]
# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
"gemini-pro", "chatglm3", "claude-2", "zhipuai"]
# P.S. 其他可用的模型还包括 [
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
# ]
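The comment at the top of this block notes that `LLM_MODEL` must be included in `AVAIL_LLM_MODELS`; a quick sanity check (an illustrative sketch, not part of the shipped config) would be:

```python
# Sketch: validate the documented invariant before launching the app
LLM_MODEL = "gpt-3.5-turbo-16k"
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview",
                    "gpt-3.5-turbo-16k", "gpt-3.5-turbo"]
assert LLM_MODEL in AVAIL_LLM_MODELS, "LLM_MODEL must be listed in AVAIL_LLM_MODELS"
```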
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型请从AVAIL_LLM_MODELS中选择并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
@@ -103,7 +106,11 @@ MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
# 选择本地模型变体只有当AVAIL_LLM_MODELS包含了对应本地模型时才会起作用
# 如果你选择Qwen系列的模型那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
# 也可以是具体的模型路径
QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
# 百度千帆LLM_MODEL="qianfan"
@@ -188,7 +195,13 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# 接入智谱大模型
ZHIPUAI_API_KEY = ""
ZHIPUAI_MODEL = "chatglm_turbo"
ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
# # 火山引擎YUNQUE大模型
# YUNQUE_SECRET_KEY = ""
# YUNQUE_ACCESS_KEY = ""
# YUNQUE_MODEL = ""
# Claude API KEY
@@ -199,6 +212,10 @@ ANTHROPIC_API_KEY = ""
CUSTOM_API_KEY_PATTERN = ""
# Google Gemini API-Key
GEMINI_API_KEY = ''
# HUGGINGFACE的TOKEN下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
HUGGINGFACE_ACCESS_TOKEN = "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"
@@ -284,6 +301,12 @@ NUM_CUSTOM_BASIC_BTN = 4
│ ├── ZHIPUAI_API_KEY
│ └── ZHIPUAI_MODEL
├── "qwen-turbo" 等通义千问大模型
│ └── DASHSCOPE_API_KEY
├── "Gemini"
│ └── GEMINI_API_KEY
└── "newbing" Newbing接口不再稳定不推荐使用
├── NEWBING_STYLE
└── NEWBING_COOKIES
@@ -300,7 +323,7 @@ NUM_CUSTOM_BASIC_BTN = 4
├── "jittorllms_pangualpha"
├── "jittorllms_llama"
├── "deepseekcoder"
├── "qwen"
├── "qwen-local"
├── RWKV的支持见Wiki
└── "llama2"

core_functional.py

@@ -3,30 +3,69 @@
# 'stop' 颜色对应 theme.py 中的 color_er
import importlib
from toolbox import clear_line_break
from toolbox import apply_gpt_academic_string_mask_langbased
from toolbox import build_gpt_academic_masked_string_langbased
from textwrap import dedent
def get_core_functions():
return {
"英语学术润色": {
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
"Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
r"Firstly, you should provide the polished paragraph. "
r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
"学术语料润色": {
# [1*] 前缀字符串,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等。
# 这里填一个提示词字符串就行了,这里为了区分中英文情景搞复杂了一点
"Prefix": build_gpt_academic_masked_string_langbased(
text_show_english=
r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
r"Firstly, you should provide the polished paragraph. "
r"Secondly, you should list all your modification and explain the reasons to do so in markdown table.",
text_show_chinese=
r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,"
r"同时分解长句减少重复并提供改进建议。请先提供文本的更正版本然后在markdown表格中列出修改的内容并给出修改的理由:"
) + "\n\n",
# [2*] 后缀字符串,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
"Suffix": r"",
# 按钮颜色 (默认 secondary)
# [3] 按钮颜色 (可选参数,默认 secondary)
"Color": r"secondary",
# 按钮是否可见 (默认 True即可见)
# [4] 按钮是否可见 (可选参数,默认 True即可见)
"Visible": True,
# 是否在触发时清除历史 (默认 False即不处理之前的对话历史)
"AutoClearHistory": False
# [5] 是否在触发时清除历史 (可选参数,默认 False即不处理之前的对话历史)
"AutoClearHistory": False,
# [6] 文本预处理 (可选参数,默认 None举例写个函数移除所有的换行符
"PreProcess": None,
},
"中文学术润色": {
"Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
"Suffix": r"",
"总结绘制脑图": {
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
"Prefix": r"",
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
"Suffix":
# dedent() 函数用于去除多行字符串的缩进
dedent("\n"+r'''
==============================
使用mermaid flowchart对以上文本进行总结概括上述段落的内容以及内在逻辑关系例如
以下是对以上文本的总结以mermaid flowchart的形式展示
```mermaid
flowchart LR
A["节点名1"] --> B("节点名2")
B --> C{"节点名3"}
C --> D["节点名4"]
C --> |"箭头名1"| E["节点名5"]
C --> |"箭头名2"| F["节点名6"]
```
警告:
1使用中文
2节点名字使用引号包裹如["Laptop"]
3`|` 和 `"`之间不要存在空格
4根据情况选择flowchart LR从左到右或者flowchart TD从上到下
'''),
},
"查找语法错误": {
"Prefix": r"Help me ensure that the grammar and the spelling is correct. "
r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
@@ -46,42 +85,61 @@ def get_core_functions():
"Suffix": r"",
"PreProcess": clear_line_break, # 预处理:清除换行符
},
"中译英": {
"Prefix": r"Please translate following sentence to English:" + "\n\n",
"Suffix": r"",
},
"学术中英互译": {
"Prefix": r"I want you to act as a scientific English-Chinese translator, " +
r"I will provide you with some paragraphs in one language " +
r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
r"Do not repeat the original provided paragraphs after translation. " +
r"You should use artificial intelligence tools, " +
r"such as natural language processing, and rhetorical knowledge " +
r"and experience about effective writing techniques to reply. " +
r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
"Suffix": "",
"Color": "secondary",
"学术英中互译": {
"Prefix": build_gpt_academic_masked_string_langbased(
text_show_chinese=
r"I want you to act as a scientific English-Chinese translator, "
r"I will provide you with some paragraphs in one language "
r"and your task is to accurately and academically translate the paragraphs only into the other language. "
r"Do not repeat the original provided paragraphs after translation. "
r"You should use artificial intelligence tools, "
r"such as natural language processing, and rhetorical knowledge "
r"and experience about effective writing techniques to reply. "
r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:",
text_show_english=
r"你是经验丰富的翻译,请把以下学术文章段落翻译成中文,"
r"并同时充分考虑中文的语法、清晰、简洁和整体可读性,"
r"必要时,你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯。"
r"你需要翻译的文本如下:"
) + "\n\n",
"Suffix": r"",
},
"英译中": {
"Prefix": r"翻译成地道的中文:" + "\n\n",
"Suffix": r"",
"Visible": False,
"Visible": False,
},
"找图片": {
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL" +
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL"
r"然后请使用Markdown格式封装并且不要有反斜线不要用代码块。现在请按以下描述给我发送图片" + "\n\n",
"Suffix": r"",
"Visible": False,
"Visible": False,
},
"解释代码": {
"Prefix": r"请解释以下代码:" + "\n```\n",
"Suffix": "\n```\n",
},
"参考文献转Bib": {
"Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
r"Items need to be transformed:",
"Visible": False,
"Prefix": r"Here are some bibliography items, please transform them into bibtex style."
r"Note that, reference styles maybe more than one kind, you should transform each item correctly."
r"Items need to be transformed:" + "\n\n",
"Visible": False,
"Suffix": r"",
}
}
@@ -98,8 +156,18 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
return inputs, history
else:
# 预制功能
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
if "PreProcess" in core_functional[additional_fn]:
if core_functional[additional_fn]["PreProcess"] is not None:
inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
# 为字符串加上上面定义的前缀和后缀。
inputs = apply_gpt_academic_string_mask_langbased(
string = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"],
lang_reference = inputs,
)
if core_functional[additional_fn].get("AutoClearHistory", False):
history = []
return inputs, history
if __name__ == "__main__":
t = get_core_functions()["总结绘制脑图"]
print(t["Prefix"] + t["Suffix"])
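For reference, the Prefix/Suffix mechanism defined above composes the final prompt in the same spirit as the `__main__` check; a minimal sketch, assuming `core_functional.py` is importable from the project root:

```python
from core_functional import get_core_functions

# Wrap a user input with one of the button definitions, mirroring
# what handle_core_functionality() does before sending to the model.
fn = get_core_functions()["查找语法错误"]
user_text = "This are a example sentence with mistake."
composed = fn["Prefix"] + user_text + fn.get("Suffix", "")
print(composed)
```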

crazy_functional.py

@@ -32,115 +32,122 @@ def get_crazy_functions():
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
from crazy_functions.Latex全文润色 import Latex中文润色
from crazy_functions.Latex全文润色 import Latex英文纠错
from crazy_functions.Latex全文翻译 import Latex中译英
from crazy_functions.Latex全文翻译 import Latex英译中
from crazy_functions.批量Markdown翻译 import Markdown中译英
from crazy_functions.虚空终端 import 虚空终端
from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表
function_plugins = {
"虚空终端": {
"Group": "对话|编程|学术|智能体",
"Color": "stop",
"AsButton": True,
"Function": HotReload(虚空终端)
"Function": HotReload(虚空终端),
},
"解析整个Python项目": {
"Group": "编程",
"Color": "stop",
"AsButton": True,
"Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
"Function": HotReload(解析一个Python项目)
"Function": HotReload(解析一个Python项目),
},
"载入对话历史存档(先上传存档或输入路径)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "载入对话历史存档 | 输入参数为路径",
"Function": HotReload(载入对话历史存档)
"Function": HotReload(载入对话历史存档),
},
"删除所有本地对话历史记录(谨慎操作)": {
"Group": "对话",
"AsButton": False,
"Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
"Function": HotReload(删除所有本地对话历史记录)
"Function": HotReload(删除所有本地对话历史记录),
},
"清除所有缓存文件(谨慎操作)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
"Function": HotReload(清除缓存)
"Function": HotReload(清除缓存),
},
"生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断",
"Function": HotReload(生成多种Mermaid图表),
"AdvancedArgs": True,
"ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
},
"批量总结Word文档": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"Info": "批量总结word文档 | 输入参数为路径",
"Function": HotReload(总结word文档)
"Function": HotReload(总结word文档),
},
"解析整个Matlab项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
"Function": HotReload(解析一个Matlab项目)
"Function": HotReload(解析一个Matlab项目),
},
"解析整个C++项目头文件": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
"Function": HotReload(解析一个C项目的头文件)
"Function": HotReload(解析一个C项目的头文件),
},
"解析整个C++项目(.cpp/.hpp/.c/.h": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h| 输入参数为路径",
"Function": HotReload(解析一个C项目)
"Function": HotReload(解析一个C项目),
},
"解析整个Go项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Golang项目)
"Function": HotReload(解析一个Golang项目),
},
"解析整个Rust项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Rust项目)
"Function": HotReload(解析一个Rust项目),
},
"解析整个Java项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Java项目)
"Function": HotReload(解析一个Java项目),
},
"解析整个前端项目js,ts,css等": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个前端项目的所有源文件js,ts,css等 | 输入参数为路径",
"Function": HotReload(解析一个前端项目)
"Function": HotReload(解析一个前端项目),
},
"解析整个Lua项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Lua项目)
"Function": HotReload(解析一个Lua项目),
},
"解析整个CSharp项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个CSharp项目)
"Function": HotReload(解析一个CSharp项目),
},
"解析Jupyter Notebook文件": {
"Group": "编程",
@@ -156,103 +163,104 @@ def get_crazy_functions():
"Color": "stop",
"AsButton": False,
"Info": "读取Tex论文并写摘要 | 输入参数为路径",
"Function": HotReload(读文章写摘要)
"Function": HotReload(读文章写摘要),
},
"翻译README或MD": {
"Group": "编程",
"Color": "stop",
"AsButton": True,
"Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
"Function": HotReload(Markdown英译中)
"Function": HotReload(Markdown英译中),
},
"翻译Markdown或README支持Github链接": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
"Function": HotReload(Markdown英译中)
"Function": HotReload(Markdown英译中),
},
"批量生成函数注释": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量生成函数的注释 | 输入参数为路径",
"Function": HotReload(批量生成函数注释)
"Function": HotReload(批量生成函数注释),
},
"保存当前的对话": {
"Group": "对话",
"AsButton": True,
"Info": "保存当前的对话 | 不需要输入参数",
"Function": HotReload(对话历史存档)
"Function": HotReload(对话历史存档),
},
"[多线程Demo]解析此项目本身(源码自译解)": {
"Group": "对话|编程",
"AsButton": False, # 加入下拉菜单中
"Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
"Function": HotReload(解析项目本身)
"Function": HotReload(解析项目本身),
},
"历史上的今天": {
"Group": "对话",
"AsButton": True,
"Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
"Function": HotReload(高阶功能模板函数)
"Function": HotReload(高阶功能模板函数),
},
"精准翻译PDF论文": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"AsButton": True,
"Info": "精准翻译PDF论文为中文 | 输入参数为路径",
"Function": HotReload(批量翻译PDF文档)
"Function": HotReload(批量翻译PDF文档),
},
"询问多个GPT模型": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Function": HotReload(同时问询)
"Function": HotReload(同时问询),
},
"批量总结PDF文档": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量总结PDF文档的内容 | 输入参数为路径",
"Function": HotReload(批量总结PDF文档)
"Function": HotReload(批量总结PDF文档),
},
"谷歌学术检索助手输入谷歌学术搜索页url": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
"Function": HotReload(谷歌检索小助手)
"Function": HotReload(谷歌检索小助手),
},
"理解PDF文档内容 模仿ChatPDF": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
"Function": HotReload(理解PDF文档内容标准文件输入)
"Function": HotReload(理解PDF文档内容标准文件输入),
},
"英文Latex项目全文润色输入路径或上传压缩包": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英文润色)
},
"英文Latex项目全文纠错输入路径或上传压缩包": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英文纠错)
"Function": HotReload(Latex英文润色),
},
"中文Latex项目全文润色输入路径或上传压缩包": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex中文润色)
"Function": HotReload(Latex中文润色),
},
# 已经被新插件取代
# "英文Latex项目全文纠错输入路径或上传压缩包": {
# "Group": "学术",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex英文纠错),
# },
# 已经被新插件取代
# "Latex项目全文中译英输入路径或上传压缩包": {
# "Group": "学术",
@@ -261,7 +269,6 @@ def get_crazy_functions():
# "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex中译英)
# },
# 已经被新插件取代
# "Latex项目全文英译中输入路径或上传压缩包": {
# "Group": "学术",
@@ -270,339 +277,405 @@ def get_crazy_functions():
# "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
# "Function": HotReload(Latex英译中)
# },
"批量Markdown中译英输入路径或上传压缩包": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
"Function": HotReload(Markdown中译英)
"Function": HotReload(Markdown中译英),
},
}
# -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
try:
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
function_plugins.update({
"一键下载arxiv论文并翻译摘要先在input输入编号如1812.10695": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
"Function": HotReload(下载arxiv论文并翻译摘要)
function_plugins.update(
{
"一键下载arxiv论文并翻译摘要先在input输入编号如1812.10695": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
"Function": HotReload(下载arxiv论文并翻译摘要),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.联网的ChatGPT import 连接网络回答问题
function_plugins.update({
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
"Function": HotReload(连接网络回答问题)
function_plugins.update(
{
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
"Function": HotReload(连接网络回答问题),
}
}
})
)
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
function_plugins.update({
"连接网络回答问题中文Bing版输入问题后点击该插件": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "连接网络回答问题需要访问中文Bing| 输入参数是一个问题",
"Function": HotReload(连接bing搜索回答问题)
function_plugins.update(
{
"连接网络回答问题中文Bing版输入问题后点击该插件": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "连接网络回答问题需要访问中文Bing| 输入参数是一个问题",
"Function": HotReload(连接bing搜索回答问题),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.解析项目源代码 import 解析任意code项目
function_plugins.update({
"解析项目源代码(手动指定和筛选源代码文件类型)": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
"Function": HotReload(解析任意code项目)
},
})
function_plugins.update(
{
"解析项目源代码(手动指定和筛选源代码文件类型)": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": '输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"', # 高级参数输入区的显示提示
"Function": HotReload(解析任意code项目),
},
}
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
function_plugins.update({
"询问多个GPT模型手动指定询问哪些模型": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
"Function": HotReload(同时问询_指定模型)
},
})
function_plugins.update(
{
"询问多个GPT模型手动指定询问哪些模型": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
"Function": HotReload(同时问询_指定模型),
},
}
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
function_plugins.update({
"图片生成_DALLE2 先切换模型到openai或api2d": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如1024x1024默认支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
"Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2)
},
})
function_plugins.update({
"图片生成_DALLE3 先切换模型到openai或api2d": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
"Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE3)
},
})
function_plugins.update({
"图片修改_DALLE2 先切换模型到openai或api2d": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2)
},
})
function_plugins.update(
{
"图片生成_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如1024x1024默认支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
"Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2),
},
}
)
function_plugins.update(
{
"图片生成_DALLE3 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
"Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE3),
},
}
)
function_plugins.update(
{
"图片修改_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2),
},
}
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.总结音视频 import 总结音视频
function_plugins.update({
"批量总结音视频(输入路径或上传压缩包)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示例如解析为简体中文默认",
"Info": "批量总结音频或视频 | 输入参数为路径",
"Function": HotReload(总结音视频)
function_plugins.update(
{
"批量总结音视频(输入路径或上传压缩包)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示例如解析为简体中文默认",
"Info": "批量总结音频或视频 | 输入参数为路径",
"Function": HotReload(总结音视频),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.数学动画生成manim import 动画生成
function_plugins.update({
"数学动画生成Manim": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
"Function": HotReload(动画生成)
function_plugins.update(
{
"数学动画生成Manim": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
"Function": HotReload(动画生成),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
function_plugins.update({
"Markdown翻译指定翻译成何种语言": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "请输入要翻译成哪种语言默认为Chinese。",
"Function": HotReload(Markdown翻译指定语言)
function_plugins.update(
{
"Markdown翻译指定翻译成何种语言": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "请输入要翻译成哪种语言默认为Chinese。",
"Function": HotReload(Markdown翻译指定语言),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.知识库问答 import 知识库文件注入
function_plugins.update({
"构建知识库(先上传文件素材,再运行此插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式向知识库追加更多文档。",
"Function": HotReload(知识库文件注入)
function_plugins.update(
{
"构建知识库(先上传文件素材,再运行此插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式向知识库追加更多文档。",
"Function": HotReload(知识库文件注入),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.知识库问答 import 读取知识库作答
function_plugins.update({
"知识库文件注入(构建知识库后,再运行此插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
"Function": HotReload(读取知识库作答)
function_plugins.update(
{
"知识库文件注入(构建知识库后,再运行此插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
"Function": HotReload(读取知识库作答),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.交互功能函数模板 import 交互功能模板函数
function_plugins.update({
"交互功能模板Demo函数查找wallhaven.cc的壁纸": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Function": HotReload(交互功能模板函数)
function_plugins.update(
{
"交互功能模板Demo函数查找wallhaven.cc的壁纸": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Function": HotReload(交互功能模板函数),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
function_plugins.update({
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比)
}
})
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
function_plugins.update({
"Arxiv论文精细翻译输入arxivID[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF)
function_plugins.update(
{
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比),
},
"Arxiv论文精细翻译输入arxivID[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+ "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+ 'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF),
},
"本地Latex论文精细翻译上传Latex项目[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+ "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+ 'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
"Function": HotReload(Latex翻译中文并重新编译PDF),
}
}
})
function_plugins.update({
"本地Latex论文精细翻译上传Latex项目[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from toolbox import get_conf
ENABLE_AUDIO = get_conf('ENABLE_AUDIO')
ENABLE_AUDIO = get_conf("ENABLE_AUDIO")
if ENABLE_AUDIO:
from crazy_functions.语音助手 import 语音助手
function_plugins.update({
"实时语音对话": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
"Function": HotReload(语音助手)
function_plugins.update(
{
"实时语音对话": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
"Function": HotReload(语音助手),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
function_plugins.update({
"精准翻译PDF文档NOUGAT": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"Function": HotReload(批量翻译PDF文档)
function_plugins.update(
{
"精准翻译PDF文档NOUGAT": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"Function": HotReload(批量翻译PDF文档),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.函数动态生成 import 函数动态生成
function_plugins.update({
"动态代码解释器CodeInterpreter": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(函数动态生成)
function_plugins.update(
{
"动态代码解释器CodeInterpreter": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(函数动态生成),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.多智能体 import 多智能体终端
function_plugins.update({
"AutoGen多智能体终端仅供测试": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(多智能体终端)
function_plugins.update(
{
"AutoGen多智能体终端仅供测试": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(多智能体终端),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
try:
from crazy_functions.互动小游戏 import 随机小游戏
function_plugins.update({
"随机互动小游戏(仅供测试)": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(随机小游戏)
function_plugins.update(
{
"随机互动小游戏(仅供测试)": {
"Group": "智能体",
"Color": "stop",
"AsButton": False,
"Function": HotReload(随机小游戏),
}
}
})
)
except:
print(trimmed_format_exc())
print('Load function plugin failed')
print("Load function plugin failed")
# try:
# from crazy_functions.高级功能函数模板 import 测试图表渲染
# function_plugins.update({
# "绘制逻辑关系(测试图表渲染)": {
# "Group": "智能体",
# "Color": "stop",
# "AsButton": True,
# "Function": HotReload(测试图表渲染)
# }
# })
# except:
# print(trimmed_format_exc())
# print('Load function plugin failed')
# try:
# from crazy_functions.chatglm微调工具 import 微调数据集生成
@@ -618,8 +691,6 @@ def get_crazy_functions():
# except:
# print('Load function plugin failed')
"""
设置默认值:
- 默认 Group = 对话
@@ -629,12 +700,12 @@ def get_crazy_functions():
"""
    for name, function_meta in function_plugins.items():
        if "Group" not in function_meta:
            function_plugins[name]["Group"] = "对话"
        if "AsButton" not in function_meta:
            function_plugins[name]["AsButton"] = True
        if "AdvancedArgs" not in function_meta:
            function_plugins[name]["AdvancedArgs"] = False
        if "Color" not in function_meta:
            function_plugins[name]["Color"] = "secondary"
    return function_plugins
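# A minimal sketch of what the defaults above do to a hypothetical entry
# (the plugin name is illustrative, not a real plugin in this table):
#     demo = {"示例插件": {"Function": None}}
#     for meta in demo.values():
#         meta.setdefault("Group", "对话")
#         meta.setdefault("AsButton", True)
#         meta.setdefault("AdvancedArgs", False)
#         meta.setdefault("Color", "secondary")
# setdefault is equivalent to the "if key not in" checks used above.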

View File

@@ -137,7 +137,7 @@ def get_recent_file_prompt_support(chatbot):
return path
@CatchException
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -145,7 +145,7 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
raise NotImplementedError
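# A minimal sketch of the plugin signature this stub (and the plugins below) follows
# after the web_port -> user_request rename; the plugin name is hypothetical, while
# CatchException and update_ui are the real toolbox helpers used throughout:
from toolbox import CatchException, update_ui

@CatchException
def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    history = []  # 清空历史,以免输入溢出
    chatbot.append((txt, "[Local Message] 收到。"))
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面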

View File

@@ -135,11 +135,11 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
@CatchException
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。注意此插件不调用Latex如果有Latex环境请使用Latex英文纠错+高亮插件"])
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。注意此插件不调用Latex如果有Latex环境请使用Latex英文纠错+高亮修正位置(需Latex)插件"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -173,7 +173,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
@CatchException
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
@@ -209,7 +209,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
@CatchException
def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",

View File

@@ -106,7 +106,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
@CatchException
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
@@ -143,7 +143,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
@CatchException
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",

View File

@@ -1,11 +1,11 @@
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
from functools import partial
import glob, os, requests, time
import glob, os, requests, time, tarfile
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
def switch_prompt(pfg, mode, more_requirement):
"""
@@ -104,7 +104,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
txt = 'https://arxiv.org/abs/' + txt[:10]
if not txt.startswith('https://arxiv.org'):
        return txt, None  # 是本地文件,跳过下载
# <-------------- inspect format ------------->
chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
@@ -142,11 +142,11 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
from toolbox import extract_archive
extract_archive(file_path=dst, dest_dir=extract_dst)
return extract_dst, arxiv_id
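# The ID check at the top of arxiv_download reduces to this sketch
# (is_float is the module's own helper; the sample ID is illustrative):
#     txt = "1812.10695"
#     if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]):  # bare arxiv ID
#         txt = 'https://arxiv.org/abs/' + txt[:10]
#     # txt -> 'https://arxiv.org/abs/1812.10695'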
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# <-------------- information about this plugin ------------->
chatbot.append([ "函数插件功能?",
"对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4其他模型转化效果未知。目前对机器学习类文献转化效果最好其他类型文献转化效果未知。仅在Windows系统进行了测试其他操作系统表现未知。"])
@@ -218,10 +218,10 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
# <-------------- we are done ------------->
return success
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# <-------------- information about this plugin ------------->
chatbot.append([
"函数插件功能?",
@@ -250,7 +250,14 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
# <-------------- clear history and read input ------------->
history = []
    try:
        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
except tarfile.ReadError as e:
yield from update_ui_lastest_msg(
"无法自动下载该论文的Latex源码请前往arxiv打开此论文下载页面点other Formats然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
chatbot=chatbot, history=history)
return
if txt.endswith('.pdf'):
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -35,7 +35,11 @@ def gpt_academic_generate_oai_reply(
class AutoGenGeneral(PluginMultiprocessManager):
def gpt_academic_print_override(self, user_proxy, message, sender):
# ⭐⭐ run in subprocess
self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
try:
print_msg = sender.name + "\n\n---\n\n" + message["content"]
except:
print_msg = sender.name + "\n\n---\n\n" + message
self.child_conn.send(PipeCom("show", print_msg))
def gpt_academic_get_human_input(self, user_proxy, message):
# ⭐⭐ run in subprocess
@@ -62,33 +66,33 @@ class AutoGenGeneral(PluginMultiprocessManager):
def exe_autogen(self, input):
# ⭐⭐ run in subprocess
input = input.content
with ProxyNetworkActivate("AutoGen"):
code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
agents = self.define_agents()
user_proxy = None
assistant = None
for agent_kwargs in agents:
agent_cls = agent_kwargs.pop('cls')
kwargs = {
'llm_config':self.llm_kwargs,
'code_execution_config':code_execution_config
}
kwargs.update(agent_kwargs)
agent_handle = agent_cls(**kwargs)
agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
for d in agent_handle._reply_func_list:
if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
d['reply_func'] = gpt_academic_generate_oai_reply
if agent_kwargs['name'] == 'user_proxy':
agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
user_proxy = agent_handle
if agent_kwargs['name'] == 'assistant': assistant = agent_handle
try:
if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
agents = self.define_agents()
user_proxy = None
assistant = None
for agent_kwargs in agents:
agent_cls = agent_kwargs.pop('cls')
kwargs = {
'llm_config':self.llm_kwargs,
'code_execution_config':code_execution_config
}
kwargs.update(agent_kwargs)
agent_handle = agent_cls(**kwargs)
agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
for d in agent_handle._reply_func_list:
if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
d['reply_func'] = gpt_academic_generate_oai_reply
if agent_kwargs['name'] == 'user_proxy':
agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
user_proxy = agent_handle
if agent_kwargs['name'] == 'assistant': assistant = agent_handle
try:
if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
with ProxyNetworkActivate("AutoGen"):
user_proxy.initiate_chat(assistant, message=input)
except Exception as e:
tb_str = '```\n' + trimmed_format_exc() + '```'
self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
except Exception as e:
tb_str = '```\n' + trimmed_format_exc() + '```'
self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
def subprocess_worker(self, child_conn):
# ⭐⭐ run in subprocess

View File

@@ -9,7 +9,7 @@ class PipeCom:
class PluginMultiprocessManager:
def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# ⭐ run in main process
self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
self.previous_work_dir_files = {}
@@ -18,7 +18,7 @@ class PluginMultiprocessManager:
self.chatbot = chatbot
self.history = history
self.system_prompt = system_prompt
# self.web_port = web_port
# self.user_request = user_request
self.alive = True
self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
self.last_user_input = ""

View File

@@ -32,7 +32,7 @@ def string_to_options(arguments):
return args
@CatchException
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -40,7 +40,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
@@ -80,7 +80,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
@CatchException
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -88,7 +88,7 @@ def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
import subprocess
history = [] # 清空历史,以免输入溢出

View File

@@ -139,6 +139,8 @@ def can_multi_process(llm):
if llm.startswith('gpt-'): return True
if llm.startswith('api2d-'): return True
if llm.startswith('azure-'): return True
if llm.startswith('spark'): return True
if llm.startswith('zhipuai'): return True
return False
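# Illustrative behaviour of the prefix dispatch above (model names are examples):
#     can_multi_process("gpt-4-turbo-preview")  # True
#     can_multi_process("zhipuai-glm-4")        # True, via the new "zhipuai" prefix
#     can_multi_process("chatglm3-6b")          # False, falls through to the default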
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
@@ -282,8 +284,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
                replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -464,6 +465,9 @@ def read_and_clean_pdf_text(fp):
return True
else:
return False
# 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写
if starts_with_lowercase_word(meta_txt[0]):
meta_txt[0] = meta_txt[0].capitalize()
for _ in range(100):
for index, block_txt in enumerate(meta_txt):
if starts_with_lowercase_word(block_txt):

View File

@@ -0,0 +1,122 @@
import os
from textwrap import indent
class FileNode:
def __init__(self, name):
self.name = name
self.children = []
self.is_leaf = False
self.level = 0
self.parenting_ship = []
self.comment = ""
self.comment_maxlen_show = 50
@staticmethod
def add_linebreaks_at_spaces(string, interval=10):
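        # Note: despite the name, this breaks at fixed character intervals rather than
        # at spaces, e.g. add_linebreaks_at_spaces("abcdefghijkl", 5) -> "abcde\nfghij\nkl".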
return '\n'.join(string[i:i+interval] for i in range(0, len(string), interval))
def sanitize_comment(self, comment):
if len(comment) > self.comment_maxlen_show: suf = '...'
else: suf = ''
comment = comment[:self.comment_maxlen_show]
comment = comment.replace('\"', '').replace('`', '').replace('\n', '').replace('`', '').replace('$', '')
comment = self.add_linebreaks_at_spaces(comment, 10)
return '`' + comment + suf + '`'
def add_file(self, file_path, file_comment):
directory_names, file_name = os.path.split(file_path)
current_node = self
level = 1
if directory_names == "":
new_node = FileNode(file_name)
current_node.children.append(new_node)
new_node.is_leaf = True
new_node.comment = self.sanitize_comment(file_comment)
new_node.level = level
current_node = new_node
else:
dnamesplit = directory_names.split(os.sep)
for i, directory_name in enumerate(dnamesplit):
found_child = False
level += 1
for child in current_node.children:
if child.name == directory_name:
current_node = child
found_child = True
break
if not found_child:
new_node = FileNode(directory_name)
current_node.children.append(new_node)
new_node.level = level - 1
current_node = new_node
term = FileNode(file_name)
term.level = level
term.comment = self.sanitize_comment(file_comment)
term.is_leaf = True
current_node.children.append(term)
def print_files_recursively(self, level=0, code="R0"):
print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
for j, child in enumerate(self.children):
child.print_files_recursively(level=level+1, code=code+str(j))
self.parenting_ship.extend(child.parenting_ship)
p1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
p2 = """ --> """
p3 = f"""{code+str(j)}[\"🗎{child.name}\"]""" if child.is_leaf else f"""{code+str(j)}[[\"📁{child.name}\"]]"""
edge_code = p1 + p2 + p3
if edge_code in self.parenting_ship:
continue
self.parenting_ship.append(edge_code)
if self.comment != "":
pc1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
pc2 = f""" -.-x """
pc3 = f"""C{code}[\"{self.comment}\"]:::Comment"""
edge_code = pc1 + pc2 + pc3
self.parenting_ship.append(edge_code)
MERMAID_TEMPLATE = r"""
```mermaid
flowchart LR
%% <gpt_academic_hide_mermaid_code> 一个特殊标记用于在生成mermaid图表时隐藏代码块
classDef Comment stroke-dasharray: 5 5
subgraph {graph_name}
{relationship}
end
```
"""
def build_file_tree_mermaid_diagram(file_manifest, file_comments, graph_name):
# Create the root node
file_tree_struct = FileNode("root")
# Build the tree structure
for file_path, file_comment in zip(file_manifest, file_comments):
file_tree_struct.add_file(file_path, file_comment)
file_tree_struct.print_files_recursively()
cc = "\n".join(file_tree_struct.parenting_ship)
ccc = indent(cc, prefix=" "*8)
return MERMAID_TEMPLATE.format(graph_name=graph_name, relationship=ccc)
if __name__ == "__main__":
# File manifest
file_manifest = [
"cradle_void_terminal.ipynb",
"tests/test_utils.py",
"tests/test_plugins.py",
"tests/test_llms.py",
"config.py",
"build/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/model_weights_0.bin",
"crazy_functions/latex_fns/latex_actions.py",
"crazy_functions/latex_fns/latex_toolbox.py"
]
file_comments = [
"根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件",
"包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器",
"用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法",
"包含了用于文本切分的函数以及处理PDF文件的示例代码包含了用于文本切分的函数以及处理PDF文件的示例代码包含了用于文本切分的函数以及处理PDF文件的示例代码",
"用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数",
"是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块",
"用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器",
"包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类",
]
print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))

View File

@@ -1,15 +1,18 @@
import os, shutil
import re
import numpy as np
PRESERVE = 0
TRANSFORM = 1
pj = os.path.join
class LinkedListNode:
"""
Linked List Node
"""
def __init__(self, string, preserve=True) -> None:
self.string = string
self.preserve = preserve
@@ -18,41 +21,47 @@ class LinkedListNode():
# self.begin_line = 0
# self.begin_char = 0
def convert_to_linklist(text, mask):
    root = LinkedListNode("", preserve=True)
    current_node = root
    for c, m, i in zip(text, mask, range(len(text))):
        if (m == PRESERVE and current_node.preserve) or (
            m == TRANSFORM and not current_node.preserve
        ):
            # add
            current_node.string += c
        else:
            current_node.next = LinkedListNode(c, preserve=(m == PRESERVE))
            current_node = current_node.next
    return root
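# A tiny worked example of the splitting above (PRESERVE/TRANSFORM are the module constants):
#     root = convert_to_linklist("abcDEF", [PRESERVE] * 3 + [TRANSFORM] * 3)
#     # root holds "abc" with preserve=True; root.next holds "DEF" with preserve=False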
def post_process(root):
    # 修复括号
    node = root
    while True:
        string = node.string
        if node.preserve:
            node = node.next
            if node is None:
                break
            continue

        def break_check(string):
            str_stack = [""]  # (lv, index)
            for i, c in enumerate(string):
                if c == "{":
                    str_stack.append("{")
                elif c == "}":
                    if len(str_stack) == 1:
                        print("stack fix")
                        return i
                    str_stack.pop(-1)
                else:
                    str_stack[-1] += c
            return -1

        bp = break_check(string)
        if bp == -1:
@@ -69,51 +78,66 @@ def post_process(root):
        node.next = q
        node = node.next
        if node is None:
            break

    # 屏蔽空行和太短的句子
    node = root
    while True:
        if len(node.string.strip("\n").strip("")) == 0:
            node.preserve = True
        if len(node.string.strip("\n").strip("")) < 42:
            node.preserve = True
        node = node.next
        if node is None:
            break
    node = root
    while True:
        if node.next and node.preserve and node.next.preserve:
            node.string += node.next.string
            node.next = node.next.next
        node = node.next
        if node is None:
            break

    # 将前后断行符脱离
    node = root
    prev_node = None
    while True:
        if not node.preserve:
            lstriped_ = node.string.lstrip().lstrip("\n")
            if (
                (prev_node is not None)
                and (prev_node.preserve)
                and (len(lstriped_) != len(node.string))
            ):
                prev_node.string += node.string[: -len(lstriped_)]
                node.string = lstriped_
            rstriped_ = node.string.rstrip().rstrip("\n")
            if (
                (node.next is not None)
                and (node.next.preserve)
                and (len(rstriped_) != len(node.string))
            ):
                node.next.string = node.string[len(rstriped_) :] + node.next.string
                node.string = rstriped_
        # =-=-=
        prev_node = node
        node = node.next
        if node is None:
            break

    # 标注节点的行数范围
    node = root
    n_line = 0
    expansion = 2
    while True:
        n_l = node.string.count("\n")
        node.range = [n_line - expansion, n_line + n_l + expansion]  # 失败时,扭转的范围
        n_line = n_line + n_l
        node = node.next
        if node is None:
            break
    return root
@@ -128,97 +152,125 @@ def set_forbidden_text(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper
e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
you can mask out (mask = PRESERVE so that text become untouchable for GPT)
you can mask out (mask = PRESERVE so that text become untouchable for GPT)
everything between "\begin{equation}" and "\end{equation}"
"""
if isinstance(pattern, list): pattern = '|'.join(pattern)
if isinstance(pattern, list):
pattern = "|".join(pattern)
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
mask[res.span()[0]:res.span()[1]] = PRESERVE
mask[res.span()[0] : res.span()[1]] = PRESERVE
return text, mask
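# Usage sketch (assumes the module-level numpy import; sample text and pattern are illustrative):
#     text = r"intro \begin{equation} x=1 \end{equation} outro"
#     mask = np.zeros(len(text), dtype=np.uint8) + TRANSFORM   # everything editable by default
#     text, mask = set_forbidden_text(text, mask, r"\\begin\{equation\}(.*?)\\end\{equation\}", flags=re.DOTALL)
#     # the equation span is now PRESERVE and is kept verbatim rather than sent to GPT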
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
"""
Move area out of preserve area (make text editable for GPT)
count the number of the braces so as to catch compelete text area.
count the number of the braces so as to catch compelete text area.
e.g.
\begin{abstract} blablablablablabla. \end{abstract}
\begin{abstract} blablablablablabla. \end{abstract}
"""
if isinstance(pattern, list): pattern = '|'.join(pattern)
if isinstance(pattern, list):
pattern = "|".join(pattern)
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
if not forbid_wrapper:
mask[res.span()[0]:res.span()[1]] = TRANSFORM
mask[res.span()[0] : res.span()[1]] = TRANSFORM
else:
mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract
mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # abstract
mask[res.regs[0][0] : res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
mask[res.regs[1][0] : res.regs[1][1]] = TRANSFORM # abstract
mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE # abstract
return text, mask
def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper (text become untouchable for GPT).
count the number of the braces so as to catch compelete text area.
count the number of the braces so as to catch compelete text area.
e.g.
\caption{blablablablabla\texbf{blablabla}blablabla.}
\caption{blablablablabla\texbf{blablabla}blablabla.}
"""
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
brace_level = -1
p = begin = end = res.regs[0][0]
for _ in range(1024*16):
if text[p] == '}' and brace_level == 0: break
elif text[p] == '}': brace_level -= 1
elif text[p] == '{': brace_level += 1
for _ in range(1024 * 16):
if text[p] == "}" and brace_level == 0:
break
elif text[p] == "}":
brace_level -= 1
elif text[p] == "{":
brace_level += 1
p += 1
end = p+1
end = p + 1
mask[begin:end] = PRESERVE
return text, mask
def reverse_forbidden_text_careful_brace(
    text, mask, pattern, flags=0, forbid_wrapper=True
):
    """
    Move area out of preserve area (make text editable for GPT)
    count the number of the braces so as to catch the complete text area.
    e.g.
    \caption{blablablablabla\textbf{blablabla}blablabla.}
    """
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        brace_level = 0
        p = begin = end = res.regs[1][0]
        for _ in range(1024 * 16):
            if text[p] == "}" and brace_level == 0:
                break
            elif text[p] == "}":
                brace_level -= 1
            elif text[p] == "{":
                brace_level += 1
            p += 1
        end = p
        mask[begin:end] = TRANSFORM
        if forbid_wrapper:
            mask[res.regs[0][0] : begin] = PRESERVE
            mask[end : res.regs[0][1]] = PRESERVE
    return text, mask
def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
"""
Find all \begin{} ... \end{} text block that with less than limit_n_lines lines.
Add it to preserve area
"""
pattern_compile = re.compile(pattern, flags)
def search_with_line_limit(text, mask):
for res in pattern_compile.finditer(text):
cmd = res.group(1) # begin{what}
this = res.group(2) # content between begin and end
this_mask = mask[res.regs[2][0]:res.regs[2][1]]
white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
this = res.group(2) # content between begin and end
this_mask = mask[res.regs[2][0] : res.regs[2][1]]
white_list = [
"document",
"abstract",
"lemma",
"definition",
"sproof",
"em",
"emph",
"textit",
"textbf",
"itemize",
"enumerate",
]
if (cmd in white_list) or this.count(
"\n"
) >= limit_n_lines: # use a magical number 42
this, this_mask = search_with_line_limit(this, this_mask)
mask[res.regs[2][0]:res.regs[2][1]] = this_mask
mask[res.regs[2][0] : res.regs[2][1]] = this_mask
else:
mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE
return text, mask
return search_with_line_limit(text, mask)
return search_with_line_limit(text, mask)
"""
@@ -227,6 +279,7 @@ Latex Merge File
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
"""
def find_main_tex_file(file_manifest, mode):
"""
在多Tex文档中寻找主文件必须包含documentclass返回找到的第一个。
@@ -234,27 +287,36 @@ def find_main_tex_file(file_manifest, mode):
"""
canidates = []
for texf in file_manifest:
if os.path.basename(texf).startswith('merge'):
if os.path.basename(texf).startswith("merge"):
continue
with open(texf, 'r', encoding='utf8', errors='ignore') as f:
with open(texf, "r", encoding="utf8", errors="ignore") as f:
file_content = f.read()
if r'\documentclass' in file_content:
if r"\documentclass" in file_content:
canidates.append(texf)
else:
continue
if len(canidates) == 0:
raise RuntimeError('无法找到一个主Tex文件包含documentclass关键字')
raise RuntimeError("无法找到一个主Tex文件包含documentclass关键字")
elif len(canidates) == 1:
return canidates[0]
else: # if len(canidates) >= 2 通过一些Latex模板中常见但通常不会出现在正文的单词对不同latex源文件扣分取评分最高者返回
else: # if len(canidates) >= 2 通过一些Latex模板中常见但通常不会出现在正文的单词对不同latex源文件扣分取评分最高者返回
canidates_score = []
# 给出一些判定模板文档的词作为扣分项
unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
expected_words = ['\input', '\ref', '\cite']
unexpected_words = [
"\\LaTeX",
"manuscript",
"Guidelines",
"font",
"citations",
"rejected",
"blind review",
"reviewers",
]
expected_words = ["\\input", "\\ref", "\\cite"]
for texf in canidates:
canidates_score.append(0)
with open(texf, 'r', encoding='utf8', errors='ignore') as f:
with open(texf, "r", encoding="utf8", errors="ignore") as f:
file_content = f.read()
file_content = rm_comments(file_content)
for uw in unexpected_words:
@@ -263,9 +325,10 @@ def find_main_tex_file(file_manifest, mode):
for uw in expected_words:
if uw in file_content:
canidates_score[-1] += 1
        select = np.argmax(canidates_score)  # 取评分最高者返回
return canidates[select]
def rm_comments(main_file):
new_file_remove_comment_lines = []
for l in main_file.splitlines():
@@ -274,30 +337,39 @@ def rm_comments(main_file):
pass
else:
new_file_remove_comment_lines.append(l)
    main_file = "\n".join(new_file_remove_comment_lines)
    # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file)  # 将 \include 命令转换为 \input 命令
    main_file = re.sub(r"(?<!\\)%.*", "", main_file)  # 使用正则表达式查找半行注释, 并替换为空字符串
return main_file
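# Effect sketch (assuming the elided branch above drops lines that start with "%"):
#     rm_comments("real text % trailing comment\nkeep 100\\% here\n% full comment line")
#     # -> "real text \nkeep 100\\% here"   (escaped \% survives the (?<!\\)%.* regex)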
def find_tex_file_ignore_case(fp):
    dir_name = os.path.dirname(fp)
    base_name = os.path.basename(fp)
    # 如果输入的文件路径是正确的
    if os.path.isfile(pj(dir_name, base_name)):
        return pj(dir_name, base_name)
    # 如果不正确,试着加上.tex后缀试试
    if not base_name.endswith(".tex"):
        base_name += ".tex"
    if os.path.isfile(pj(dir_name, base_name)):
        return pj(dir_name, base_name)
    # 如果还找不到,解除大小写限制,再试一次
    import glob
    for f in glob.glob(dir_name + "/*.tex"):
        base_name_s = os.path.basename(fp)
        base_name_f = os.path.basename(f)
        if base_name_s.lower() == base_name_f.lower():
            return f
        # 试着加上.tex后缀试试
        if not base_name_s.endswith(".tex"):
            base_name_s += ".tex"
        if base_name_s.lower() == base_name_f.lower():
            return f
    return None
def merge_tex_files_(project_foler, main_file, mode):
"""
    Merge Tex project recursively
@@ -309,18 +381,18 @@ def merge_tex_files_(project_foler, main_file, mode):
        fp_ = find_tex_file_ignore_case(fp)
        if fp_:
            try:
                with open(fp_, "r", encoding="utf-8", errors="replace") as fx:
                    c = fx.read()
            except:
                c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
        else:
            raise RuntimeError(f"找不到{fp}Tex源文件缺失")
        c = merge_tex_files_(project_foler, c, mode)
        main_file = main_file[: s.span()[0]] + c + main_file[s.span()[1] :]
    return main_file
def find_title_and_abs(main_file):
def extract_abstract_1(text):
pattern = r"\\abstract\{(.*?)\}"
match = re.search(pattern, text, re.DOTALL)
@@ -362,21 +434,30 @@ def merge_tex_files(project_foler, main_file, mode):
main_file = merge_tex_files_(project_foler, main_file, mode)
main_file = rm_comments(main_file)
    if mode == "translate_zh":
        # find paper documentclass
        pattern = re.compile(r"\\documentclass.*\n")
        match = pattern.search(main_file)
        assert match is not None, "Cannot find documentclass statement!"
        position = match.end()
        add_ctex = "\\usepackage{ctex}\n"
        add_url = "\\usepackage{url}\n" if "{url}" not in main_file else ""
        main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
        # fontset=windows
        import platform
        main_file = re.sub(
            r"\\documentclass\[(.*?)\]{(.*?)}",
            r"\\documentclass[\1,fontset=windows,UTF8]{\2}",
            main_file,
        )
        main_file = re.sub(
            r"\\documentclass{(.*?)}",
            r"\\documentclass[fontset=windows,UTF8]{\1}",
            main_file,
        )
        # find paper abstract
        pattern_opt1 = re.compile(r"\\begin\{abstract\}.*\n")
        pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
@@ -385,7 +466,9 @@ def merge_tex_files(project_foler, main_file, mode):
main_file = insert_abstract(main_file)
match_opt1 = pattern_opt1.search(main_file)
match_opt2 = pattern_opt2.search(main_file)
        assert (match_opt1 is not None) or (
            match_opt2 is not None
        ), "Cannot find paper abstract section!"
return main_file
@@ -395,6 +478,7 @@ The GPT-Academic program cannot find abstract section in this paper.
\end{abstract}
"""
def insert_abstract(tex_content):
if "\\maketitle" in tex_content:
# find the position of "\maketitle"
@@ -402,7 +486,13 @@ def insert_abstract(tex_content):
# find the nearest ending line
end_line_index = tex_content.find("\n", find_index)
# insert "abs_str" on the next line
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
return modified_tex
elif r"\begin{document}" in tex_content:
# find the position of "\maketitle"
@@ -410,29 +500,39 @@ def insert_abstract(tex_content):
# find the nearest ending line
end_line_index = tex_content.find("\n", find_index)
# insert "abs_str" on the next line
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
return modified_tex
else:
return tex_content
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Post process
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
"""
def mod_inbraket(match):
"""
    为啥chatgpt会把cite里面的逗号换成中文逗号呀
    """
    # get the matched string
    cmd = match.group(1)
    str_to_modify = match.group(2)
    # modify the matched string
    str_to_modify = str_to_modify.replace("：", ":")  # 前面是中文冒号,后面是英文冒号
    str_to_modify = str_to_modify.replace("，", ",")  # 前面是中文逗号,后面是英文逗号
# str_to_modify = 'BOOM'
return "\\" + cmd + "{" + str_to_modify + "}"
def fix_content(final_tex, node_string):
"""
Fix common GPT errors to increase success rate
@@ -443,10 +543,10 @@ def fix_content(final_tex, node_string):
final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)
if "Traceback" in final_tex and "[Local Message]" in final_tex:
final_tex = node_string # 出问题了,还原原文
    if node_string.count("\\begin") != final_tex.count("\\begin"):
        final_tex = node_string  # 出问题了,还原原文
    if node_string.count("\_") > 0 and node_string.count("\_") > final_tex.count("\_"):
# walk and replace any _ without \
final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
@@ -454,24 +554,32 @@ def fix_content(final_tex, node_string):
# this function count the number of { and }
brace_level = 0
for c in string:
if c == "{": brace_level += 1
elif c == "}": brace_level -= 1
if c == "{":
brace_level += 1
elif c == "}":
brace_level -= 1
return brace_level
def join_most(tex_t, tex_o):
# this function join translated string and original string when something goes wrong
p_t = 0
p_o = 0
def find_next(string, chars, begin):
p = begin
while p < len(string):
                if string[p] in chars:
                    return p, string[p]
p += 1
return None, None
while True:
            res1, char = find_next(tex_o, ["{", "}"], p_o)
            if res1 is None:
                break
            res2, char = find_next(tex_t, [char], p_t)
            if res2 is None:
                break
p_o = res1 + 1
p_t = res2 + 1
return tex_t[:p_t] + tex_o[p_o:]
@@ -480,10 +588,14 @@ def fix_content(final_tex, node_string):
# 出问题了,还原部分原文,保证括号正确
final_tex = join_most(final_tex, node_string)
return final_tex
def compile_latex_with_timeout(command, cwd, timeout=60):
import subprocess
    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
    )
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
@@ -493,43 +605,52 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
return False
return True
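# Hypothetical invocation (the command and working directory are illustrative):
#     ok = compile_latex_with_timeout("pdflatex -interaction=batchmode merge.tex", cwd="./latex_workdir")
#     if not ok: print("LaTeX compile failed or exceeded the timeout")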
def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
import sys
try:
result = func(*args, **kwargs)
        return_dict["result"] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict["exception"] = exc_info
def run_in_subprocess(func):
import multiprocessing
def wrapper(*args, **kwargs):
return_dict = multiprocessing.Manager().dict()
exception_dict = multiprocessing.Manager().dict()
        process = multiprocessing.Process(
            target=run_in_subprocess_wrapper_func,
            args=(func, args, kwargs, return_dict, exception_dict),
        )
        process.start()
        process.join()
        process.close()
        if "exception" in exception_dict:
            # ooops, the subprocess ran into an exception
            exc_info = exception_dict["exception"]
            raise exc_info[1].with_traceback(exc_info[2])
        if "result" in return_dict.keys():
            # If the subprocess ran successfully, return the result
            return return_dict["result"]
return wrapper
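# Usage follows the pattern applied to _merge_pdfs below; the wrapped function is hypothetical:
#     def leaky_job(x):
#         return x * 2
#     safe_job = run_in_subprocess(leaky_job)
#     safe_job(21)  # -> 42, computed in a child process so its memory is reclaimed on exit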
def _merge_pdfs(pdf1_path, pdf2_path, output_path):
    import PyPDF2  # PyPDF2这个库有严重的内存泄露问题把它放到子进程中运行从而方便内存的释放
Percent = 0.95
# raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
# Open the first PDF file
    with open(pdf1_path, "rb") as pdf1_file:
        pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
        # Open the second PDF file
        with open(pdf2_path, "rb") as pdf2_file:
pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
# Create a new PDF file to store the merged pages
output_writer = PyPDF2.PdfFileWriter()
@@ -549,14 +670,25 @@ def _merge_pdfs(pdf1_path, pdf2_path, output_path):
page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
# Create a new empty page with double width
new_page = PyPDF2.PageObject.createBlankPage(
width = int(int(page1.mediaBox.getWidth()) + int(page2.mediaBox.getWidth()) * Percent),
height = max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight())
width=int(
int(page1.mediaBox.getWidth())
+ int(page2.mediaBox.getWidth()) * Percent
),
height=max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight()),
)
new_page.mergeTranslatedPage(page1, 0, 0)
            new_page.mergeTranslatedPage(
                page2,
                int(
                    int(page1.mediaBox.getWidth())
                    - int(page2.mediaBox.getWidth()) * (1 - Percent)
                ),
                0,
            )
output_writer.addPage(new_page)
# Save the merged PDF file
    with open(output_path, "wb") as output_file:
output_writer.write(output_file)
merge_pdfs = run_in_subprocess(_merge_pdfs)  # PyPDF2这个库有严重的内存泄露问题把它放到子进程中运行从而方便内存的释放

View File

@@ -65,10 +65,10 @@ def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=F
# 如果没有找到合适的切分点
if break_anyway:
# 是否允许暴力切分
prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
else:
# 不允许直接报错
raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
raise RuntimeError(f"存在一行极长的文本!{remain_txt_to_cut}")
# 追加列表
res.append(prev); fin_len+=len(prev)

View File

@@ -130,7 +130,7 @@ def get_name(_url_):
@CatchException
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
import glob

View File

@@ -5,7 +5,7 @@ from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
@CatchException
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
# 清空历史
history = []
@@ -23,7 +23,7 @@ def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_
@CatchException
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
# 清空历史
history = []

View File

@@ -3,7 +3,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@CatchException
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -11,7 +11,7 @@ def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))

View File

@@ -139,7 +139,7 @@ def get_recent_file_prompt_support(chatbot):
return path
@CatchException
def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -147,7 +147,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
# 清空历史

View File

@@ -4,7 +4,7 @@ from .crazy_utils import input_clipping
import copy, json
@CatchException
def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -12,7 +12,7 @@ def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
chatbot 聊天显示框的句柄, 用于显示给用户
history 聊天历史, 前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
# 清空历史, 以免输入溢出
history = []

View File

@@ -93,7 +93,7 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da
@CatchException
def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -101,10 +101,14 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
if prompt.strip() == "":
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
@@ -119,9 +123,13 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
@CatchException
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
if prompt.strip() == "":
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
@@ -201,7 +209,7 @@ class ImageEditState(GptAcademicState):
return all([x['value'] is not None for x in self.req])
@CatchException
def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 尚未完成
history = [] # 清空历史
state = ImageEditState.get_state(chatbot, ImageEditState)

View File

@@ -21,7 +21,7 @@ def remove_model_prefix(llm):
@CatchException
def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -29,7 +29,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
# 检查当前的模型是否符合要求
supported_llms = [
@@ -50,14 +50,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
return
if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型加载API_KEY
llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
# 检查当前的模型是否符合要求
API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
if len(API_URL_REDIRECT) > 0:
chatbot.append([f"处理任务: {txt}", f"暂不支持中转."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
import autogen
@@ -96,7 +89,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
history = []
chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
persistent_class_multi_user_manager.set(persistent_key, executor)
exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")

View File

@@ -69,7 +69,7 @@ def read_file_to_chat(chatbot, history, file_name):
return chatbot, history
@CatchException
def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -77,7 +77,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
chatbot.append(("保存当前对话",
@@ -91,7 +91,7 @@ def hide_cwd(str):
return str.replace(current_path, replace_path)
@CatchException
def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -99,7 +99,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
from .crazy_utils import get_files_from_everything
success, file_manifest, _ = get_files_from_everything(txt, type='.html')
@@ -126,7 +126,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
return
@CatchException
def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -134,7 +134,7 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
import glob, os

View File

@@ -79,7 +79,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
@CatchException
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
import glob, os
# 基本信息:功能、贡献者

View File

@@ -153,7 +153,7 @@ def get_files_from_everything(txt, preference=''):
@CatchException
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
@@ -193,7 +193,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
@CatchException
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
@@ -226,7 +226,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
@CatchException
def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",

View File

@@ -101,7 +101,7 @@ do not have too much repetitive information, numerical values using the original
@CatchException
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
import glob, os
# 基本信息:功能、贡献者

View File

@@ -124,7 +124,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os

View File

@@ -48,7 +48,7 @@ def markdown_to_dict(article_content):
@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot)
# 基本信息:功能、贡献者

View File

@@ -10,7 +10,7 @@ import os
@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot)
# 基本信息:功能、贡献者

View File

@@ -1,6 +1,7 @@
import os
from toolbox import CatchException, update_ui, gen_time_str, promote_file_to_downloadzone
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import input_clipping
def inspect_dependency(chatbot, history):
# 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -27,9 +28,10 @@ def eval_manim(code):
class_name = get_class_name(code)
try:
time_str = gen_time_str()
subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"])
        shutil.move(f'media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{time_str}.mp4')
        return f'gpt_log/{time_str}.mp4'
except subprocess.CalledProcessError as e:
output = e.output.decode()
print(f"Command returned non-zero exit status {e.returncode}: {output}.")
@@ -48,7 +50,7 @@ def get_code_block(reply):
return matches[0].strip('python') # code block
@CatchException
def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
@@ -56,7 +58,7 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息IP地址等
"""
# 清空历史,以免输入溢出
history = []
@@ -94,6 +96,8 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
res = eval_manim(code)
chatbot.append(("生成的视频文件路径", res))
if os.path.exists(res):
promote_file_to_downloadzone(res, chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
# 在这里放一些网上搜集的demo辅助gpt生成代码

View File

@@ -63,7 +63,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
@CatchException
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
import glob, os
# 基本信息:功能、贡献者

View File

@@ -36,7 +36,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
@CatchException
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):

View File

@@ -0,0 +1,302 @@
from toolbox import CatchException, update_ui, report_exception
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import read_and_clean_pdf_text
import datetime
#以下是每类图表的PROMPT
SELECT_PROMPT = """
{subject}
=============
以上是从文章中提取的摘要,将会使用这些摘要绘制图表。请你选择一个合适的图表类型:
1 流程图
2 序列图
3 类图
4 饼图
5 甘特图
6 状态图
7 实体关系图
8 象限提示图
不需要解释原因,仅需要输出单个不带任何标点符号的数字。
"""
#没有思维导图!!!测试发现模型始终会优先选择思维导图
#流程图
PROMPT_1 = """
请你给出围绕“{subject}”的逻辑关系图使用mermaid语法mermaid语法举例
```mermaid
graph TD
P(编程) --> L1(Python)
P(编程) --> L2(C)
P(编程) --> L3(C++)
    P(编程) --> L4(Javascript)
P(编程) --> L5(PHP)
```
"""
#序列图
PROMPT_2 = """
请你给出围绕“{subject}”的序列图使用mermaid语法mermaid语法举例
```mermaid
sequenceDiagram
participant A as 用户
participant B as 系统
A->>B: 登录请求
B->>A: 登录成功
A->>B: 获取数据
B->>A: 返回数据
```
"""
#类图
PROMPT_3 = """
请你给出围绕“{subject}”的类图使用mermaid语法mermaid语法举例
```mermaid
classDiagram
Class01 <|-- AveryLongClass : Cool
Class03 *-- Class04
Class05 o-- Class06
Class07 .. Class08
Class09 --> C2 : Where am i?
Class09 --* C3
Class09 --|> Class07
Class07 : equals()
Class07 : Object[] elementData
Class01 : size()
Class01 : int chimp
Class01 : int gorilla
Class08 <--> C2: Cool label
```
"""
#饼图
PROMPT_4 = """
请你给出围绕“{subject}”的饼图使用mermaid语法mermaid语法举例
```mermaid
pie title Pets adopted by volunteers
"" : 386
"" : 85
"兔子" : 15
```
"""
#甘特图
PROMPT_5 = """
请你给出围绕“{subject}”的甘特图使用mermaid语法mermaid语法举例
```mermaid
gantt
title 项目开发流程
dateFormat YYYY-MM-DD
section 设计
需求分析 :done, des1, 2024-01-06,2024-01-08
原型设计 :active, des2, 2024-01-09, 3d
UI设计 : des3, after des2, 5d
section 开发
前端开发 :2024-01-20, 10d
后端开发 :2024-01-20, 10d
```
"""
#状态图
PROMPT_6 = """
请你给出围绕“{subject}”的状态图使用mermaid语法mermaid语法举例
```mermaid
stateDiagram-v2
[*] --> Still
Still --> [*]
Still --> Moving
Moving --> Still
Moving --> Crash
Crash --> [*]
```
"""
#实体关系图
PROMPT_7 = """
请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例:
```mermaid
erDiagram
CUSTOMER ||--o{ ORDER : places
ORDER ||--|{ LINE-ITEM : contains
CUSTOMER {
string name
string id
}
ORDER {
string orderNumber
date orderDate
string customerID
}
LINE-ITEM {
number quantity
string productID
}
```
"""
#象限提示图
PROMPT_8 = """
请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例:
```mermaid
graph LR
A[Hard skill] --> B(Programming)
A[Hard skill] --> C(Design)
D[Soft skill] --> E(Coordination)
D[Soft skill] --> F(Communication)
```
"""
#思维导图
PROMPT_9 = """
{subject}
==========
请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例:
```mermaid
mindmap
root((mindmap))
Origins
Long history
::icon(fa fa-book)
Popularisation
British popular psychology author Tony Buzan
Research
On effectiveness<br/>and features
On Automatic creation
Uses
Creative techniques
Strategic planning
Argument mapping
Tools
Pen and paper
Mermaid
```
"""
def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
############################## <第 0 步,切割输入> ##################################
# 借用PDF切割中的函数对文本进行切割
TOKEN_LIMIT_PER_FRAGMENT = 2500
txt = str(history).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ##################################
i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
results = []
MAX_WORD_TOTAL = 4096
n_txt = len(txt)
last_iteration_result = "从以下文本中提取摘要。"
if n_txt >= 20: print('文章极长,不能达到预期效果')
for i in range(n_txt):
NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i]}"
i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问 i_say_show_user=给用户看的提问
llm_kwargs, chatbot,
history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果
sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese." # 提示
)
results.append(gpt_say)
last_iteration_result = gpt_say
############################## <第 2 步,根据整理的摘要选择图表类型> ##################################
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数
results_txt = '\n'.join(results) #合并摘要
if gpt_say not in ['1','2','3','4','5','6','7','8','9']: #如插件参数不正确则使用对话模型判断
i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
i_say = SELECT_PROMPT.format(subject=results_txt)
i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。'
for i in range(3):
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
)
if gpt_say in ['1','2','3','4','5','6','7','8','9']: #判断返回是否正确
break
if gpt_say not in ['1','2','3','4','5','6','7','8','9']:
gpt_say = '1'
############################## <第 3 步,根据选择的图表类型绘制图表> ##################################
if gpt_say == '1':
i_say = PROMPT_1.format(subject=results_txt)
elif gpt_say == '2':
i_say = PROMPT_2.format(subject=results_txt)
elif gpt_say == '3':
i_say = PROMPT_3.format(subject=results_txt)
elif gpt_say == '4':
i_say = PROMPT_4.format(subject=results_txt)
elif gpt_say == '5':
i_say = PROMPT_5.format(subject=results_txt)
elif gpt_say == '6':
i_say = PROMPT_6.format(subject=results_txt)
elif gpt_say == '7':
i_say = PROMPT_7.replace("{subject}", results_txt) #由于实体关系图用到了{}符号
elif gpt_say == '8':
i_say = PROMPT_8.format(subject=results_txt)
elif gpt_say == '9':
i_say = PROMPT_9.format(subject=results_txt)
i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。'
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。"
)
history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
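A side note on the chart-type dispatch above: the if/elif ladder maps the model's single-digit answer onto a prompt template. An equivalent, more compact form (a sketch using the `PROMPT_1`..`PROMPT_9` names defined earlier; the `'7'` special case mirrors the source comment about literal `{}` braces in the ER-diagram template) would be a dict lookup:

```python
PROMPT_TABLE = {
    "1": PROMPT_1, "2": PROMPT_2, "3": PROMPT_3, "4": PROMPT_4,
    "5": PROMPT_5, "6": PROMPT_6, "7": PROMPT_7, "8": PROMPT_8,
    "9": PROMPT_9,
}

def build_chart_prompt(choice: str, subject: str) -> str:
    template = PROMPT_TABLE.get(choice, PROMPT_1)  # fall back to the flowchart prompt
    if choice == "7":
        # the ER-diagram template contains literal {} braces, so .format() would raise
        return template.replace("{subject}", subject)
    return template.format(subject=subject)
```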
def 输入区文件处理(txt):
if txt == "": return False, txt
success = True
import glob
from .crazy_utils import get_files_from_everything
file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf')
file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md')
if len(pdf_manifest) == 0 and len(md_manifest) == 0:
return False, txt #如输入区内容不是文件则直接返回输入区内容
final_result = ""
if file_pdf:
for index, fp in enumerate(pdf_manifest):
file_content, page_one = read_and_clean_pdf_text(fp) # 尝试按照章节切割PDF
file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
final_result += "\n" + file_content
if file_md:
for index, fp in enumerate(md_manifest):
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
file_content = f.read()
file_content = file_content.encode('utf-8', 'ignore').decode()
final_result += "\n" + file_content
return True, final_result
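A brief usage sketch for the helper above (the upload path is hypothetical): it returns `(False, txt)` when the input box holds plain text, and `(True, merged_text)` when the input points at PDF or markdown files:

```python
# hypothetical upload folder containing one PDF and one markdown file
is_file, content = 输入区文件处理("private_upload/2024-01/")
if is_file:
    history = []             # file content replaces the chat history...
    history.append(content)  # ...and is then summarized chunk by chunk
```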
@CatchException
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
import os
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
"根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表将会由对话模型首先判断适合的图表类型随后绘制图表。\
\n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
import fitz
except:
report_exception(chatbot, history,
a = f"解析项目: {txt}",
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
if os.path.exists(txt): #如输入区无内容则直接解析历史记录
file_exist, txt = 输入区文件处理(txt)
else:
file_exist = False
if file_exist : history = [] #如输入区内容为文件则清空历史记录
history.append(txt) #将解析后的txt传递加入到历史中
yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs)

View File

@@ -13,7 +13,7 @@ install_msg ="""
"""
@CatchException
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -21,7 +21,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
@@ -84,7 +84,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
@CatchException
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request=-1):
# resolve deps
try:
# from zh_langchain import construct_vector_store

View File

@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
return text
@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -63,7 +63,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",

View File

@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
return text
@CatchException
def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -63,7 +63,7 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",

View File

@@ -104,7 +104,7 @@ def analyze_intention_with_simple_rules(txt):
@CatchException
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot=chatbot)
# 获取当前虚空终端状态
state = VoidTerminalState.get_state(chatbot)
@@ -121,7 +121,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.unlock_plugin(chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history)
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
return
else:
# 如果意图模糊,提示
@@ -133,7 +133,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = []
chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -109,7 +109,7 @@ def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@CatchException
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
chatbot.append([
"函数插件功能?",
"对IPynb文件进行解析。Contributor: codycjy."])

View File

@@ -83,7 +83,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
history=this_iteration_history_feed, # 迭代之前的分析
sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional)
summary = "请用一句话概括这些文件的整体功能"
diagram_code = make_diagram(this_iteration_files, result, this_iteration_history_feed)
summary = "请用一句话概括这些文件的整体功能。\n\n" + diagram_code
summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=summary,
inputs_show_user=summary,
@@ -104,9 +105,12 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
chatbot.append(("完成了吗?", res))
yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
def make_diagram(this_iteration_files, result, this_iteration_history_feed):
from crazy_functions.diagram_fns.file_tree import build_file_tree_mermaid_diagram
return build_file_tree_mermaid_diagram(this_iteration_history_feed[0::2], this_iteration_history_feed[1::2], "项目示意图")
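For intuition, here is a minimal stand-in for what a file-tree-to-mermaid helper produces. This is an illustrative sketch, not the project's `crazy_functions.diagram_fns.file_tree` implementation; in particular, the real helper merges shared directories into one tree rather than emitting a separate chain per path:

```python
def file_tree_to_mermaid(paths, title="项目示意图"):
    """Illustrative sketch: render a list of file paths as a mermaid graph."""
    fence = "`" * 3  # avoid writing a literal triple-backtick fence inside this example
    lines = [fence + "mermaid", "graph TD", f'ROOT["{title}"]']
    for i, path in enumerate(paths):
        parent = "ROOT"
        for depth, part in enumerate(path.strip("./").split("/")):
            node = f"N{i}_{depth}"  # node ids unique per path segment
            lines.append(f'{parent} --> {node}["{part}"]')
            parent = node
    lines.append(fence)
    return "\n".join(lines)

# e.g. file_tree_to_mermaid(["crazy_functions/Mermaid.py", "toolbox.py"])
```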
@CatchException
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob
file_manifest = [f for f in glob.glob('./*.py')] + \
@@ -119,7 +123,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -137,7 +141,7 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -155,7 +159,7 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -175,7 +179,7 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -197,7 +201,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
@CatchException
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -219,7 +223,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
@CatchException
def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -248,7 +252,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
@CatchException
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -269,7 +273,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -289,7 +293,7 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -311,7 +315,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
@CatchException
def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
@@ -331,7 +335,7 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
@CatchException
def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
txt_pattern = plugin_kwargs.get("advanced_arg")
txt_pattern = txt_pattern.replace(",", ",")
# 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml)

View File

@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui, get_conf
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
@CatchException
def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -10,7 +10,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
@@ -32,7 +32,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
@CatchException
def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -40,7 +40,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出

View File

@@ -166,7 +166,7 @@ class InterviewAssistant(AliyunASR):
@CatchException
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# pip install -U openai-whisper
chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -44,7 +44,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
@CatchException
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):

View File

@@ -132,7 +132,7 @@ def get_meta_information(url, chatbot, history):
return profile
@CatchException
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot=chatbot)
# 基本信息:功能、贡献者
chatbot.append([

View File

@@ -11,7 +11,7 @@ import os
@CatchException
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
if txt:
show_say = txt
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
@@ -32,7 +32,7 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
@CatchException
def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@@ -1,19 +1,47 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
高阶功能模板函数示意图 = f"""
```mermaid
flowchart TD
%% <gpt_academic_hide_mermaid_code> 一个特殊标记用于在生成mermaid图表时隐藏代码块
subgraph 函数调用["函数调用过程"]
AA["输入栏用户输入的文本(txt)"] --> BB["gpt模型参数(llm_kwargs)"]
BB --> CC["插件模型参数(plugin_kwargs)"]
CC --> DD["对话显示框的句柄(chatbot)"]
DD --> EE["对话历史(history)"]
EE --> FF["系统提示词(system_prompt)"]
FF --> GG["当前用户信息(user_request)"]
A["开始(查询5天历史事件)"]
A --> B["获取当前月份和日期"]
B --> C["生成历史事件查询提示词"]
C --> D["调用大模型"]
D --> E["更新界面"]
E --> F["记录历史"]
F --> |"下一天"| B
end
```
"""
@CatchException
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
# 高阶功能模板函数示意图https://mermaid.live/edit#pako:eNptk1tvEkEYhv8KmattQpvlvOyFCcdeeaVXuoYssBwie8gyhCIlqVoLhrbbtAWNUpEGUkyMEDW2Fmn_DDOL_8LZHdOwxrnamX3f7_3mmZk6yKhZCfAgV1KrmYKoQ9fDuKC4yChX0nld1Aou1JzjznQ5fWmejh8LYHW6vG2a47YAnlCLNSIRolnenKBXI_zRIBrcuqRT890u7jZx7zMDt-AaMbnW1--5olGiz2sQjwfoQxsZL0hxplSSU0-rop4vrzmKR6O2JxYjHmwcL2Y_HDatVMkXlf86YzHbGY9bO5j8XE7O8Nsbc3iNB3ukL2SMcH-XIQBgWoVOZzxuOxOJOyc63EPGV6ZQLENVrznViYStTiaJ2vw2M2d9bByRnOXkgCnXylCSU5quyto_IcmkbdvctELmJ-j1ASW3uB3g5xOmKqVTmqr_Na3AtuS_dtBFm8H90XJyHkDDT7S9xXWb4HGmRChx64AOL5HRpUm411rM5uh4H78Z4V7fCZzytjZz2seto9XaNPFue07clLaVZF8UNLygJ-VES8lah_n-O-5Ozc7-77NzJ0-K0yr0ZYrmHdqAk50t2RbA4qq9uNohBASw7YpSgaRkLWCCAtxAlnRZLGbJba9bPwUAC5IsCYAnn1kpJ1ZKUACC0iBSsQLVBzUlA3ioVyQ3qGhZEUrxokiehAz4nFgqk1VNVABfB1uAD_g2_AGPl-W8nMcbCvsDblADfNCz4feyobDPy3rYEMtxwYYbPFNVUoHdCPmDHBv2cP4AMfrCbiBli-Q-3afv0X6WdsIjW2-10fgDy1SAig
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板该函数面向希望实现更多有趣功能的开发者它可以作为创建新功能函数的模板该函数只有20多行代码。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组请不吝PR"))
chatbot.append((
"您正在调用插件:历史上的今天",
"[Local Message] 请注意,您正在调用一个[函数插件]的模板该函数面向希望实现更多有趣功能的开发者它可以作为创建新功能函数的模板该函数只有20多行代码。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组请不吝PR" + 高阶功能模板函数示意图))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
for i in range(5):
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
@@ -26,4 +54,46 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
PROMPT = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例:
```mermaid
graph TD
P(编程) --> L1(Python)
P(编程) --> L2(C)
P(编程) --> L3(C++)
P(编程) --> L4(JavaScript)
P(编程) --> L5(PHP)
```
"""
@CatchException
def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
user_request 当前用户的请求信息(IP地址等)
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能您可以在输入框中输入一些关键词然后使用mermaid+llm绘制图表。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
if txt == "": txt = "空白的输入栏" # 调皮一下
i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。'
i_say = PROMPT.format(subject=txt)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
)
history.append(i_say); history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新

View File

@@ -129,7 +129,7 @@ services:
runtime: nvidia
devices:
- /dev/nvidia0:/dev/nvidia0
# 与宿主的网络融合
network_mode: "host"
command: >
@@ -163,7 +163,7 @@ services:
runtime: nvidia
devices:
- /dev/nvidia0:/dev/nvidia0
# 与宿主的网络融合
network_mode: "host"
@@ -229,4 +229,3 @@ services:
# 不使用代理网络拉取最新代码
command: >
bash -c "python3 -u main.py"

View File

@@ -1,2 +1 @@
# 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss

View File

@@ -1 +1 @@
# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs
# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs

View File

@@ -13,7 +13,7 @@ COPY . .
RUN pip3 install -r requirements.txt
# 安装语音插件的额外依赖
RUN pip3 install pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

View File

@@ -15,7 +15,7 @@ WORKDIR /gpt
RUN pip3 install openai numpy arxiv rich
RUN pip3 install colorama Markdown pygments pymupdf
RUN pip3 install python-docx pdfminer
RUN pip3 install python-docx pdfminer
RUN pip3 install nougat-ocr
# 装载项目文件

View File

@@ -2,9 +2,9 @@
> **ملحوظة**
>
>
> تمت ترجمة هذا الملف README باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع) وقد لا تكون الترجمة 100٪ موثوقة، يُرجى التمييز بعناية بنتائج الترجمة.
>
>
> 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدار المُحدد في `requirements.txt`. الأمر للتثبيت: `pip install -r requirements.txt`.
# <div align=center><img src="logo.png" width="40"> GPT الأكاديمي</div>
@@ -12,14 +12,14 @@
**إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star. لترجمة هذا المشروع إلى لغة عشوائية باستخدام GPT، قم بقراءة وتشغيل [`multi_language.py`](multi_language.py) (تجريبي).
> **ملحوظة**
>
>
> 1. يُرجى ملاحظة أنها الإضافات (الأزرار) المميزة فقط التي تدعم قراءة الملفات، وبعض الإضافات توجد في قائمة منسدلة في منطقة الإضافات. بالإضافة إلى ذلك، نرحب بأي Pull Request جديد بأعلى أولوية لأي إضافة جديدة.
>
>
> 2. تُوضّح كل من الملفات في هذا المشروع وظيفتها بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). يمكنك في أي وقت أن تنقر على إضافة وظيفة ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [نصب بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
>
>
> 3. يتم توافق هذا المشروع مع ودعم توصيات اللغة البيجائية الأكبر شمولًا وشجاعة لمثل ChatGLM. يمكنك توفير العديد من مفاتيح Api المشتركة في تكوين الملف، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. عند تبديل مؤقت لـ `API_KEY`، قم بإدخال `API_KEY` المؤقت في منطقة الإدخال ثم اضغط على زر "إدخال" لجعله ساري المفعول.
<div align="center">
@@ -46,7 +46,7 @@
⭐إضغط على وكيل "شارلوت الذكي" | [وظائف] استكمال الذكاء للكأس الأول للذكاء المكتسب من مايكروسوفت، اكتشاف وتطوير عالمي العميل
تبديل الواجهة المُظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح
دعم المزيد من نماذج LLM | دعم لجميع GPT3.5 وGPT4 و[ChatGLM2 في جامعة ثوه في لين](https://github.com/THUDM/ChatGLM2-6B) و[MOSS في جامعة فودان](https://github.com/OpenLMLab/MOSS)
⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
دعم المزيد من نماذج "LLM"، دعم [نشر الحديس](https://huggingface.co/spaces/qingxu98/gpt-academic) | انضم إلى واجهة "Newbing" (Bing الجديدة)،نقدم نماذج Jittorllms الجديدة تؤيدهم [LLaMA](https://github.com/facebookresearch/llama) و [盘古α](https://openi.org.cn/pangu/)
⭐حزمة "void-terminal" للشبكة (pip) | قم بطلب كافة وظائف إضافة هذا المشروع في python بدون واجهة رسومية (قيد التطوير)
⭐PCI-Express لإعلام (PCI) | [وظائف] باللغة الطبيعية، قم بتنفيذ المِهام الأخرى في المشروع
@@ -200,8 +200,8 @@ docker-compose up
```
"ترجمة سوبر الإنجليزية إلى العربية": {
# البادئة، ستتم إضافتها قبل إدخالاتك. مثلاً، لوصف ما تريده مثل ترجمة أو شرح كود أو تلوين وهلم جرا
"بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
"بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
# اللاحقة، سيتم إضافتها بعد إدخالاتك. يمكن استخدامها لوضع علامات اقتباس حول إدخالك.
"لاحقة": "",
},
@@ -341,4 +341,3 @@ https://github.com/oobabooga/one-click-installers
# المزيد:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -18,11 +18,11 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l
> 1.Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process any new plugins with the **highest priority** through PRs.
>
> 2.The functionalities of each file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). As the version iterates, you can also click on the relevant function plugin at any time to call GPT to regenerate the project's self-analysis report. Common questions are in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
>
>
> 3.This project is compatible with and encourages the use of domestic large-scale language models such as ChatGLM. Multiple api-keys can be used together. You can fill in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"` to temporarily switch `API_KEY` during input, enter the temporary `API_KEY`, and then press enter to apply it.
<div align="center">
@@ -126,7 +126,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration):
```sh
# 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# 【Optional Step II】Support Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -204,8 +204,8 @@ For example:
```
"Super Translation": {
# Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc.
"Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
"Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
# Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix.
"Suffix": "",
},
@@ -355,4 +355,3 @@ https://github.com/oobabooga/one-click-installers
# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -2,9 +2,9 @@
> **Remarque**
>
>
> Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction.
>
>
> 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`.
@@ -12,7 +12,7 @@
**Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !**
Si vous aimez ce projet, veuillez lui donner une étoile.
Si vous aimez ce projet, veuillez lui donner une étoile.
Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
> **Remarque**
@@ -22,7 +22,7 @@ Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez
> 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)..
>
> 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci.
<div align="center">
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
[Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration):
```sh
# [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -201,7 +201,7 @@ Par exemple:
"Traduction avancée de l'anglais vers le français": {
# Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc.
"Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n",
# Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets.
"Suffix": "",
},
@@ -354,4 +354,3 @@ https://github.com/oobabooga/one-click-installers
# Plus
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -2,9 +2,9 @@
> **Hinweis**
>
> Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
>
>
> Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
>
> 7. November 2023: Beim Installieren der Abhängigkeiten bitte nur die in der `requirements.txt` **angegebenen Versionen** auswählen. Installationsbefehl: `pip install -r requirements.txt`.
@@ -12,19 +12,19 @@
**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. Wenn Sie praktische Tastenkombinationen oder Plugins entwickelt haben, sind Pull-Anfragen willkommen!**
Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
Um dieses Projekt mit GPT in eine beliebige Sprache zu übersetzen, lesen Sie [`multi_language.py`](multi_language.py) (experimentell).
> **Hinweis**
>
> 1. Beachten Sie bitte, dass nur die mit **hervorgehobenen** Plugins (Schaltflächen) Dateien lesen können. Einige Plugins befinden sich im **Drop-down-Menü** des Plugin-Bereichs. Außerdem freuen wir uns über jede neue Plugin-PR mit **höchster Priorität**.
>
>
> 2. Die Funktionen jeder Datei in diesem Projekt sind im [Selbstanalysebericht `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht) ausführlich erläutert. Sie können jederzeit auf die relevanten Funktions-Plugins klicken und GPT aufrufen, um den Selbstanalysebericht des Projekts neu zu generieren. Häufig gestellte Fragen finden Sie im [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standardinstallationsmethode](#installation) | [Ein-Klick-Installationsskript](https://github.com/binary-husky/gpt_academic/releases) | [Konfigurationsanleitung](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung).
>
>
> 3. Dieses Projekt ist kompatibel mit und unterstützt auch die Verwendung von inländischen Sprachmodellen wie ChatGLM. Die gleichzeitige Verwendung mehrerer API-Schlüssel ist möglich, indem Sie sie in der Konfigurationsdatei wie folgt angeben: `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Wenn Sie den `API_KEY` vorübergehend ändern möchten, geben Sie vorübergehend den temporären `API_KEY` im Eingabebereich ein und drücken Sie die Eingabetaste, um die Änderung wirksam werden zu lassen.
<div align="center">
@@ -93,7 +93,7 @@ Weitere Funktionen anzeigen (z. B. Bildgenerierung) …… | Siehe das Ende dies
</div>
# Installation
### Installation Method I: Run directly (Windows, Linux or MacOS)
### Installation Method I: Run directly (Windows, Linux or MacOS)
1. Download the project
```sh
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as installing
[Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration):
```sh
# [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -207,8 +207,8 @@ Beispiel:
```
"Übersetzung von Englisch nach Chinesisch": {
# Präfix, wird vor Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Anforderungen zu beschreiben, z.B. Übersetzen, Code erklären, verbessern usw.
"Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
"Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
# Suffix, wird nach Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Eingabe in Anführungszeichen zu setzen.
"Suffix": "",
},
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
# Weitere:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -12,7 +12,7 @@
**Se ti piace questo progetto, per favore dagli una stella; se hai idee o plugin utili, fai una pull request!**
Se ti piace questo progetto, dagli una stella.
Se ti piace questo progetto, dagli una stella.
Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`multi_language.py`](multi_language.py) (sperimentale).
> **Nota**
@@ -20,11 +20,11 @@ Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`mult
> 1. Fai attenzione che solo i plugin (pulsanti) **evidenziati** supportano la lettura dei file, alcuni plugin si trovano nel **menu a tendina** nell'area dei plugin. Inoltre, accogliamo e gestiamo con **massima priorità** qualsiasi nuovo plugin attraverso pull request.
>
> 2. Le funzioni di ogni file in questo progetto sono descritte in dettaglio nel [rapporto di traduzione automatica del progetto `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). Con l'iterazione della versione, puoi anche fare clic sui plugin delle funzioni rilevanti in qualsiasi momento per richiamare GPT e rigenerare il rapporto di auto-analisi del progetto. Domande frequenti [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Metodo di installazione standard](#installazione) | [Script di installazione one-click](https://github.com/binary-husky/gpt_academic/releases) | [Configurazione](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
>
>
> 3. Questo progetto è compatibile e incoraggia l'uso di modelli di linguaggio di grandi dimensioni nazionali, come ChatGLM. Supporto per la coesistenza di più chiavi API, puoi compilare nel file di configurazione come `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando è necessario sostituire temporaneamente `API_KEY`, inserisci temporaneamente `API_KEY` nell'area di input e premi Invio per confermare.
<div align="center">
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # Questo passaggio è identico alla pr
[Optional] Se desideri utilizzare ChatGLM2 di Tsinghua/Fudan MOSS come backend, è necessario installare ulteriori dipendenze (Requisiti: conoscenza di Python + esperienza con Pytorch + hardware potente):
```sh
# [Optional Step I] Supporto per ChatGLM2 di Tsinghua. Note di ChatGLM di Tsinghua: Se si verifica l'errore "Call ChatGLM fail non può caricare i parametri di ChatGLM", fare riferimento a quanto segue: 1: L'installazione predefinita è la versione torch+cpu, per usare cuda è necessario disinstallare torch ed installare nuovamente la versione con torch+cuda; 2: Se il modello non può essere caricato a causa di una configurazione insufficiente, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, sostituendo AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) con AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Supporto per Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -206,8 +206,8 @@ Ad esempio,
```
"Traduzione avanzata Cinese-Inglese": {
# Prefisso, sarà aggiunto prima del tuo input. Ad esempio, utilizzato per descrivere la tua richiesta, come traduzione, spiegazione del codice, rifinitura, ecc.
"Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
"Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
# Suffisso, sarà aggiunto dopo il tuo input. Ad esempio, in combinazione con il prefisso, puoi circondare il tuo input con virgolette.
"Suffisso": "",
},
@@ -224,7 +224,7 @@ La scrittura di plugin per questo progetto è facile e richiede solo conoscenze
# Aggiornamenti
### I: Aggiornamenti
1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
Inoltre, nella stessa area del plugin (menu a tendina) chiamare `Carica la cronologia della conversazione` per ripristinare una conversazione precedente.
Suggerimento: fare clic su `Carica la cronologia della conversazione` senza specificare un file per visualizzare la tua cronologia di archiviazione HTML.
<div align="center">
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
# Altre risorse:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -2,9 +2,9 @@
> **注意**
>
>
> 此READMEはGPTによる翻訳で生成されましたこのプロジェクトのプラグインによって実装されています、翻訳結果は100%正確ではないため、注意してください。
>
>
> 2023年11月7日: 依存関係をインストールする際は、`requirements.txt`で**指定されたバージョン**を選択してください。 インストールコマンド: `pip install -r requirements.txt`。
@@ -18,11 +18,11 @@ GPTを使用してこのプロジェクトを任意の言語に翻訳するに
> 1. **強調された** プラグインボタンのみがファイルを読み込むことができることに注意してください。一部のプラグインは、プラグインエリアのドロップダウンメニューにあります。また、新しいプラグインのPRを歓迎し、最優先で対応します。
>
> 2. このプロジェクトの各ファイルの機能は、[自己分析レポート`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82)で詳しく説明されています。バージョンが進化するにつれて、関連する関数プラグインをクリックして、プロジェクトの自己分析レポートをGPTで再生成することもできます。よくある質問については、[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)をご覧ください。[標準的なインストール方法](#installation) | [ワンクリックインストールスクリプト](https://github.com/binary-husky/gpt_academic/releases) | [構成の説明](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain)。
>
>
> 3. このプロジェクトは、[ChatGLM](https://www.chatglm.dev/)などの中国製の大規模言語モデルも互換性があり、試してみることを推奨しています。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`のように記入できます。`API_KEY`を一時的に変更する必要がある場合は、入力エリアに一時的な`API_KEY`を入力し、Enterキーを押して提出すると有効になります。
<div align="center">
@@ -189,7 +189,7 @@ Python環境に詳しくないWindowsユーザーは、[リリース](https://gi
"超级英译中" {
# プレフィックス、入力の前に追加されます。例えば、要求を記述するために使用されます。翻訳、コードの解説、校正など
"プレフィックス" "下記の内容を中国語に翻訳し、専門用語を一つずつマークダウンテーブルで解説してください:\n\n"、
# サフィックス、入力の後に追加されます。プレフィックスと一緒に使用して、入力内容を引用符で囲むことができます。
"サフィックス" ""、
}、
@@ -342,4 +342,3 @@ https://github.com/oobabooga/one-click-installers
# その他:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -27,7 +27,7 @@ GPT를 사용하여 이 프로젝트를 임의의 언어로 번역하려면 [`mu
<div align="center">
@@ -130,7 +130,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
[Optional Step] If you need support for Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used Pytorch + Sufficient computer configuration):
```sh
# [Optional Step I] Support for Tsinghua ChatGLM2. Note for Tsinghua ChatGLM: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Support for Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
@@ -208,8 +208,8 @@ Please visit the [cloud server remote deployment wiki](https://github.com/binary
```
"초급영문 번역": {
# 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등
"Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
"Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
# 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다.
"Suffix": "",
},
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
# 더보기:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -2,9 +2,9 @@
> **Nota**
>
>
> Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução.
>
>
> 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`.
# <div align=center><img src="logo.png" width="40"> GPT Acadêmico</div>
@@ -15,12 +15,12 @@ Para traduzir este projeto para qualquer idioma utilizando o GPT, leia e execute
> **Nota**
>
> 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos, alguns plugins estão localizados no **menu suspenso** do plugin area. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR.
>
>
> 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT para regerar o relatório de autonálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
>
> 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo.
<div align="center">
Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição
@@ -89,7 +89,7 @@ Apresentação de mais novas funcionalidades (geração de imagens, etc.) ... |
</div>
# Instalação
### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
1. Baixe o projeto
```sh
@@ -124,7 +124,7 @@ python -m pip install -r requirements.txt # Este passo é igual ao da instalaç
[Opcional] Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
```sh
# [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llm/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [Opcional Passo II] Suporte para MOSS do Fudan
python -m pip install -r request_llms/requirements_moss.txt
@@ -202,8 +202,8 @@ Por exemplo:
```
"超级英译中": {
# Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc.
"Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
"Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
# Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas.
"Suffix": "",
},
@@ -355,4 +355,3 @@ https://github.com/oobabooga/instaladores-de-um-clique
# Mais:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -2,9 +2,9 @@
> **Примечание**
>
>
> Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
>
>
> 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
@@ -17,12 +17,12 @@
>
> 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**.
>
> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPTAcademic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
>
> 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения.
<div align="center">
@@ -204,8 +204,8 @@ docker-compose up
```
"Супер-англо-русский перевод": {
# Префикс, который будет добавлен перед вашим вводом. Например, используется для описания вашего запроса, например, перевода, объяснения кода, редактирования и т.д.
"Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
"Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
# Суффикс, который будет добавлен после вашего ввода. Например, можно использовать с префиксом, чтобы заключить ваш ввод в кавычки.
"Суффикс": "",
},
@@ -335,7 +335,7 @@ GPT Academic developer QQ group: `610599535`
```
The code draws on many functions from other excellent projects, in no particular order:
# ChatGLM2-6B from Tsinghua:
https://github.com/THUDM/ChatGLM2-6B
# Linear models with limited memory, from Tsinghua:
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

View File

@@ -17,18 +17,18 @@ nano config.py
- # 如果需要在二级路径下运行
- # CUSTOM_PATH = get_conf('CUSTOM_PATH')
- # if CUSTOM_PATH != "/":
- # from toolbox import run_gradio_in_subpath
- # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
- # else:
- # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
+ 如果需要在二级路径下运行
+ CUSTOM_PATH = get_conf('CUSTOM_PATH')
+ if CUSTOM_PATH != "/":
+ from toolbox import run_gradio_in_subpath
+ run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
+ else:
+ demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
if __name__ == "__main__":
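For context, serving a Gradio app under a secondary path is normally done by mounting it into a host application rather than calling `launch` directly. A minimal sketch, assuming Gradio's documented `mount_gradio_app` helper rather than the project's own `run_gradio_in_subpath`:

```python
# Minimal sketch: serve a Gradio Blocks app under /gradio via FastAPI.
# Illustrative only; run_gradio_in_subpath's internals are not shown here.
import gradio as gr
import uvicorn
from fastapi import FastAPI

with gr.Blocks() as demo:
    gr.Markdown("hello from a sub-path")

app = FastAPI()
app = gr.mount_gradio_app(app, demo, path="/gradio")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)  # served at http://localhost:8000/gradio
```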

Binary file not shown.

View File

@@ -165,7 +165,7 @@ toolbox.py is a utility library, mainly containing function decorators and
3. read_file_to_chat(chatbot, history, file_name): reads the given file, parses the conversation history out of it, and updates the chat display box.
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): the main function that saves the current conversation and notifies the user. If the user wants to load a history, it calls read_file_to_chat() to refresh the chat display; if the user wants to delete histories, it calls 删除所有本地对话历史记录() to perform the deletion.
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): the main function that saves the current conversation and notifies the user. If the user wants to load a history, it calls read_file_to_chat() to refresh the chat display; if the user wants to delete histories, it calls 删除所有本地对话历史记录() to perform the deletion.
## [19/48] Please give an overview of the following program file: crazy_functions\总结word文档.py
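The save/load round-trip described in item 4 above can be pictured in a few lines; the JSON format and function shapes below are illustrative assumptions, not the project's exact implementation:

```python
import json

def save_chat(history, file_name="chat_archive.json"):
    # history is the project's flat list: [question1, answer1, question2, answer2, ...]
    with open(file_name, "w", encoding="utf-8") as f:
        json.dump(history, f, ensure_ascii=False, indent=2)

def load_chat(file_name="chat_archive.json"):
    with open(file_name, "r", encoding="utf-8") as f:
        history = json.load(f)
    chatbot = list(zip(history[0::2], history[1::2]))  # rebuild (q, a) pairs for display
    return chatbot, history
```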

View File

@@ -7,13 +7,27 @@ sample = """
"""
import re
def preprocess_newbing_out(s):
pattern = r'\^(\d+)\^' # 匹配^数字^
pattern2 = r'\[(\d+)\]' # 匹配^数字^
sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if '[1]' in result:
result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
pattern = r"\^(\d+)\^" # 匹配^数字^
pattern2 = r"\[(\d+)\]" # 匹配^数字^
def sub(m):
return "\\[" + m.group(1) + "\\]" # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if "[1]" in result:
result += (
'<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>'
+ "<br/>".join(
[
re.sub(pattern2, sub, r)
for r in result.split("\n")
if r.startswith("[")
]
)
+ "</small>"
)
return result
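A quick demonstration of what `preprocess_newbing_out` does to NewBing-style citations (illustrative input, not project test data):

```python
# ^1^ becomes \[1\]; because "[1]" then appears in the result, every line
# starting with "[" is collected into an appended <small> footnote block.
demo = "GPT was introduced in 2018^1^\n[1]: https://example.com/source"
print(preprocess_newbing_out(demo))
```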
@@ -28,37 +42,39 @@ def close_up_code_segment_during_stream(gpt_reply):
str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
"""
if '```' not in gpt_reply:
if "```" not in gpt_reply:
return gpt_reply
if gpt_reply.endswith('```'):
if gpt_reply.endswith("```"):
return gpt_reply
# 排除了以上两个情况,我们
segments = gpt_reply.split('```')
segments = gpt_reply.split("```")
n_mark = len(segments) - 1
if n_mark % 2 == 1:
# print('输出代码片段中!')
return gpt_reply+'\n```'
return gpt_reply + "\n```"
else:
return gpt_reply
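The fence-closing heuristic is easiest to see on a truncated stream: an odd number of fence markers means a code block is still open, so one closing fence is appended.

```python
# An odd number of ``` fences means a block is still open: one is appended.
streamed = "Here is code: ```python\nprint(1)"
print(close_up_code_segment_during_stream(streamed))  # "\n```" appended at the end
# Balanced fences pass through unchanged.
print(close_up_code_segment_during_stream("done ```\ncode\n```"))
```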
import markdown
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache
def markdown_convertion(txt):
"""
将Markdown格式的文本转换为HTML格式。如果包含数学公式则先将公式转换为HTML格式。
"""
pre = '<div class="markdown-body">'
suf = '</div>'
suf = "</div>"
if txt.startswith(pre) and txt.endswith(suf):
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
return txt # 已经被转化过,不需要再次转化
return txt # 已经被转化过,不需要再次转化
markdown_extension_configs = {
'mdx_math': {
'enable_dollar_delimiter': True,
'use_gitlab_delimiters': False,
"mdx_math": {
"enable_dollar_delimiter": True,
"use_gitlab_delimiters": False,
},
}
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
@@ -72,19 +88,19 @@ def markdown_convertion(txt):
def replace_math_no_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):
content = content.replace('\n', '</br>')
return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
if "mode=display" in match.group(0):
content = content.replace("\n", "</br>")
return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
else:
return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'
def replace_math_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):
if '\\begin{aligned}' in content:
content = content.replace('\\begin{aligned}', '\\begin{array}')
content = content.replace('\\end{aligned}', '\\end{array}')
content = content.replace('&', ' ')
if "mode=display" in match.group(0):
if "\\begin{aligned}" in content:
content = content.replace("\\begin{aligned}", "\\begin{array}")
content = content.replace("\\end{aligned}", "\\end{array}")
content = content.replace("&", " ")
content = tex2mathml_catch_exception(content, display="block")
return content
else:
@@ -94,37 +110,58 @@ def markdown_convertion(txt):
"""
解决一个mdx_math的bug单$包裹begin命令时多余<script>
"""
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
content = content.replace('</script>\n</script>', '</script>')
content = content.replace(
'<script type="math/tex">\n<script type="math/tex; mode=display">',
'<script type="math/tex; mode=display">',
)
content = content.replace("</script>\n</script>", "</script>")
return content
if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
if ("$" in txt) and ("```" not in txt): # 有$标识的公式符号,且没有代码段```的标识
# convert everything to html format
split = markdown.markdown(text='---')
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
split = markdown.markdown(text="---")
convert_stage_1 = markdown.markdown(
text=txt,
extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
extension_configs=markdown_extension_configs,
)
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
# 1. convert to easy-to-copy tex (do not render math)
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
convert_stage_2_1, n = re.subn(
find_equation_pattern,
replace_math_no_render,
convert_stage_1,
flags=re.DOTALL,
)
# 2. convert to rendered equation
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
convert_stage_2_2, n = re.subn(
find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
)
# cat them together
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
else:
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
return (
pre
+ markdown.markdown(
txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
)
+ suf
)
sample = preprocess_newbing_out(sample)
sample = close_up_code_segment_during_stream(sample)
sample = markdown_convertion(sample)
with open('tmp.html', 'w', encoding='utf8') as f:
f.write("""
with open("tmp.html", "w", encoding="utf8") as f:
f.write(
"""
<head>
<title>My Website</title>
<link rel="stylesheet" type="text/css" href="style.css">
</head>
""")
"""
)
f.write(sample)
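Taken together, the refactored pipeline above is: sanitize the NewBing output, close any dangling code fence, then convert Markdown (with `mdx_math` handling `$...$` formulas) to HTML. A minimal call, assuming the `markdown`, `python-markdown-math`, and `latex2mathml` packages are installed:

```python
# Render a snippet with an inline formula through the same pipeline.
html = markdown_convertion("The loss is $L = \\sum_i (y_i - \\hat{y}_i)^2$")
assert html.startswith('<div class="markdown-body">')  # pre/suf wrapper added once
```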

View File

@@ -2106,4 +2106,4 @@
"改变输入参数的顺序与结构": "入力パラメータの順序と構造を変更する",
"正在精细切分latex文件": "LaTeXファイルを細かく分割しています",
"读取文件": "ファイルを読み込んでいます"
}

View File

@@ -98,4 +98,4 @@
"图片生成_DALLE2": "ImageGeneration_DALLE2",
"图片生成_DALLE3": "ImageGeneration_DALLE3",
"图片修改_DALLE2": "ImageModification_DALLE2"
}

View File

@@ -3,7 +3,7 @@
## 1. Install extra dependencies
```
pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
pip install --upgrade pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
```
If the command above cannot be executed due to network restrictions:
@@ -61,4 +61,3 @@ VI. Switching between the two audio-monitoring modes takes effect only after the page is refreshed.
VII. Known pitfall: recording cannot be enabled when running on a non-localhost host without HTTPS; see https://blog.csdn.net/weixin_39461487/article/details/109594434
## 5. Click "Real-time audio capture" in the function-plugin area, or any other audio interaction feature
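A small sanity check after installation can save debugging time later; the module names below are assumptions (pyOpenSSL imports as `OpenSSL`, and the Aliyun SDK as `nls`):

```python
# Verify the extra audio dependencies import cleanly.
import importlib

for mod in ("OpenSSL", "webrtcvad", "scipy", "nls"):
    try:
        importlib.import_module(mod)
        print(f"{mod}: OK")
    except ImportError as err:
        print(f"{mod}: missing ({err})")
```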

View File

@@ -8,8 +8,8 @@ try {
live2d_settings['modelId'] = 5; // 默认模型 ID
live2d_settings['modelTexturesId'] = 1; // 默认材质 ID
live2d_settings['modelStorage'] = false; // 不储存模型 ID
live2d_settings['waifuSize'] = '210x187';
live2d_settings['waifuTipsSize'] = '187x52';
live2d_settings['canSwitchModel'] = true;
live2d_settings['canSwitchTextures'] = true;
live2d_settings['canSwitchHitokoto'] = false;

View File

@@ -123,4 +123,4 @@
<glyph unicode="&#xe65e;" d="M512 748.8l211.2 179.2 300.8-198.4-204.8-166.4-307.2 185.6zM1024 396.8l-300.8-198.4-211.2 172.8 300.8 185.6 211.2-160zM300.8 198.4l-300.8 198.4 204.8 166.4 307.2-192-211.2-172.8zM0 729.6l300.8 198.4 211.2-179.2-300.8-192-211.2 172.8zM512 332.8l211.2-179.2 89.6 57.6v-64l-300.8-179.2-300.8 179.2v64l89.6-51.2 211.2 172.8z" />
<glyph unicode="&#xe65f;" d="M864 249.6c-38.4 0-64 32-64 64v256c0 38.4 32 64 64 64 38.4 0 64-32 64-64v-256c0-32-25.6-64-64-64zM697.6 102.4h-38.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-70.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-32c-19.2 0-38.4 19.2-38.4 44.8v428.8h448v-422.4c0-32-12.8-51.2-38.4-51.2zM736 633.6h-448c0 89.6 32 153.6 76.8 192l-70.4 83.2c-6.4 12.8-6.4 25.6 0 38.4 12.8 12.8 25.6 12.8 38.4 0l83.2-96c32 12.8 64 19.2 96 19.2s70.4-6.4 96-19.2l83.2 96c12.8 12.8 25.6 12.8 38.4 0s12.8-32 0-38.4l-70.4-83.2c44.8-32 76.8-102.4 76.8-192zM441.6 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 12.8 25.6 32-12.8 32-25.6 32zM582.4 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 19.2 25.6 32-12.8 32-25.6 32zM160 249.6c-38.4 0-64 32-64 64v256c0 38.4 25.6 64 64 64s64-32 64-64v-256c0-32-25.6-64-64-64z" />
<glyph unicode="&#xe660;" d="M921.6 211.2c-32-153.6-115.2-211.2-147.2-249.6-32-25.6-121.6-25.6-153.6-6.4-38.4 25.6-134.4 25.6-166.4 0-44.8-32-115.2-19.2-128-12.8-256 179.2-352 716.8 12.8 774.4 64 12.8 134.4-32 134.4-32 51.2-25.6 70.4-12.8 115.2 6.4 96 44.8 243.2 44.8 313.6-76.8-147.2-96-153.6-294.4 19.2-403.2zM716.8 960c12.8-70.4-64-224-204.8-230.4-12.8 38.4 32 217.6 204.8 230.4z" />
</font></defs></svg>

(SVG icon font; size unchanged at 56 KiB)

File diff suppressed because one or more lines are too long

View File

@@ -1 +1 @@
https://github.com/fghrsh/live2d_demo

View File

@@ -5,11 +5,11 @@ window.live2d_settings = Array(); /*
      /`ー'    L//`ヽ、 Live2D 看板娘 参数设置
     /  ,  /|  ,  ,    ', Version 1.4.2
   イ  / /-/  L_ ハ ヽ!  i Update 2018.11.12
    レ ヘ 7イ  レ'ァ-ト、!ハ|  |
     !,/7 '0'   ´0iソ|   |   
     |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘
     レ'| i.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html
      レ'| | / k__/レ'ヽ, ハ. |
       | |/i 〈|/  i ,.ヘ | i | Thanks
      .|/ /    ヘ!   | journey-ad / https://github.com/journey-ad/live2d_src
        kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
@@ -77,11 +77,11 @@ String.prototype.render = function(context) {
return this.replace(tokenReg, function (word, slash1, token, slash2) {
if (slash1 || slash2) { return word.replace('\\', ''); }
var variables = token.replace(/\s/g, '').split('.');
var currentObject = context;
var i, length, variable;
for (i = 0, length = variables.length; i < length; ++i) {
variable = variables[i];
currentObject = currentObject[variable];
@@ -101,9 +101,9 @@ function showMessage(text, timeout, flag) {
if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
if(flag) sessionStorage.setItem('waifu-text', text);
$('.waifu-tips').stop();
$('.waifu-tips').html(text).fadeTo(200, 1);
if (timeout === undefined) timeout = 5000;
@@ -121,15 +121,15 @@ function hideMessage(timeout) {
function initModel(waifuPath, type) {
/* console welcome message */
eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
/* 判断 JQuery */
if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
/* 加载看板娘样式 */
live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
$("#live2d").attr("width",live2d_settings.waifuSize[0]);
$("#live2d").attr("height",live2d_settings.waifuSize[1]);
$(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
@@ -138,32 +138,32 @@ function initModel(waifuPath, type) {
$(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
$(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
$(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
try {
if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
else $(".waifu").css("transition", 'all .3s ease-in-out');
} catch(err) { console.log('[Error] JQuery UI is not defined.') }
live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
$('.waifu-tool .fui-home').click(function (){
//window.location = 'https://www.fghrsh.net/';
window.location = live2d_settings.homePageUrl;
});
$('.waifu-tool .fui-info-circle').click(function (){
//window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
window.open(live2d_settings.aboutPageUrl);
});
if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
$.ajax({
cache: true,
@@ -172,7 +172,7 @@ function initModel(waifuPath, type) {
success: function (result){ loadTipsMessage(result); }
});
}
if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
@@ -185,7 +185,7 @@ function initModel(waifuPath, type) {
if (waifuPath === undefined) waifuPath = '';
var modelId = localStorage.getItem('modelId');
var modelTexturesId = localStorage.getItem('modelTexturesId');
if (!live2d_settings.modelStorage || modelId == null) {
var modelId = live2d_settings.modelId;
var modelTexturesId = live2d_settings.modelTexturesId;
@@ -204,7 +204,7 @@ function loadModel(modelId, modelTexturesId=0) {
function loadTipsMessage(result) {
window.waifu_tips = result;
$.each(result.mouseover, function (index, tips){
$(document).on("mouseover", tips.selector, function (){
var text = getRandText(tips.text);
@@ -223,50 +223,50 @@ function loadTipsMessage(result) {
var now = new Date();
var after = tips.date.split('-')[0];
var before = tips.date.split('-')[1] || after;
if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
(after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
var text = getRandText(tips.text);
text = text.render({year: now.getFullYear()});
showMessage(text, 6000, true);
}
});
if (live2d_settings.showF12OpenMsg) {
re.toString = function() {
showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
return '';
};
}
if (live2d_settings.showCopyMessage) {
$(document).on('copy', function() {
showMessage(getRandText(result.waifu.copy_message), 5000, true);
});
}
$('.waifu-tool .fui-photo').click(function(){
showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
window.Live2D.captureName = live2d_settings.screenshotCaptureName;
window.Live2D.captureFrame = true;
});
$('.waifu-tool .fui-cross').click(function(){
sessionStorage.setItem('waifu-dsiplay', 'none');
showMessage(getRandText(result.waifu.hidden_message), 1300, true);
window.setTimeout(function() {$('.waifu').hide();}, 1300);
});
window.showWelcomeMessage = function(result) {
showMessage('欢迎使用GPT-Academic', 6000);
}; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
var waifu_tips = result.waifu;
function loadOtherModel() {
var modelId = modelStorageGetItem('modelId');
var modelRandMode = live2d_settings.modelRandMode;
$.ajax({
cache: modelRandMode == 'switch' ? true : false,
url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
@@ -279,12 +279,12 @@ function loadTipsMessage(result) {
}
});
}
function loadRandTextures() {
var modelId = modelStorageGetItem('modelId');
var modelTexturesId = modelStorageGetItem('modelTexturesId');
var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
$.ajax({
cache: modelTexturesRandMode == 'switch' ? true : false,
url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
@@ -297,32 +297,32 @@ function loadTipsMessage(result) {
}
});
}
function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
/* 检测用户活动状态,并在空闲时显示一言 */
if (live2d_settings.showHitokoto) {
window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
$(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
}
function ifActed() {
if (!hitokotoInterval) {
hitokotoInterval = true;
hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
}
}
function elseActed() {
getActed = hitokotoInterval = false;
window.clearInterval(hitokotoTimer);
}
function showHitokotoActed() {
if ($(document)[0].visibilityState == 'visible') showHitokoto();
}
function showHitokoto() {
switch(live2d_settings.hitokotoAPI) {
case 'lwl12.com':
@@ -366,7 +366,7 @@ function loadTipsMessage(result) {
});
}
}
$('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
$('.waifu-tool .fui-user').click(function (){loadRandTextures()});
$('.waifu-tool .fui-chat').click(function (){showHitokoto()});

View File

@@ -31,7 +31,7 @@
},
"model_message": {
"1": ["来自 Potion Maker 的 Pio 酱 ~"],
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
},
"hitokoto_api_message": {
"lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
@@ -111,4 +111,4 @@
{ "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
{ "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
]
}

View File

@@ -287,4 +287,4 @@
}
.fui-user:before {
content: "\e631";
}

main.py (59 changed lines)
View File

@@ -1,9 +1,9 @@
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
help_menu_description = \
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
@@ -15,7 +15,7 @@ help_menu_description = \
def main():
import gradio as gr
if gr.__version__ not in ['3.32.6']:
if gr.__version__ not in ['3.32.6', '3.32.7', '3.32.8']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llms.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
@@ -33,7 +33,7 @@ def main():
from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
# 问询记录, python 版本建议3.9+(越新越好)
import logging, uuid
os.makedirs(PATH_LOGGING, exist_ok=True)
@@ -93,7 +93,7 @@ def main():
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
with gr.Row():
@@ -114,7 +114,7 @@ def main():
with gr.Row():
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
with gr.Row(elem_id="input-plugin-group"):
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
with gr.Row():
for k, plugin in plugins.items():
@@ -122,7 +122,7 @@ def main():
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
info = plugins[k].get("Info", k)
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
with gr.Row():
with gr.Accordion("更多函数插件", open=True):
@@ -134,22 +134,22 @@ def main():
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用", open=False) as area_file_up:
with gr.Accordion("点击展开“文件下载区”", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
with gr.Row():
with gr.Tab("上传文件", elem_id="interact-panel"):
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
with gr.Tab("更换模型", elem_id="interact-panel"):
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -158,13 +158,12 @@ def main():
with gr.Tab("界面外观", elem_id="interact-panel"):
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode,
)
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
with gr.Tab("帮助", elem_id="interact-panel"):
gr.Markdown(help_menu_description)
@@ -218,7 +217,7 @@ def main():
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
return ret
def reflesh_btn(persistent_cookie_, cookies_):
ret = {}
for k in customize_btns:
@@ -226,7 +225,7 @@ def main():
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
except: return ret
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
ret.update({cookies: cookies_})
@@ -236,9 +235,9 @@ def main():
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
return ret
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
[persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
# save persistent cookie
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")
@@ -322,7 +321,7 @@ def main():
else:
css_part2 = adjust_theme()._get_theme_css()
return css_part2 + css_part1
theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css])
theme_handle.then(
None,
@@ -347,13 +346,13 @@ def main():
if not group_list: # 处理特殊情况:没有选择任何插件组
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
for k, plugin in plugins.items():
if plugin.get("AsButton", True):
if plugin.get("AsButton", True):
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()
def deal_audio(audio, cookies):
@@ -366,7 +365,7 @@ def main():
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
# gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
def run_delayed_tasks():
import threading, webbrowser, time
@@ -377,7 +376,7 @@ def main():
def auto_updates(): time.sleep(0); auto_update()
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
def warm_up_mods(): time.sleep(6); warm_up_modules()
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
@@ -385,21 +384,21 @@ def main():
run_delayed_tasks()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
quiet=True,
server_name="0.0.0.0",
server_name="0.0.0.0",
ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
ssl_verify=False,
server_port=PORT,
favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
# 如果需要在二级路径下运行
# CUSTOM_PATH = get_conf('CUSTOM_PATH')
# if CUSTOM_PATH != "/":
# from toolbox import run_gradio_in_subpath
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
# else:
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])

View File

@@ -352,9 +352,9 @@ def step_1_core_key_translate():
chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# copy
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def copy_source_code():
from toolbox import get_conf
@@ -367,9 +367,9 @@ def step_1_core_key_translate():
shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
copy_source_code()
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# primary key replace
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
directory_path = f'./multi-language/{LANG}/'
for root, dirs, files in os.walk(directory_path):
for file in files:
@@ -389,9 +389,9 @@ def step_1_core_key_translate():
def step_2_core_key_translate():
# =================================================================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# step2
# =================================================================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def load_string(strings, string_input):
string_ = string_input.strip().strip(',').strip().strip('.').strip()
@@ -492,9 +492,9 @@ def step_2_core_key_translate():
cached_translation.update(read_map_from_json(language=LANG_STD))
cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# literal key replace
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
directory_path = f'./multi-language/{LANG}/'
for root, dirs, files in os.walk(directory_path):
for file in files:

View File

@@ -32,4 +32,4 @@ P.S. If you follow the steps below and successfully integrate a new large model, you are welcome to submit a Pull Request.
5. Once your tests pass, make the final edits in `request_llms/bridge_all.py` to wire the model fully into the framework (one look at that file makes the required changes obvious); a sketch follows this list.
6. Update the `LLM_MODEL` setting, then run `python main.py` to test the end result.
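As a sketch of step 5, a new model is integrated by registering its two entry points in `model_info`; the module name `bridge_mynewllm` and the field values below are placeholders, so copy an existing entry and adapt it:

```python
# Hypothetical registration in request_llms/bridge_all.py (placeholder names).
from .bridge_mynewllm import predict_no_ui_long_connection as mynewllm_noui
from .bridge_mynewllm import predict as mynewllm_ui

model_info.update({
    "my-new-llm": {
        "fn_with_ui": mynewllm_ui,       # streaming entry point used by the UI
        "fn_without_ui": mynewllm_noui,  # blocking entry point for plugins/threads
        "endpoint": None,                # or the HTTP endpoint of the service
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,    # reuse an existing tokenizer for counting
        "token_cnt": get_token_num_gpt35,
    }
})
```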

View File

@@ -11,7 +11,7 @@
import tiktoken, copy
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
from toolbox import get_conf, trimmed_format_exc
from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
@@ -28,6 +28,9 @@ from .bridge_chatglm3 import predict as chatglm3_ui
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
from .bridge_qianfan import predict as qianfan_ui
from .bridge_google_gemini import predict as genai_ui
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
class LazyloadTiktoken(object):
@@ -147,6 +150,15 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-turbo-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4-1106-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -156,6 +168,15 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-0125-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-3.5-random": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -246,6 +267,22 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gemini-pro": {
"fn_with_ui": genai_ui,
"fn_without_ui": genai_noui,
"endpoint": None,
"max_token": 1024 * 32,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gemini-pro-vision": {
"fn_with_ui": genai_ui,
"fn_without_ui": genai_noui,
"endpoint": None,
"max_token": 1024 * 32,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
}
# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
@@ -431,14 +468,14 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
})
except:
print(trimmed_format_exc())
if "qwen" in AVAIL_LLM_MODELS:
if "qwen-local" in AVAIL_LLM_MODELS:
try:
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
from .bridge_qwen import predict as qwen_ui
from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
from .bridge_qwen_local import predict as qwen_local_ui
model_info.update({
"qwen": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"qwen-local": {
"fn_with_ui": qwen_local_ui,
"fn_without_ui": qwen_local_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
@@ -447,16 +484,32 @@ if "qwen" in AVAIL_LLM_MODELS:
})
except:
print(trimmed_format_exc())
if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
try:
from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
from .bridge_chatgpt_website import predict as chatgpt_website_ui
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
from .bridge_qwen import predict as qwen_ui
model_info.update({
"chatgpt_website": {
"fn_with_ui": chatgpt_website_ui,
"fn_without_ui": chatgpt_website_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"qwen-turbo": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 6144,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qwen-plus": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 30720,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qwen-max": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 28672,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
@@ -559,6 +612,23 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
})
except:
print(trimmed_format_exc())
# if "skylark" in AVAIL_LLM_MODELS:
# try:
# from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
# from .bridge_skylark2 import predict as skylark_ui
# model_info.update({
# "skylark": {
# "fn_with_ui": skylark_ui,
# "fn_without_ui": skylark_noui,
# "endpoint": None,
# "max_token": 4096,
# "tokenizer": tokenizer_gpt35,
# "token_cnt": get_token_num_gpt35,
# }
# })
# except:
# print(trimmed_format_exc())
# <-- 用于定义和切换多个azure模型 -->
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
@@ -616,6 +686,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
"""
import threading, time, copy
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
model = llm_kwargs['llm_model']
n_model = 1
if '&' not in model:
@@ -689,6 +760,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
additional_fn代表点击的哪个按钮按钮见functional.py
"""
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错检查config中的AVAIL_LLM_MODELS选项
yield from method(inputs, llm_kwargs, *args, **kwargs)
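The `'&'` check earlier in `predict_no_ui_long_connection` hints at a fan-out mode: a model string such as `"gpt-3.5-turbo&chatglm"` queries several models in parallel threads and joins the answers. A self-contained sketch of that pattern (thread and result handling here are illustrative, not the project's exact code):

```python
import threading

def fan_out(model_string, ask_one):
    """Query every model named in 'a&b&c' concurrently and collect the replies."""
    models = model_string.split("&")
    results = {}
    def worker(name):
        results[name] = ask_one(name)
    threads = [threading.Thread(target=worker, args=(m,)) for m in models]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results

print(fan_out("gpt-3.5-turbo&chatglm", lambda m: f"[{m}] placeholder reply"))
```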

View File

@@ -102,20 +102,25 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
result = ''
json_data = None
while True:
try: chunk = next(stream_response).decode()
try: chunk = next(stream_response)
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
if len(chunk)==0: continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
if len(chunk_decoded)==0: continue
if not chunk_decoded.startswith('data:'):
error_msg = get_full_error(chunk, stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
else:
raise RuntimeError("OpenAI拒绝了请求" + error_msg)
if ('data: [DONE]' in chunk): break # api2d 正常完成
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
# 提前读取一些信息 (用于判断异常)
if has_choices and not choice_valid:
# 一些垃圾第三方接口的出现这样的错误
continue
json_data = chunkjson['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if "role" in delta: continue
@@ -239,6 +244,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if has_choices and not choice_valid:
# 一些垃圾第三方接口的出现这样的错误
continue
if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
# 传递进来一些奇怪的东西
raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
# 前者是API2D的结束条件后者是OPENAI的结束条件
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
# 判定为数据流的结束gpt_replying_buffer也写完了
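The refactor above moves per-chunk parsing into a `decode_chunk` helper so the loop can skip malformed chunks from low-quality third-party relays and detect `data: [DONE]` cleanly. A minimal helper with the same return shape (internals are inferred from the call sites, not copied from the project):

```python
import json

def decode_chunk(chunk: bytes):
    """Pre-parse one SSE chunk; every flag defaults to False on any failure."""
    chunk_decoded = chunk.decode(errors="ignore")
    chunkjson, has_choices, choice_valid, has_content, has_role = None, False, False, False, False
    try:
        chunkjson = json.loads(chunk_decoded[len("data:"):])  # strip the SSE prefix
        has_choices = "choices" in chunkjson
        if has_choices:
            choice_valid = len(chunkjson["choices"]) > 0
        if has_choices and choice_valid:
            has_content = "content" in chunkjson["choices"][0]["delta"]
            has_role = "role" in chunkjson["choices"][0]["delta"]
    except Exception:
        pass  # e.g. "data: [DONE]" or garbage: leave chunkjson as None
    return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role
```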

View File

@@ -0,0 +1,114 @@
# encoding: utf-8
# @Time : 2023/12/21
# @Author : Spike
# @Descr :
import json
import re
import os
import time
from request_llms.com_google import GoogleChatInit
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
console_slience=False):
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
raise ValueError(f"请配置 GEMINI_API_KEY。")
genai = GoogleChatInit()
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
gpt_replying_buffer = ''
stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
for response in stream_response:
results = response.decode()
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
error_match = re.search(r'\"message\":\s*\"(.*?)\"', results, flags=re.DOTALL)
if match:
try:
paraphrase = json.loads('{"text": "%s"}' % match.group(1))
except:
raise ValueError(f"解析GEMINI消息出错。")
buffer = paraphrase['text']
gpt_replying_buffer += buffer
if len(observe_window) >= 1:
observe_window[0] = gpt_replying_buffer
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
if error_match:
raise RuntimeError(f'{gpt_replying_buffer} 对话错误')
return gpt_replying_buffer
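The `observe_window` convention used here (and across the other bridges) doubles as a progress channel and a dead-man's switch: slot 0 carries the partial reply for the caller to display, slot 1 holds the caller's last heartbeat timestamp, and a stale heartbeat aborts the worker. The pattern in isolation:

```python
import time

def stream_with_watchdog(chunks, observe_window, patience=5):
    """Accumulate streamed pieces; abort if the observer stops updating slot 1."""
    buffer = ""
    for piece in chunks:
        buffer += piece
        if len(observe_window) >= 1:
            observe_window[0] = buffer  # expose the partial reply to the caller
        if len(observe_window) >= 2 and (time.time() - observe_window[1]) > patience:
            raise RuntimeError("watchdog timeout: caller stopped observing")
    return buffer
```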
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
return
# 适配润色区域
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
if "vision" in llm_kwargs["llm_model"]:
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
def make_media_input(inputs, image_paths):
for image_path in image_paths:
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
return inputs
if have_recent_file:
inputs = make_media_input(inputs, image_paths)
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
genai = GoogleChatInit()
retry = 0
while True:
try:
stream_response = genai.generate_chat(inputs, llm_kwargs, history, system_prompt)
break
except Exception as e:
retry += 1
chatbot[-1] = ((chatbot[-1][0], trimmed_format_exc()))
yield from update_ui(chatbot=chatbot, history=history, msg="请求失败") # 刷新界面
return
gpt_replying_buffer = ""
gpt_security_policy = ""
history.extend([inputs, ''])
for response in stream_response:
results = response.decode("utf-8") # 被这个解码给耍了。。
gpt_security_policy += results
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
error_match = re.search(r'\"message\":\s*\"(.*)\"', results, flags=re.DOTALL)
if match:
try:
paraphrase = json.loads('{"text": "%s"}' % match.group(1))
except:
raise ValueError(f"解析GEMINI消息出错。")
gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
chatbot[-1] = (inputs, gpt_replying_buffer)
history[-1] = gpt_replying_buffer
yield from update_ui(chatbot=chatbot, history=history)
if error_match:
history = history[:-2] # 错误的不纳入对话
chatbot[-1] = (inputs, gpt_replying_buffer + f"对话错误请查看message\n\n```\n{error_match.group(1)}\n```")
yield from update_ui(chatbot=chatbot, history=history)
raise RuntimeError('对话错误')
if not gpt_replying_buffer:
history = history[:-2] # 错误的不纳入对话
chatbot[-1] = (inputs, gpt_replying_buffer + f"触发了Google的安全访问策略没有回答\n\n```\n{gpt_security_policy}\n```")
yield from update_ui(chatbot=chatbot, history=history)
if __name__ == '__main__':
import sys
llm_kwargs = {'llm_model': 'gemini-pro'}
result = predict('Write long a story about a magic backpack.', llm_kwargs, llm_kwargs, [])
for i in result:
print(i)

View File

@@ -1,16 +1,17 @@
"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第一部分来自EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
from .edge_gpt_free import Chatbot as NewbingChatbot
load_message = "等待NewBing响应。"
"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第二部分子进程Worker调用主体
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
import time
import json
@@ -22,19 +23,30 @@ import threading
from toolbox import update_ui, get_conf, trimmed_format_exc
from multiprocessing import Process, Pipe
def preprocess_newbing_out(s):
pattern = r'\^(\d+)\^' # 匹配^数字^
sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
pattern = r"\^(\d+)\^" # 匹配^数字^
sub = lambda m: "(" + m.group(1) + ")" # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if "[1]" in result:
result += (
"\n\n```reference\n"
+ "\n".join([r for r in result.split("\n") if r.startswith("[")])
+ "\n```\n"
)
return result
def preprocess_newbing_out_simple(result):
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
if "[1]" in result:
result += (
"\n\n```reference\n"
+ "\n".join([r for r in result.split("\n") if r.startswith("[")])
+ "\n```\n"
)
return result
class NewBingHandle(Process):
def __init__(self):
super().__init__(daemon=True)
@@ -46,11 +58,12 @@ class NewBingHandle(Process):
self.check_dependency()
self.start()
self.threadLock = threading.Lock()
def check_dependency(self):
try:
self.success = False
import certifi, httpx, rich
self.info = "依赖检测通过等待NewBing响应。注意目前不能多人同时调用NewBing接口有线程锁否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时会自动使用已配置的代理。"
self.success = True
except:
@@ -62,18 +75,19 @@ class NewBingHandle(Process):
async def async_run(self):
# 读取配置
NEWBING_STYLE = get_conf('NEWBING_STYLE')
NEWBING_STYLE = get_conf("NEWBING_STYLE")
from request_llms.bridge_all import model_info
endpoint = model_info['newbing']['endpoint']
endpoint = model_info["newbing"]["endpoint"]
while True:
# 等待
kwargs = self.child.recv()
question=kwargs['query']
history=kwargs['history']
system_prompt=kwargs['system_prompt']
question = kwargs["query"]
history = kwargs["history"]
system_prompt = kwargs["system_prompt"]
# 是否重置
if len(self.local_history) > 0 and len(history)==0:
if len(self.local_history) > 0 and len(history) == 0:
await self.newbing_model.reset()
self.local_history = []
@@ -81,34 +95,33 @@ class NewBingHandle(Process):
prompt = ""
if system_prompt not in self.local_history:
self.local_history.append(system_prompt)
prompt += system_prompt + '\n'
prompt += system_prompt + "\n"
# 追加历史
for ab in history:
a, b = ab
if a not in self.local_history:
self.local_history.append(a)
prompt += a + '\n'
prompt += a + "\n"
# 问题
prompt += question
self.local_history.append(question)
print('question:', prompt)
print("question:", prompt)
# 提交
async for final, response in self.newbing_model.ask_stream(
prompt=question,
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
):
if not final:
print(response)
self.child.send(str(response))
else:
print('-------- receive final ---------')
self.child.send('[Finish]')
print("-------- receive final ---------")
self.child.send("[Finish]")
# self.local_history.append(response)
def run(self):
"""
这个函数运行在子进程
@@ -118,32 +131,37 @@ class NewBingHandle(Process):
self.local_history = []
if (self.newbing_model is None) or (not self.success):
# 代理设置
proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
if proxies is None:
proxies, NEWBING_COOKIES = get_conf("proxies", "NEWBING_COOKIES")
if proxies is None:
self.proxies_https = None
else:
self.proxies_https = proxies['https']
else:
self.proxies_https = proxies["https"]
if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
try:
cookies = json.loads(NEWBING_COOKIES)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
self.child.send('[Fail]'); self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(f"[Local Message] NEWBING_COOKIES未填写或有格式错误。")
self.child.send("[Fail]")
self.child.send("[Finish]")
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
else:
cookies = None
try:
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
self.newbing_model = NewbingChatbot(
proxy=self.proxies_https, cookies=cookies
)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] 不能加载Newbing组件请注意Newbing组件已不再维护。{tb_str}')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(
f"[Local Message] 不能加载Newbing组件请注意Newbing组件已不再维护。{tb_str}"
)
self.child.send("[Fail]")
self.child.send("[Finish]")
raise RuntimeError(f"不能加载Newbing组件请注意Newbing组件已不再维护。")
self.success = True
@@ -151,66 +169,100 @@ class NewBingHandle(Process):
# 进入任务等待状态
asyncio.run(self.async_run())
except Exception:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题建议更换代理协议推荐http或代理节点 {tb_str}.')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(
f"[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题建议更换代理协议推荐http或代理节点 {tb_str}."
)
self.child.send("[Fail]")
self.child.send("[Finish]")
def stream_chat(self, **kwargs):
"""
这个函数运行在主进程
"""
self.threadLock.acquire() # 获取线程锁
self.parent.send(kwargs) # 请求子进程
self.threadLock.acquire() # 获取线程锁
self.parent.send(kwargs) # 请求子进程
while True:
res = self.parent.recv() # 等待newbing回复的片段
if res == '[Finish]': break # 结束
elif res == '[Fail]': self.success = False; break # 失败
else: yield res # newbing回复的片段
self.threadLock.release() # 释放线程锁
res = self.parent.recv() # 等待newbing回复的片段
if res == "[Finish]":
break # 结束
elif res == "[Fail]":
self.success = False
break # 失败
else:
yield res # newbing回复的片段
self.threadLock.release() # 释放线程锁
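Underneath `stream_chat`, the parent and child processes speak a tiny protocol over a `multiprocessing.Pipe`: plain strings are streamed fragments, while the sentinels `"[Finish]"` and `"[Fail]"` terminate the stream (the thread lock merely serializes concurrent callers). The protocol in miniature:

```python
# Minimal parent/child streaming over a Pipe with sentinel termination.
from multiprocessing import Pipe, Process

def child_main(conn):
    for fragment in ("Hel", "lo ", "world"):
        conn.send(fragment)   # stream partial output
    conn.send("[Finish]")     # normal end-of-stream sentinel

if __name__ == "__main__":
    parent, child = Pipe()
    Process(target=child_main, args=(child,), daemon=True).start()
    while True:
        msg = parent.recv()
        if msg == "[Finish]":
            break             # "[Fail]" would mark an error instead
        print(msg, end="", flush=True)
```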
"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第三部分:主进程统一调用函数接口
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
global newbingfree_handle
newbingfree_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
def predict_no_ui_long_connection(
inputs,
llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=False,
):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py
多线程方法
函数的说明请见 request_llms/bridge_all.py
"""
global newbingfree_handle
if (newbingfree_handle is None) or (not newbingfree_handle.success):
newbingfree_handle = NewBingHandle()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
if not newbingfree_handle.success:
if len(observe_window) >= 1:
observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
if not newbingfree_handle.success:
error = newbingfree_handle.info
newbingfree_handle = None
raise RuntimeError(error)
# 没有 sys_prompt 接口因此把prompt加入 history
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
for i in range(len(history) // 2):
history_feedin.append([history[2 * i], history[2 * i + 1]])
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
if len(observe_window) >= 1:
observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
for response in newbingfree_handle.stream_chat(
query=inputs,
history=history_feedin,
system_prompt=sys_prompt,
max_length=llm_kwargs["max_length"],
top_p=llm_kwargs["top_p"],
temperature=llm_kwargs["temperature"],
):
if len(observe_window) >= 1:
observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return preprocess_newbing_out_simple(response)
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
def predict(
inputs,
llm_kwargs,
plugin_kwargs,
chatbot,
history=[],
system_prompt="",
stream=True,
additional_fn=None,
):
"""
单线程方法
函数的说明请见 request_llms/bridge_all.py
单线程方法
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ..."))
@@ -219,27 +271,41 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
newbingfree_handle = NewBingHandle()
chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
yield from update_ui(chatbot=chatbot, history=[])
if not newbingfree_handle.success:
if not newbingfree_handle.success:
newbingfree_handle = None
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
inputs, history = handle_core_functionality(
additional_fn, inputs, history, chatbot
)
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
for i in range(len(history) // 2):
history_feedin.append([history[2 * i], history[2 * i + 1]])
chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
response = "[Local Message] 等待NewBing响应中 ..."
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
yield from update_ui(
chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。"
)
for response in newbingfree_handle.stream_chat(
query=inputs,
history=history_feedin,
system_prompt=system_prompt,
max_length=llm_kwargs["max_length"],
top_p=llm_kwargs["top_p"],
temperature=llm_kwargs["temperature"],
):
chatbot[-1] = (inputs, preprocess_newbing_out(response))
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常请刷新界面重试 ..."
yield from update_ui(
chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。"
)
if response == "[Local Message] 等待NewBing响应中 ...":
response = "[Local Message] NewBing响应异常请刷新界面重试 ..."
history.extend([inputs, response])
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {response}')
logging.info(f"[raw_input] {inputs}")
logging.info(f"[response] {response}")
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
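Both entry points fold the flat history list, which alternates user and assistant turns, into [user, assistant] pairs before calling stream_chat; the integer division silently drops a trailing unanswered turn. As a standalone illustration:

```python
def pair_history(history):
    # history alternates: [user_0, assistant_0, user_1, assistant_1, ...]
    return [[history[2 * i], history[2 * i + 1]] for i in range(len(history) // 2)]

assert pair_history(["hi", "hello", "how?", "fine", "dangling"]) == [
    ["hi", "hello"],
    ["how?", "fine"],
]  # the unmatched "dangling" turn is discarded
```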

View File

@@ -1,59 +1,62 @@
model_name = "Qwen"
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception
from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
model_name = 'Qwen'
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
⭐ Multi-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
watch_dog_patience = 5
response = ""
from .com_qwenapi import QwenRequestInstance
sri = QwenRequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
return response
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
⭐ Single-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
def load_model_info(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
self.model_name = model_name
self.cmd_to_install = cmd_to_install
# Try to import the dependencies; if any are missing, suggest how to install them
try:
check_packages(["dashscope"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
chatbot=chatbot, history=history, delay=0)
return
def load_model_and_tokenizer(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
# from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
with ProxyNetworkActivate('Download_LLM'):
model_id = get_conf('QWEN_MODEL_SELECTION')
self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
# use fp16
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # generation length, top_p and other hyper-parameters can be customized here
self._model = model
# Check DASHSCOPE_API_KEY
if get_conf("DASHSCOPE_API_KEY") == "":
yield from update_ui_lastest_msg(f"请配置 DASHSCOPE_API_KEY。",
chatbot=chatbot, history=history, delay=0)
return
return self._model, self._tokenizer
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
def llm_stream_generator(self, **kwargs):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
def adaptor(kwargs):
query = kwargs['query']
max_length = kwargs['max_length']
top_p = kwargs['top_p']
temperature = kwargs['temperature']
history = kwargs['history']
return query, max_length, top_p, temperature, history
# Start receiving the reply
from .com_qwenapi import QwenRequestInstance
sri = QwenRequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
query, max_length, top_p, temperature, history = adaptor(kwargs)
for response in self._model.chat_stream(self._tokenizer, query, history=history):
yield response
def try_to_import_special_deps(self, **kwargs):
# import something that will raise error if the user does not install requirement_*.txt
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
import importlib
importlib.import_module('modelscope')
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
# Finalize the output
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -0,0 +1,59 @@
model_name = "Qwen_Local"
cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"
from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):
def load_model_info(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
self.model_name = model_name
self.cmd_to_install = cmd_to_install
def load_model_and_tokenizer(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
# from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
with ProxyNetworkActivate('Download_LLM'):
model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
# use fp16
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # generation length, top_p and other hyper-parameters can be customized here
self._model = model
return self._model, self._tokenizer
def llm_stream_generator(self, **kwargs):
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
def adaptor(kwargs):
query = kwargs['query']
max_length = kwargs['max_length']
top_p = kwargs['top_p']
temperature = kwargs['temperature']
history = kwargs['history']
return query, max_length, top_p, temperature, history
query, max_length, top_p, temperature, history = adaptor(kwargs)
for response in self._model.chat_stream(self._tokenizer, query, history=history):
yield response
def try_to_import_special_deps(self, **kwargs):
# import something that will raise error if the user does not install requirement_*.txt
# 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
import importlib
importlib.import_module('modelscope')
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
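get_local_llm_predict_fns is imported from local_llm_class, but its body is not part of this diff; presumably it wraps a LocalLLMHandle subclass into the standard (predict_no_ui_long_connection, predict) pair that every bridge module exports. A rough, hypothetical reconstruction under that assumption (names and signatures mirror the call sites above, not the real toolbox code):

```python
def get_local_llm_predict_fns_sketch(handle_class, model_name):
    # Hypothetical reconstruction of the factory used above; the real
    # implementation lives in request_llms/local_llm_class.py and is not shown here.
    handle = None

    def _get_handle():
        nonlocal handle
        if handle is None:
            handle = handle_class()  # lazily spawn the child process on first use
        return handle

    def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                                      observe_window=[], console_slience=False):
        response = f"[Local Message] waiting for {model_name} ..."
        for response in _get_handle().stream_chat(
            query=inputs, history=history,
            max_length=llm_kwargs["max_length"],
            top_p=llm_kwargs["top_p"],
            temperature=llm_kwargs["temperature"],
        ):
            if len(observe_window) >= 1:
                observe_window[0] = response  # report progress back to the caller
        return response

    def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[],
                system_prompt="", stream=True, additional_fn=None):
        chatbot.append((inputs, ""))
        for response in _get_handle().stream_chat(
            query=inputs, history=history,
            max_length=llm_kwargs["max_length"],
            top_p=llm_kwargs["top_p"],
            temperature=llm_kwargs["temperature"],
        ):
            chatbot[-1] = (inputs, response)
            yield chatbot, history  # stand-in for update_ui(...)

    return predict_no_ui_long_connection, predict
```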

View File

@@ -0,0 +1,67 @@
import time
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception
model_name = '云雀大模型'
def validate_key():
YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
if YUNQUE_SECRET_KEY == '': return False
return True
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
⭐ Multi-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
watch_dog_patience = 5
response = ""
if validate_key() is False:
raise RuntimeError('请配置YUNQUE_SECRET_KEY')
from .com_skylark2api import YUNQUERequestInstance
sri = YUNQUERequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
return response
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
⭐ Single-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
# Try to import the dependencies; if any are missing, suggest how to install them
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
chatbot=chatbot, history=history, delay=0)
return
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置YUNQUE_SECRET_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
# Start receiving the reply
from .com_skylark2api import YUNQUERequestInstance
sri = YUNQUERequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
# Finalize the output
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -26,7 +26,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
from .com_sparkapi import SparkRequestInstance
sri = SparkRequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
@@ -52,7 +52,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
# Start receiving the reply
from .com_sparkapi import SparkRequestInstance
sri = SparkRequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
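The new use_image_api flag makes image routing opt-in: only the UI-facing predict passes True, while the multi-threaded path stays text-only. A toy distillation of the routing rule added in create_blocking_request (the endpoint strings are placeholders, not the real Spark URLs):

```python
def pick_endpoint(use_image_api, uploaded_pictures, text_url, image_url):
    # Mirrors the added logic: the image endpoint is chosen only when the flag
    # is set AND the user actually uploaded pictures.
    if use_image_api and uploaded_pictures:
        return image_url
    return text_url

assert pick_endpoint(False, ["cat.png"], "wss://text", "wss://img") == "wss://text"
assert pick_endpoint(True, ["cat.png"], "wss://text", "wss://img") == "wss://img"
assert pick_endpoint(True, [], "wss://text", "wss://img") == "wss://text"
```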

View File

@@ -7,14 +7,15 @@ import logging
import time
from toolbox import get_conf
import asyncio
load_message = "正在加载Claude组件请稍候..."
try:
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part One: the Slack API Client
https://github.com/yokonsan/claude-in-slack-api
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
from slack_sdk.errors import SlackApiError
@@ -23,20 +24,23 @@ try:
class SlackClient(AsyncWebClient):
"""SlackClient类用于与Slack API进行交互实现消息发送、接收等功能。
属性:
- CHANNEL_IDstr类型表示频道ID。
属性:
- CHANNEL_IDstr类型表示频道ID。
方法:
- open_channel()异步方法。通过调用conversations_open方法打开一个频道并将返回的频道ID保存在属性CHANNEL_ID中。
- chat(text: str):异步方法。向已打开的频道发送一条文本消息。
- get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。
- get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出否则结束循环。
方法:
- open_channel()异步方法。通过调用conversations_open方法打开一个频道并将返回的频道ID保存在属性CHANNEL_ID中。
- chat(text: str):异步方法。向已打开的频道发送一条文本消息。
- get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。
- get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出否则结束循环。
"""
CHANNEL_ID = None
async def open_channel(self):
response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
response = await self.conversations_open(
users=get_conf("SLACK_CLAUDE_BOT_ID")
)
self.CHANNEL_ID = response["channel"]["id"]
async def chat(self, text):
@@ -49,33 +53,39 @@ try:
async def get_slack_messages(self):
try:
# TODO: history is not supported for now, because when several users share one channel their histories would bleed into each other
resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
msg = [msg for msg in resp["messages"]
if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
resp = await self.conversations_history(
channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1
)
msg = [
msg
for msg in resp["messages"]
if msg.get("user") == get_conf("SLACK_CLAUDE_BOT_ID")
]
return msg
except (SlackApiError, KeyError) as e:
raise RuntimeError(f"获取Slack消息失败。")
async def get_reply(self):
while True:
slack_msgs = await self.get_slack_messages()
if len(slack_msgs) == 0:
await asyncio.sleep(0.5)
continue
msg = slack_msgs[-1]
if msg["text"].endswith("Typing…_"):
yield False, msg["text"]
else:
yield True, msg["text"]
break
except:
pass
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part Two: the child-process Worker that performs the calls
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
@@ -88,7 +98,7 @@ class ClaudeHandle(Process):
self.success = True
self.local_history = []
self.check_dependency()
if self.success:
self.start()
self.threadLock = threading.Lock()
@@ -96,6 +106,7 @@ class ClaudeHandle(Process):
try:
self.success = False
import slack_sdk
self.info = "依赖检测通过等待Claude响应。注意目前不能多人同时调用Claude接口有线程锁否则将导致每个人的Claude问询历史互相渗透。调用Claude时会自动使用已配置的代理。"
self.success = True
except:
@@ -103,40 +114,44 @@ class ClaudeHandle(Process):
self.success = False
def ready(self):
return self.claude_model is not None
async def async_run(self):
await self.claude_model.open_channel()
while True:
# Wait for a request from the main process
kwargs = self.child.recv()
question = kwargs['query']
history = kwargs['history']
question = kwargs["query"]
history = kwargs["history"]
# Start asking the question
prompt = ""
# The question itself
prompt += question
print("question:", prompt)
# Submit it
await self.claude_model.chat(prompt)
# Fetch the reply
async for final, response in self.claude_model.get_reply():
if not final:
print(response)
self.child.send(str(response))
else:
# Guard against losing the last message
slack_msgs = await self.claude_model.get_slack_messages()
last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
last_msg = (
slack_msgs[-1]["text"]
if slack_msgs and len(slack_msgs) > 0
else ""
)
if last_msg:
self.child.send(last_msg)
print('-------- receive final ---------')
self.child.send('[Finish]')
print("-------- receive final ---------")
self.child.send("[Finish]")
def run(self):
"""
This function runs in the child process
@@ -146,22 +161,24 @@ class ClaudeHandle(Process):
self.local_history = []
if (self.claude_model is None) or (not self.success):
# Proxy settings
proxies = get_conf('proxies')
proxies = get_conf("proxies")
if proxies is None:
self.proxies_https = None
else:
self.proxies_https = proxies['https']
self.proxies_https = proxies["https"]
try:
SLACK_CLAUDE_USER_TOKEN = get_conf('SLACK_CLAUDE_USER_TOKEN')
self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
print('Claude组件初始化成功。')
SLACK_CLAUDE_USER_TOKEN = get_conf("SLACK_CLAUDE_USER_TOKEN")
self.claude_model = SlackClient(
token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https
)
print("Claude组件初始化成功。")
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(f"[Local Message] 不能加载Claude组件。{tb_str}")
self.child.send("[Fail]")
self.child.send("[Finish]")
raise RuntimeError(f"不能加载Claude组件。")
self.success = True
@@ -169,42 +186,49 @@ class ClaudeHandle(Process):
# Enter the task-waiting loop
asyncio.run(self.async_run())
except Exception:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] Claude失败 {tb_str}.')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(f"[Local Message] Claude失败 {tb_str}.")
self.child.send("[Fail]")
self.child.send("[Finish]")
def stream_chat(self, **kwargs):
"""
This function runs in the main process
"""
self.threadLock.acquire()
self.parent.send(kwargs)  # send the request to the child process
while True:
res = self.parent.recv()  # wait for the next fragment of Claude's reply
if res == "[Finish]":
break  # done
elif res == "[Fail]":
self.success = False
break
else:
yield res  # a fragment of Claude's reply
self.threadLock.release()
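stream_chat and async_run together implement a simple streaming RPC over a multiprocessing pipe: the child sends reply fragments until it emits the "[Finish]" sentinel, with "[Fail]" signalling an error. A minimal self-contained sketch of the same protocol (the worker below is illustrative, not the repository's Claude worker):

```python
from multiprocessing import Pipe, Process

def child_worker(conn):
    kwargs = conn.recv()  # block until the main process sends a request
    for fragment in ["Hel", "Hello", "Hello!"]:
        conn.send(fragment)  # stream partial replies as they grow
    conn.send("[Finish]")  # sentinel: the stream is complete

if __name__ == "__main__":
    parent, child = Pipe()
    Process(target=child_worker, args=(child,), daemon=True).start()
    parent.send({"query": "hi"})
    while True:
        res = parent.recv()
        if res == "[Finish]":
            break
        elif res == "[Fail]":
            print("worker failed")
            break
        else:
            print("partial:", res)  # prints "Hel", "Hello", "Hello!"
```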
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part Three: the unified calling interface for the main process
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
global claude_handle
claude_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(
inputs,
llm_kwargs,
history=[],
sys_prompt="",
observe_window=None,
console_slience=False,
):
"""
Multi-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
global claude_handle
if (claude_handle is None) or (not claude_handle.success):
@@ -217,24 +241,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
# There is no sys_prompt interface, so the prompt is folded into the history instead
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]])
for i in range(len(history) // 2):
history_feedin.append([history[2 * i], history[2 * i + 1]])
watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
response = ""
observe_window[0] = "[Local Message] 等待Claude响应中 ..."
for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
for response in claude_handle.stream_chat(
query=inputs,
history=history_feedin,
system_prompt=sys_prompt,
max_length=llm_kwargs["max_length"],
top_p=llm_kwargs["top_p"],
temperature=llm_kwargs["temperature"],
):
observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return preprocess_newbing_out_simple(response)
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
def predict(
inputs,
llm_kwargs,
plugin_kwargs,
chatbot,
history=[],
system_prompt="",
stream=True,
additional_fn=None,
):
"""
Single-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
chatbot.append((inputs, "[Local Message] 等待Claude响应中 ..."))
@@ -249,21 +289,30 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
inputs, history = handle_core_functionality(
additional_fn, inputs, history, chatbot
)
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]])
for i in range(len(history) // 2):
history_feedin.append([history[2 * i], history[2 * i + 1]])
chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...")
response = "[Local Message] 等待Claude响应中 ..."
yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
yield from update_ui(
chatbot=chatbot, history=history, msg="Claude响应缓慢尚未完成全部响应请耐心完成后再提交新问题。"
)
for response in claude_handle.stream_chat(
query=inputs, history=history_feedin, system_prompt=system_prompt
):
chatbot[-1] = (inputs, preprocess_newbing_out(response))
yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
yield from update_ui(
chatbot=chatbot, history=history, msg="Claude响应缓慢尚未完成全部响应请耐心完成后再提交新问题。"
)
if response == "[Local Message] 等待Claude响应中 ...":
response = "[Local Message] Claude响应异常请刷新界面重试 ..."
history.extend([inputs, response])
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {response}')
logging.info(f"[raw_input] {inputs}")
logging.info(f"[response] {response}")
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")

View File

@@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
chatbot=chatbot, history=history, delay=0)
return

request_llms/com_google.py Normal file
View File

@@ -0,0 +1,229 @@
# encoding: utf-8
# @Time : 2023/12/25
# @Author : Spike
# @Descr :
import json
import os
import re
import requests
from typing import List, Dict, Tuple
from toolbox import get_conf, encode_image, get_pictures_list
proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part Five: assorted file-handling helpers
files_filter_handler filter files by type
input_encode_handler extract the files referenced in the input and parse them
file_manifest_filter_html filter files by type and render them as html or md text
link_mtime_to_md append the local mtime to a link to avoid downloading a cached file
html_view_blank hyperlink helper
html_local_file relative path for a local file
to_markdown_tabs convert a file list into a markdown table
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
def files_filter_handler(file_list):
new_list = []
filter_ = [
"png",
"jpg",
"jpeg",
"bmp",
"svg",
"webp",
"ico",
"tif",
"tiff",
"raw",
"eps",
]
for file in file_list:
file = str(file).replace("file=", "")
if os.path.exists(file):
if str(os.path.basename(file)).split(".")[-1] in filter_:
new_list.append(file)
return new_list
def input_encode_handler(inputs, llm_kwargs):
if llm_kwargs["most_recent_uploaded"].get("path"):
image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
md_encode = []
for md_path in image_paths:
type_ = os.path.splitext(md_path)[1].replace(".", "")
type_ = "jpeg" if type_ == "jpg" else type_
md_encode.append({"data": encode_image(md_path), "type": type_})
return inputs, md_encode
def file_manifest_filter_html(file_list, filter_: list = None, md_type=False):
new_list = []
if not filter_:
filter_ = [
"png",
"jpg",
"jpeg",
"bmp",
"svg",
"webp",
"ico",
"tif",
"tiff",
"raw",
"eps",
]
for file in file_list:
if str(os.path.basename(file)).split(".")[-1] in filter_:
new_list.append(html_local_img(file, md=md_type))
elif os.path.exists(file):
new_list.append(link_mtime_to_md(file))
else:
new_list.append(file)
return new_list
def link_mtime_to_md(file):
link_local = html_local_file(file)
link_name = os.path.basename(file)
a = f"[{link_name}]({link_local}?{os.path.getmtime(file)})"
return a
def html_local_file(file):
base_path = os.path.dirname(__file__)  # project directory
if os.path.exists(str(file)):
file = f'file={file.replace(base_path, ".")}'
return file
def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
style = ""
if max_width is not None:
style += f"max-width: {max_width};"
if max_height is not None:
style += f"max-height: {max_height};"
__file = html_local_file(__file)
a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
if md:
a = f"![{__file}]({__file})"
return a
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
"""
Args:
head: the table header, e.g. []
tabs: the table values, e.g. [[col 1], [col 2], [col 3], [col 4]]
alignment: :--- left-aligned, :---: centered, ---: right-aligned
column: True to keep data in columns, False to keep data in rows (default).
Returns:
A string representation of the markdown table.
"""
if column:
transposed_tabs = list(map(list, zip(*tabs)))
else:
transposed_tabs = tabs
# Find the maximum length among the columns
max_len = max(len(column) for column in transposed_tabs)
tab_format = "| %s "
tabs_list = "".join([tab_format % i for i in head]) + "|\n"
tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
for i in range(max_len):
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
row_data = file_manifest_filter_html(row_data, filter_=None)
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
return tabs_list
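As a quick illustration of the output shape (values made up here; note that with the default column=False each inner list is treated as one column, and short columns are padded with empty cells):

```python
head = ["package", "version"]
tabs = [["torch", "pandas"], ["2.1"]]  # each inner list is one column
print(to_markdown_tabs(head, tabs))
# | package | version |
# | :---: | :---: |
# | torch | 2.1 |
# | pandas |  |
```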
class GoogleChatInit:
def __init__(self):
self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
headers, payload = self.generate_message_payload(
inputs, llm_kwargs, history, system_prompt
)
response = requests.post(
url=self.url_gemini,
headers=headers,
data=json.dumps(payload),
stream=True,
proxies=proxies,
timeout=TIMEOUT_SECONDS,
)
return response.iter_lines()
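The %m / %k placeholders in url_gemini are filled in by generate_message_payload below, and generate_chat hands back the raw streamed lines. A sketch of how a caller might expand the template and pull text fragments out of the stream (the key value is a placeholder, and the bridge module's real parser is not shown in this diff):

```python
import json
import re

# How the %m / %k template above expands:
url = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
url = url.replace("%m", "gemini-pro").replace("%k", "YOUR_GEMINI_API_KEY")

def iter_text_fragments(lines):
    # One plausible way to consume the lines yielded by response.iter_lines():
    # scan each streamed JSON line for a "text" field and un-escape it.
    for line in lines:
        match = re.search(r'"text":\s*(".*")', line.decode("utf-8", errors="ignore"))
        if match:
            yield json.loads(match.group(1))  # decode the JSON string literal
```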
def __conversation_user(self, user_input, llm_kwargs):
what_i_have_asked = {"role": "user", "parts": []}
if "vision" not in self.url_gemini:
input_ = user_input
encode_img = []
else:
input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
what_i_have_asked["parts"].append({"text": input_})
if encode_img:
for data in encode_img:
what_i_have_asked["parts"].append(
{
"inline_data": {
"mime_type": f"image/{data['type']}",
"data": data["data"],
}
}
)
return what_i_have_asked
def __conversation_history(self, history, llm_kwargs):
messages = []
conversation_cnt = len(history) // 2
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
what_gpt_answer = {
"role": "model",
"parts": [{"text": history[index + 1]}],
}
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
return messages
def generate_message_payload(
self, inputs, llm_kwargs, history, system_prompt
) -> Tuple[Dict, Dict]:
messages = [
# {"role": "system", "parts": [{"text": system_prompt}]}, # gemini 不允许对话轮次为偶数,所以这个没有用,看后续支持吧。。。
# {"role": "user", "parts": [{"text": ""}]},
# {"role": "model", "parts": [{"text": ""}]}
]
self.url_gemini = self.url_gemini.replace(
"%m", llm_kwargs["llm_model"]
).replace("%k", get_conf("GEMINI_API_KEY"))
header = {"Content-Type": "application/json"}
if "vision" not in self.url_gemini: # 不是vision 才处理history
messages.extend(
self.__conversation_history(history, llm_kwargs)
)  # merge in the history
messages.append(self.__conversation_user(inputs, llm_kwargs))  # append the user's current turn
payload = {
"contents": messages,
"generationConfig": {
# "maxOutputTokens": 800,
"stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
"temperature": llm_kwargs.get("temperature", 1),
"topP": llm_kwargs.get("top_p", 0.8),
"topK": 10,
},
}
return header, payload
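Put together, a single-turn text request produced by generate_message_payload looks roughly like this (values illustrative; note how str(llm_kwargs.get("stop", "")).split(" ") yields [""] when no stop sequence is set):

```python
header = {"Content-Type": "application/json"}
payload = {
    "contents": [
        {"role": "user", "parts": [{"text": "Summarize this paper."}]},
    ],
    "generationConfig": {
        "stopSequences": [""],  # no stop sequence configured
        "temperature": 1,
        "topP": 0.8,
        "topK": 10,
    },
}
```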
if __name__ == "__main__":
google = GoogleChatInit()
# print(google.generate_message_payload('你好呀', {}, ['123123', '3123123'], ''))
# google.input_encode_handle('123123[123123](./123123), ![53425](./asfafa/fff.jpg)')

View File

@@ -0,0 +1,94 @@
from http import HTTPStatus
from toolbox import get_conf
import threading
import logging
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
class QwenRequestInstance():
def __init__(self):
import dashscope
self.time_to_yield_event = threading.Event()
self.time_to_exit_event = threading.Event()
self.result_buf = ""
def validate_key():
DASHSCOPE_API_KEY = get_conf("DASHSCOPE_API_KEY")
if DASHSCOPE_API_KEY == '': return False
return True
if not validate_key():
raise RuntimeError('请配置 DASHSCOPE_API_KEY')
dashscope.api_key = get_conf("DASHSCOPE_API_KEY")
def generate(self, inputs, llm_kwargs, history, system_prompt):
# import _thread as thread
from dashscope import Generation
QWEN_MODEL = {
'qwen-turbo': Generation.Models.qwen_turbo,
'qwen-plus': Generation.Models.qwen_plus,
'qwen-max': Generation.Models.qwen_max,
}[llm_kwargs['llm_model']]
top_p = llm_kwargs.get('top_p', 0.8)
if top_p == 0: top_p += 1e-5
if top_p == 1: top_p -= 1e-5
self.result_buf = ""
responses = Generation.call(
model=QWEN_MODEL,
messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
top_p=top_p,
temperature=llm_kwargs.get('temperature', 1.0),
result_format='message',
stream=True,
incremental_output=True
)
for response in responses:
if response.status_code == HTTPStatus.OK:
if response.output.choices[0].finish_reason == 'stop':
yield self.result_buf
break
elif response.output.choices[0].finish_reason == 'length':
self.result_buf += "[Local Message] 生成长度过长,后续输出被截断"
yield self.result_buf
break
else:
self.result_buf += response.output.choices[0].message.content
yield self.result_buf
else:
self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
yield self.result_buf
break
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {self.result_buf}')
return self.result_buf
def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
conversation_cnt = len(history) // 2
if system_prompt == '': system_prompt = 'Hello!'
messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
return messages
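The first two messages are a workaround: this payload has no dedicated system slot, so the system prompt is smuggled in as a user turn answered by a canned "Certainly!". An illustrative call (values made up):

```python
msgs = generate_message_payload(
    inputs="And in German?",
    llm_kwargs={},
    history=["Translate 'cat' to French.", "chat"],
    system_prompt="You are a translator.",
)
assert msgs == [
    {"role": "user", "content": "You are a translator."},
    {"role": "assistant", "content": "Certainly!"},
    {"role": "user", "content": "Translate 'cat' to French."},
    {"role": "assistant", "content": "chat"},
    {"role": "user", "content": "And in German?"},
]
```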

View File

@@ -0,0 +1,95 @@
from toolbox import get_conf
import threading
import logging
import os
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
#os.environ['VOLC_ACCESSKEY'] = ''
#os.environ['VOLC_SECRETKEY'] = ''
class YUNQUERequestInstance():
def __init__(self):
self.time_to_yield_event = threading.Event()
self.time_to_exit_event = threading.Event()
self.result_buf = ""
def generate(self, inputs, llm_kwargs, history, system_prompt):
# import _thread as thread
from volcengine.maas import MaasService, MaasException
maas = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')
YUNQUE_SECRET_KEY, YUNQUE_ACCESS_KEY,YUNQUE_MODEL = get_conf("YUNQUE_SECRET_KEY", "YUNQUE_ACCESS_KEY","YUNQUE_MODEL")
maas.set_ak(YUNQUE_ACCESS_KEY) # fill in VOLC_ACCESSKEY
maas.set_sk(YUNQUE_SECRET_KEY) # fill in VOLC_SECRETKEY
self.result_buf = ""
req = {
"model": {
"name": YUNQUE_MODEL,
"version": "1.0", # use default version if not specified.
},
"parameters": {
"max_new_tokens": 4000, # 输出文本的最大tokens限制
"min_new_tokens": 1, # 输出文本的最小tokens限制
"temperature": llm_kwargs['temperature'], # 用于控制生成文本的随机性和创造性Temperature值越大随机性越大取值范围0~1
"top_p": llm_kwargs['top_p'], # 用于控制输出tokens的多样性TopP值越大输出的tokens类型越丰富取值范围0~1
"top_k": 0, # 选择预测值最大的k个token进行采样取值范围0-10000表示不生效
"max_prompt_tokens": 4000, # 最大输入 token 数,如果给出的 prompt 的 token 长度超过此限制,取最后 max_prompt_tokens 个 token 输入模型。
},
"messages": self.generate_message_payload(inputs, llm_kwargs, history, system_prompt)
}
response = maas.stream_chat(req)
for resp in response:
self.result_buf += resp.choice.message.content
yield self.result_buf
'''
for event in response.events():
if event.event == "add":
self.result_buf += event.data
yield self.result_buf
elif event.event == "error" or event.event == "interrupted":
raise RuntimeError("Unknown error:" + event.data)
elif event.event == "finish":
yield self.result_buf
break
else:
raise RuntimeError("Unknown error:" + str(event))
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {self.result_buf}')
'''
return self.result_buf
def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
from volcengine.maas import ChatRole
conversation_cnt = len(history) // 2
messages = [{"role": ChatRole.USER, "content": system_prompt},
{"role": ChatRole.ASSISTANT, "content": "Certainly!"}]
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = ChatRole.USER
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = ChatRole.ASSISTANT
what_gpt_answer["content"] = history[index + 1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = ChatRole.USER
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
return messages

View File

@@ -72,12 +72,12 @@ class SparkRequestInstance():
self.result_buf = ""
def generate(self, inputs, llm_kwargs, history, system_prompt):
def generate(self, inputs, llm_kwargs, history, system_prompt, use_image_api=False):
llm_kwargs = llm_kwargs
history = history
system_prompt = system_prompt
import _thread as thread
thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt))
thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt, use_image_api))
while True:
self.time_to_yield_event.wait(timeout=1)
if self.time_to_yield_event.is_set():
@@ -86,7 +86,7 @@ class SparkRequestInstance():
return self.result_buf
def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt):
def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt, use_image_api):
if llm_kwargs['llm_model'] == 'sparkv2':
gpt_url = self.gpt_url_v2
elif llm_kwargs['llm_model'] == 'sparkv3':
@@ -94,10 +94,12 @@ class SparkRequestInstance():
else:
gpt_url = self.gpt_url
file_manifest = []
if llm_kwargs.get('most_recent_uploaded'):
if use_image_api and llm_kwargs.get('most_recent_uploaded'):
if llm_kwargs['most_recent_uploaded'].get('path'):
file_manifest = get_pictures_list(llm_kwargs['most_recent_uploaded']['path'])
gpt_url = self.gpt_url_img
if len(file_manifest) > 0:
print('正在使用讯飞图片理解API')
gpt_url = self.gpt_url_img
wsParam = Ws_Param(self.appid, self.api_key, self.api_secret, gpt_url)
websocket.enableTrace(False)
wsUrl = wsParam.create_url()

View File

@@ -21,11 +21,13 @@ class ZhipuRequestInstance():
response = zhipuai.model_api.sse_invoke(
model=ZHIPUAI_MODEL,
prompt=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
top_p=llm_kwargs['top_p'],
temperature=llm_kwargs['temperature'],
top_p=llm_kwargs['top_p']*0.7, # Zhipu's API is temperamental; rescale linearly (*0.7)
temperature=llm_kwargs['temperature']*0.95, # Zhipu's API is temperamental; rescale linearly (*0.95)
)
for event in response.events():
if event.event == "add":
# if self.result_buf == "" and event.data.startswith(" "):
# event.data = event.data.lstrip(" ") # why does Zhipu prepend a space every time?
self.result_buf += event.data
yield self.result_buf
elif event.event == "error" or event.event == "interrupted":
@@ -35,7 +37,8 @@ class ZhipuRequestInstance():
break
else:
raise RuntimeError("Unknown error:" + str(event))
if self.result_buf == "":
yield "智谱没有返回任何数据, 请检查ZHIPUAI_API_KEY和ZHIPUAI_MODEL是否填写正确."
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {self.result_buf}')
return self.result_buf
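The *0.7 and *0.95 factors are a crude linear remap to keep Zhipu's API happy; note that they can push top_p to 0 when the UI slider sits at 0. A clamped variant of the same idea might look like this (a sketch, not the repository's code):

```python
def rescale_sampling_params(top_p, temperature, eps=1e-5):
    # Same linear shrink as above, but clamped away from the degenerate 0/1 endpoints.
    top_p = min(max(top_p * 0.7, eps), 1 - eps)
    temperature = min(max(temperature * 0.95, eps), 1 - eps)
    return top_p, temperature

print(rescale_sampling_params(1.0, 1.0))  # (0.7, 0.95)
```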

View File

@@ -1,8 +1,8 @@
"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part One: from EdgeGPT.py
https://github.com/acheong08/EdgeGPT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
"""
Main.py
@@ -196,9 +196,9 @@ class _ChatHubRequest:
self,
prompt: str,
conversation_style: CONVERSATION_STYLE_TYPE,
options = None,
webpage_context = None,
search_result = False,
options=None,
webpage_context=None,
search_result=False,
) -> None:
"""
Updates request object
@@ -294,9 +294,9 @@ class _Conversation:
def __init__(
self,
proxy = None,
async_mode = False,
cookies = None,
proxy=None,
async_mode=False,
cookies=None,
) -> None:
if async_mode:
return
@@ -350,8 +350,8 @@ class _Conversation:
@staticmethod
async def create(
proxy = None,
cookies = None,
proxy=None,
cookies=None,
):
self = _Conversation(async_mode=True)
self.struct = {
@@ -418,8 +418,8 @@ class _ChatHub:
def __init__(
self,
conversation: _Conversation,
proxy = None,
cookies = None,
proxy=None,
cookies=None,
) -> None:
self.session = None
self.wss = None
@@ -441,7 +441,7 @@ class _ChatHub:
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
options: dict = None,
webpage_context = None,
webpage_context=None,
search_result: bool = False,
) -> Generator[str, None, None]:
"""
@@ -452,10 +452,12 @@ class _ChatHub:
ws_cookies = []
for cookie in self.cookies:
ws_cookies.append(f"{cookie['name']}={cookie['value']}")
req_header.update({
'Cookie': ';'.join(ws_cookies),
})
req_header.update(
{
"Cookie": ";".join(ws_cookies),
}
)
timeout = aiohttp.ClientTimeout(total=30)
self.session = aiohttp.ClientSession(timeout=timeout)
@@ -521,9 +523,9 @@ class _ChatHub:
msg = await self.wss.receive()
try:
objects = msg.data.split(DELIMITER)
except :
except:
continue
for obj in objects:
if obj is None or not obj:
continue
@@ -624,8 +626,8 @@ class Chatbot:
def __init__(
self,
proxy = None,
cookies = None,
proxy=None,
cookies=None,
) -> None:
self.proxy = proxy
self.chat_hub: _ChatHub = _ChatHub(
@@ -636,8 +638,8 @@ class Chatbot:
@staticmethod
async def create(
proxy = None,
cookies = None,
proxy=None,
cookies=None,
):
self = Chatbot.__new__(Chatbot)
self.proxy = proxy
@@ -654,7 +656,7 @@ class Chatbot:
wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
conversation_style: CONVERSATION_STYLE_TYPE = None,
options: dict = None,
webpage_context = None,
webpage_context=None,
search_result: bool = False,
) -> dict:
"""
@@ -680,7 +682,7 @@ class Chatbot:
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
options: dict = None,
webpage_context = None,
webpage_context=None,
search_result: bool = False,
) -> Generator[str, None, None]:
"""

View File

@@ -2,4 +2,4 @@ protobuf
cpm_kernels
torch>=1.10
mdtex2html
sentencepiece

View File

@@ -3,4 +3,4 @@ jtorch >= 0.1.3
torch
torchvision
pandas
jieba

View File

@@ -5,4 +5,3 @@ accelerate
matplotlib
huggingface_hub
triton

Some files were not shown because too many files have changed in this diff Show More