Compare commits
149 Commits
version3.6
...
binary-hus
| Author | SHA1 | Date |
|---|---|---|
|  | c453539296 |  |
|  | 2c740fc641 |  |
|  | 96832a8228 |  |
|  | 5f18d4a1af |  |
|  | 2bc65a99ca |  |
|  | 0a2805513e |  |
|  | c22867b74c |  |
|  | 2abe665521 |  |
|  | b0e6c4d365 |  |
|  | d883c7f34b |  |
|  | aba871342f |  |
|  | 37744a9cb1 |  |
|  | 480516380d |  |
|  | 60ba712131 |  |
|  | a7c960dcb0 |  |
|  | a96f842b3a |  |
|  | 417ca91e23 |  |
|  | ef8fadfa18 |  |
|  | 865c4ca993 |  |
|  | 31304f481a |  |
|  | 1bd3637d32 |  |
|  | 160a683667 |  |
|  | 49ca03ca06 |  |
|  | c625348ce1 |  |
|  | 6d4a74893a |  |
|  | 5c7499cada |  |
|  | f522691529 |  |
|  | ca85573ec1 |  |
|  | 2c7bba5c63 |  |
|  | e22f0226d5 |  |
|  | 0f250305b4 |  |
|  | 7606f5c130 |  |
|  | 4f0dcc431c |  |
|  | 6ca0dd2f9e |  |
|  | e3e9921f6b |  |
|  | 867ddd355e |  |
|  | bb431db7d3 |  |
|  | 43568b83e1 |  |
|  | 2b90302851 |  |
|  | f7588d4776 |  |
|  | a0bfa7ba1c |  |
|  | c60a7452bf |  |
|  | 68a49d3758 |  |
|  | ac3d4cf073 |  |
|  | 9479dd984c |  |
|  | 3c271302cc |  |
|  | 6e9936531d |  |
|  | 439147e4b7 |  |
|  | 8d13821099 |  |
|  | 49fe06ed69 |  |
|  | 7882ce7304 |  |
|  | dc68e601a5 |  |
|  | d169fb4b16 |  |
|  | 36e19d5202 |  |
|  | c5f1e4e392 |  |
|  | d3f7267a63 |  |
|  | f4127a9c9c |  |
|  | c181ad38b4 |  |
|  | 107944f5b7 |  |
|  | 8c7569b689 |  |
|  | fa374bf1fc |  |
|  | c0a36e37be |  |
|  | 2f2b869efd |  |
|  | 2f148bada0 |  |
|  | 916b2e8aa7 |  |
|  | 0cb7dd5280 |  |
|  | 892ccb14c7 |  |
|  | 21bccf69d2 |  |
|  | 7bac8f4bd3 |  |
|  | d0c2923ab1 |  |
|  | 8a6e96c369 |  |
|  | 49f3fcf2c0 |  |
|  | 2b96a60b76 |  |
|  | ec60a85cac |  |
|  | 647d9f88db |  |
|  | b0c627909a |  |
|  | 102bf2f1eb |  |
|  | 26291b33d1 |  |
|  | 4f04d810b7 |  |
|  | 6d2f126253 |  |
|  | e5b296d221 |  |
|  | 7933675c12 |  |
|  | 692ff4b59c |  |
|  | 4d8b535c79 |  |
|  | 3c03f240ba |  |
|  | 9bfc3400f9 |  |
|  | 95504f0bb7 |  |
|  | 0cd3274d04 |  |
|  | 2cef81abbe |  |
|  | 6f9bc5d206 |  |
|  | 94ab41d3c0 |  |
|  | da376068e1 |  |
|  | 552219fd5a |  |
|  | 4985986243 |  |
|  | d99b443b4c |  |
|  | 2aab6cb708 |  |
|  | 1134723c80 |  |
|  | 6126024f2c |  |
|  | ef12d4f754 |  |
|  | e8dd3c02f2 |  |
|  | e7f4c804eb |  |
|  | 3d6ee5c755 |  |
|  | d8958da8cd |  |
|  | a64d550045 |  |
|  | d876a81e78 |  |
|  | 6723eb77b2 |  |
|  | 86891e3535 |  |
|  | 2f805db35d |  |
|  | ecaf2bdf45 |  |
|  | 22e00eb1c5 |  |
|  | 900fad69cf |  |
|  | 55d807c116 |  |
|  | 9a0ed248ca |  |
|  | 88802b0f72 |  |
|  | 5720ac127c |  |
|  | f44642d9d2 |  |
|  | 29775dedd8 |  |
|  | 6417ca9dde |  |
|  | f417c1ce6d |  |
|  | e4c057f5a3 |  |
|  | f9e9b6f4ec |  |
|  | c141e767c6 |  |
|  | 17f361d63b |  |
|  | 8780fe29f1 |  |
|  | d57bb8afbe |  |
|  | d39945c415 |  |
|  | 688df6aa24 |  |
|  | b24fef8a61 |  |
|  | 8c840f3d4c |  |
|  | 577d3d566b |  |
|  | fd92766083 |  |
|  | 2d2e02040d |  |
|  | aee57364dd |  |
|  | 7ca37c4831 |  |
|  | 5b06a6cae5 |  |
|  | 5d5695cd9a |  |
|  | fd72894c90 |  |
|  | c1abec2e4b |  |
|  | 9916f59753 |  |
|  | e6716ccf63 |  |
|  | e533ed6d12 |  |
|  | 4fefbb80ac |  |
|  | 203d5f7296 |  |
|  | b470af7c7b |  |
|  | f8c5f9045d |  |
|  | cdca36f5d2 |  |
|  | 6ed88fe848 |  |
|  | ea4e03b1d8 |  |
|  | aa341fd268 |  |
14  .github/ISSUE_TEMPLATE/bug_report.yml (vendored)
@@ -34,7 +34,7 @@ body:
        - Others | 非最新版
    validations:
      required: true

  - type: dropdown
    id: os
    attributes:
@@ -47,7 +47,7 @@ body:
        - Docker
    validations:
      required: true

  - type: textarea
    id: describe
    attributes:
@@ -55,7 +55,7 @@ body:
      description: Describe the bug | 简述
    validations:
      required: true

  - type: textarea
    id: screenshot
    attributes:
@@ -63,15 +63,9 @@ body:
      description: Screen Shot | 有帮助的截图
    validations:
      required: true

  - type: textarea
    id: traceback
    attributes:
      label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
      description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
5  .github/ISSUE_TEMPLATE/feature_request.yml (vendored)
@@ -21,8 +21,3 @@ body:
    attributes:
      label: Feature Request | 功能请求
      description: Feature Request | 功能请求
44  .github/workflows/build-with-all-capacity-beta.yml (vendored, new file)
@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity-beta

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacityBeta
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
2  .github/workflows/stale.yml (vendored)
@@ -15,7 +15,7 @@ jobs:
    permissions:
      issues: write
      pull-requests: read

    steps:
      - uses: actions/stale@v8
        with:
1  .gitignore (vendored)
@@ -152,3 +152,4 @@ request_llms/moss
media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
203  README.md
@@ -1,43 +1,71 @@
|
||||
> **Note**
|
||||
>
|
||||
> [!IMPORTANT]
|
||||
>
|
||||
> 全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
|
||||
>
|
||||
> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
|
||||
>
|
||||
> 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
|
||||
>
|
||||
> 2023.12.26: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
|
||||
|
||||
<br>
|
||||
|
||||
<div align=center>
|
||||
<h1 aligh="center">
|
||||
<img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)
|
||||
</h1>
|
||||
|
||||
[![Github][Github-image]][Github-url]
|
||||
[![License][License-image]][License-url]
|
||||
[![Releases][Releases-image]][Releases-url]
|
||||
[![Installation][Installation-image]][Installation-url]
|
||||
[![Wiki][Wiki-image]][Wiki-url]
|
||||
[![PR][PRs-image]][PRs-url]
|
||||
|
||||
[Github-image]: https://img.shields.io/badge/github-12100E.svg?style=flat-square
|
||||
[License-image]: https://img.shields.io/github/license/binary-husky/gpt_academic?label=License&style=flat-square&color=orange
|
||||
[Releases-image]: https://img.shields.io/github/release/binary-husky/gpt_academic?label=Release&style=flat-square&color=blue
|
||||
[Installation-image]: https://img.shields.io/badge/dynamic/json?color=blue&url=https://raw.githubusercontent.com/binary-husky/gpt_academic/master/version&query=$.version&label=Installation&style=flat-square
|
||||
[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square
|
||||
[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square
|
||||
|
||||
[Github-url]: https://github.com/binary-husky/gpt_academic
|
||||
[License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE
|
||||
[Releases-url]: https://github.com/binary-husky/gpt_academic/releases
|
||||
[Installation-url]: https://github.com/binary-husky/gpt_academic#installation
|
||||
[Wiki-url]: https://github.com/binary-husky/gpt_academic/wiki
|
||||
[PRs-url]: https://github.com/binary-husky/gpt_academic/pulls
|
||||
|
||||
|
||||
|
||||
# <div align=center><img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)</div>
|
||||
</div>
|
||||
<br>
|
||||
|
||||
**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!**
|
||||
|
||||
If you like this project, please give it a Star. We also have a README in [English|](docs/README.English.md)[日本語|](docs/README.Japanese.md)[한국어|](docs/README.Korean.md)[Русский|](docs/README.Russian.md)[Français](docs/README.French.md) translated by this project itself.
|
||||
To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
|
||||
If you like this project, please give it a Star.
|
||||
Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
|
||||
<br>
|
||||
|
||||
> **Note**
|
||||
> [!NOTE]
|
||||
> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
|
||||
> [](#installation) [](https://github.com/binary-husky/gpt_academic/releases) [](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) []([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
|
||||
>
|
||||
> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。
|
||||
>
|
||||
> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[常规安装方法](#installation) | [一键安装脚本](https://github.com/binary-husky/gpt_academic/releases) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
|
||||
>
|
||||
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
|
||||
> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
|
||||
|
||||
|
||||
|
||||
<br><br>
|
||||
|
||||
<div align="center">
|
||||
|
||||
功能(⭐= 近期新增功能) | 描述
|
||||
--- | ---
|
||||
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, [通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),智谱API,DALLE3
|
||||
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
|
||||
润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
|
||||
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
|
||||
模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键可以剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW)
|
||||
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW)
|
||||
读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要
|
||||
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
|
||||
批量注释生成 | [插件] 一键批量生成函数注释
|
||||
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?
|
||||
chat分析报告生成 | [插件] 运行后自动生成总结汇报
|
||||
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
|
||||
⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本)
|
||||
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
|
||||
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
|
||||
Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
|
||||
@@ -48,22 +76,21 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
|
||||
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
|
||||
⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
|
||||
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
|
||||
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
|
||||
⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件
|
||||
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
|
||||
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
|
||||
⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中)
|
||||
⭐虚空终端插件 | [插件] 用自然语言,直接调度本项目其他插件
|
||||
⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
|
||||
更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……
|
||||
</div>
|
||||
|
||||
|
||||
- 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/d81137c3-affd-4cd1-bb5e-b15610389762" width="700" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
|
||||
</div>
|
||||
|
||||
|
||||
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板
|
||||
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
@@ -73,58 +100,62 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读
|
||||
- 如果输出包含公式,会以tex形式和渲染形式同时显示,方便复制和阅读
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- 懒得看项目代码?整个工程直接给chatgpt炫嘴里
|
||||
- 懒得看项目代码?直接把整个工程炫ChatGPT嘴里
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + GPT4)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
<br><br>
|
||||
|
||||
# Installation
|
||||
### 安装方法I:直接运行 (Windows, Linux or MacOS)
|
||||
### 安装方法I:直接运行 (Windows, Linux or MacOS)
|
||||
|
||||
1. 下载项目
|
||||
```sh
|
||||
git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
|
||||
cd gpt_academic
|
||||
```
|
||||
|
||||
2. 配置API_KEY
|
||||
```sh
|
||||
git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
|
||||
cd gpt_academic
|
||||
```
|
||||
|
||||
在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
|
||||
2. 配置API_KEY等变量
|
||||
|
||||
「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」
|
||||
在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
|
||||
|
||||
「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」
|
||||
「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,以确保更新或其他用户无法轻易查看您的私有配置 」。
|
||||
|
||||
「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。
|
||||
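A minimal sketch of the read priority described above (`环境变量` > `config_private.py` > `config.py`). The function name and details are illustrative only, not the project's actual toolbox implementation:

```python
# Illustrative only: rough shape of the configuration lookup priority described above.
import importlib
import os

def read_single_conf(name, default=None):
    if name in os.environ:                         # 1st: environment variable
        return os.environ[name]
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):                 # 2nd: config_private.py
            return getattr(private, name)
    except ImportError:
        pass
    config = importlib.import_module("config")     # 3rd: config.py
    return getattr(config, name, default)
```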
|
||||
|
||||
3. 安装依赖
|
||||
```sh
|
||||
# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
python -m pip install -r requirements.txt
|
||||
```sh
|
||||
# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||
conda create -n gptac_venv python=3.11 # 创建anaconda环境
|
||||
conda activate gptac_venv # 激活anaconda环境
|
||||
python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
|
||||
```
|
||||
# (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||
conda create -n gptac_venv python=3.11 # 创建anaconda环境
|
||||
conda activate gptac_venv # 激活anaconda环境
|
||||
python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
|
||||
```
|
||||
|
||||
|
||||
<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
|
||||
<p>
|
||||
|
||||
【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
|
||||
【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
|
||||
|
||||
```sh
|
||||
# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# 【可选步骤II】支持复旦MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -135,6 +166,14 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
|
||||
|
||||
# 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
|
||||
# 【可选步骤V】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择)
|
||||
pip install bitsandbytes
|
||||
# windows用户安装bitsandbytes需要使用下面bitsandbytes-windows-webui
|
||||
python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
|
||||
pip install -U git+https://github.com/huggingface/transformers.git
|
||||
pip install -U git+https://github.com/huggingface/accelerate.git
|
||||
pip install peft
|
||||
```
|
||||
|
||||
</p>
|
||||
@@ -143,70 +182,73 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-
|
||||
|
||||
|
||||
4. 运行
|
||||
```sh
|
||||
python main.py
|
||||
```
|
||||
```sh
|
||||
python main.py
|
||||
```
|
||||
|
||||
### 安装方法II:使用Docker
|
||||
|
||||
0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐使用这个)
|
||||
0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法部署完整项目)
|
||||
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
|
||||
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案0并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案0并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择)
|
||||
1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择)
|
||||
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
|
||||
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
|
||||
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
|
||||
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案1并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案1并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。
|
||||
|
||||
2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
|
||||
2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
|
||||
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
|
||||
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案2并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
``` sh
|
||||
# 修改docker-compose.yml,保留方案2并删除其他方案。然后运行:
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
|
||||
### 安装方法III:其他部署姿势
|
||||
### 安装方法III:其他部署方法
|
||||
1. **Windows一键运行脚本**。
|
||||
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。
|
||||
脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。
|
||||
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。
|
||||
|
||||
2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
|
||||
|
||||
3. 云服务器远程部署避坑指南。
|
||||
请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
4. 一些新型的部署平台或方法
|
||||
4. 在其他平台部署&二级网址部署
|
||||
- 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。
|
||||
- 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
- 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md)
|
||||
|
||||
<br><br>
|
||||
|
||||
# Advanced Usage
|
||||
### I:自定义新的便捷按钮(学术快捷键)
|
||||
任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序。(如按钮已存在,那么前缀、后缀都支持热修改,无需重启程序即可生效。)
|
||||
|
||||
任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。)
|
||||
例如
|
||||
```
|
||||
|
||||
```python
|
||||
"超级英译中": {
|
||||
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
|
||||
"Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
|
||||
|
||||
"Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
|
||||
|
||||
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
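As the comments in the entry above indicate, `Prefix` is prepended and `Suffix` appended to whatever you type before it is sent to the model. A minimal illustrative sketch of that composition (not the project's internal code):

```python
# Illustrative only: how a core_functional.py entry is assumed to wrap the user's input.
def apply_shortcut(entry: dict, user_input: str) -> str:
    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")
```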
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
@@ -216,6 +258,7 @@ docker-compose up
|
||||
本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。
|
||||
详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
|
||||
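For orientation, here is a minimal plugin sketch in the spirit of that template. The argument list and flow are assumptions inferred from other plugins in this repository (such as the game plugins added in this diff), so treat it as illustrative and consult the 函数插件指南 for the authoritative interface:

```python
# Illustrative sketch of a minimal function plugin; the signature is an assumption.
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@CatchException
def 简单问答插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=None):
    chatbot.append((txt, "正在请求模型,请稍等..."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI so the user is not kept waiting
    answer = yield from request_gpt_model_in_new_thread_with_ui_alive(
        txt, txt, llm_kwargs, chatbot, [], system_prompt)
    history.extend([txt, answer])
    yield from update_ui(chatbot=chatbot, history=history)
```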
|
||||
<br><br>
|
||||
|
||||
# Updates
|
||||
### I:动态
|
||||
@@ -264,9 +307,9 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
|
||||
</div>
|
||||
|
||||
8. OpenAI音频解析与总结
|
||||
8. 基于mermaid的流图、脑图绘制
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
|
||||
</div>
|
||||
|
||||
9. Latex全文校对纠错
|
||||
@@ -283,6 +326,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
|
||||
|
||||
|
||||
### II:版本:
|
||||
|
||||
- version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件
|
||||
- version 3.60: 引入AutoGen作为新一代插件的基石
|
||||
- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
|
||||
@@ -303,7 +347,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
|
||||
- version 3.0: 对chatglm和其他小型llm的支持
|
||||
- version 2.6: 重构了插件结构,提高了交互性,加入更多插件
|
||||
- version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题
|
||||
- version 2.4: (1)新增PDF全文翻译功能; (2)新增输入区切换位置的功能; (3)新增垂直布局选项; (4)多线程函数插件优化。
|
||||
- version 2.4: 新增PDF全文翻译功能; 新增输入区切换位置的功能
|
||||
- version 2.3: 增强多线程交互性
|
||||
- version 2.2: 函数插件支持热重载
|
||||
- version 2.1: 可折叠式布局
|
||||
@@ -314,7 +358,7 @@ GPT Academic开发者QQ群:`610599535`
|
||||
|
||||
- 已知问题
|
||||
- 某些浏览器翻译插件干扰此软件前端的运行
|
||||
- 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio
|
||||
- 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio**
|
||||
|
||||
### III:主题
|
||||
可以通过修改`THEME`选项(config.py)变更主题
|
||||
@@ -325,7 +369,8 @@ GPT Academic开发者QQ群:`610599535`
|
||||
|
||||
1. `master` 分支: 主分支,稳定版
|
||||
2. `frontier` 分支: 开发分支,测试版
|
||||
|
||||
3. 如何[接入其他大模型](request_llms/README.md)
|
||||
4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)
|
||||
|
||||
### V:参考与学习
|
||||
|
||||
|
||||
@@ -159,7 +159,15 @@ def warm_up_modules():
|
||||
enc.encode("模块预热", disallowed_special=())
|
||||
enc = model_info["gpt-4"]['tokenizer']
|
||||
enc.encode("模块预热", disallowed_special=())
|
||||
|
||||
def warm_up_vectordb():
|
||||
print('正在执行一些模块的预热 ...')
|
||||
from toolbox import ProxyNetworkActivate
|
||||
with ProxyNetworkActivate("Warmup_Modules"):
|
||||
import nltk
|
||||
with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import os
|
||||
os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
||||
|
||||
72  config.py
@@ -15,13 +15,13 @@ API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗
|
||||
USE_PROXY = False
|
||||
if USE_PROXY:
|
||||
"""
|
||||
代理网络的地址,打开你的代理软件查看代理协议(socks5h / http)、地址(localhost)和端口(11284)
|
||||
填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
|
||||
<配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1>
|
||||
[协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
|
||||
[地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
|
||||
[地址] 填localhost或者127.0.0.1(localhost意思是代理软件安装在本机上)
|
||||
[端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
||||
"""
|
||||
# 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284)
|
||||
proxies = {
|
||||
# [协议]:// [地址] :[端口]
|
||||
"http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890",
|
||||
@@ -87,19 +87,32 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
||||
|
||||
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
||||
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview",
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview",
|
||||
"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
||||
"api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
||||
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
|
||||
"chatglm3", "moss", "newbing", "claude-2"]
|
||||
# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
|
||||
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
"gemini-pro", "chatglm3", "moss", "claude-2"]
|
||||
# P.S. 其他可用的模型还包括 [
|
||||
# "qwen-turbo", "qwen-plus", "qwen-max"
|
||||
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
|
||||
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
||||
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
|
||||
# ]
|
||||
|
||||
|
||||
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
||||
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
||||
|
||||
|
||||
# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用)
|
||||
# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
|
||||
# 也可以是具体的模型路径
|
||||
QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
|
||||
|
||||
|
||||
# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
|
||||
DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
|
||||
|
||||
|
||||
# 百度千帆(LLM_MODEL="qianfan")
|
||||
BAIDU_CLOUD_API_KEY = ''
|
||||
BAIDU_CLOUD_SECRET_KEY = ''
|
||||
@@ -114,7 +127,6 @@ CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b
|
||||
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
||||
LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
|
||||
|
||||
|
||||
# 设置gradio的并行线程数(不需要修改)
|
||||
CONCURRENT_COUNT = 100
|
||||
|
||||
@@ -183,7 +195,7 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||
|
||||
# 接入智谱大模型
|
||||
ZHIPUAI_API_KEY = ""
|
||||
ZHIPUAI_MODEL = "chatglm_turbo"
|
||||
ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
|
||||
|
||||
|
||||
# Claude API KEY
|
||||
@@ -194,6 +206,10 @@ ANTHROPIC_API_KEY = ""
|
||||
CUSTOM_API_KEY_PATTERN = ""
|
||||
|
||||
|
||||
# Google Gemini API-Key
|
||||
GEMINI_API_KEY = ''
|
||||
|
||||
|
||||
# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
|
||||
HUGGINGFACE_ACCESS_TOKEN = "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"
|
||||
|
||||
@@ -232,6 +248,10 @@ WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
|
||||
BLOCK_INVALID_APIKEY = False
|
||||
|
||||
|
||||
# 启用插件热加载
|
||||
PLUGIN_HOT_RELOAD = False
|
||||
|
||||
|
||||
# 自定义按钮的最大数量限制
|
||||
NUM_CUSTOM_BASIC_BTN = 4
|
||||
|
||||
@@ -271,11 +291,37 @@ NUM_CUSTOM_BASIC_BTN = 4
|
||||
│ ├── BAIDU_CLOUD_API_KEY
|
||||
│ └── BAIDU_CLOUD_SECRET_KEY
|
||||
│
|
||||
├── "newbing" Newbing接口不再稳定,不推荐使用
|
||||
├── "zhipuai" 智谱AI大模型chatglm_turbo
|
||||
│ ├── ZHIPUAI_API_KEY
|
||||
│ └── ZHIPUAI_MODEL
|
||||
│
|
||||
├── "qwen-turbo" 等通义千问大模型
|
||||
│ └── DASHSCOPE_API_KEY
|
||||
│
|
||||
├── "Gemini"
|
||||
│ └── GEMINI_API_KEY
|
||||
│
|
||||
└── "newbing" Newbing接口不再稳定,不推荐使用
|
||||
├── NEWBING_STYLE
|
||||
└── NEWBING_COOKIES
|
||||
|
||||
|
||||
本地大模型示意图
|
||||
│
|
||||
├── "chatglm3"
|
||||
├── "chatglm"
|
||||
├── "chatglm_onnx"
|
||||
├── "chatglmft"
|
||||
├── "internlm"
|
||||
├── "moss"
|
||||
├── "jittorllms_pangualpha"
|
||||
├── "jittorllms_llama"
|
||||
├── "deepseekcoder"
|
||||
├── "qwen-local"
|
||||
├── RWKV的支持见Wiki
|
||||
└── "llama2"
|
||||
|
||||
|
||||
用户图形界面布局依赖关系示意图
|
||||
│
|
||||
├── CHATBOT_HEIGHT 对话窗的高度
|
||||
@@ -286,7 +332,7 @@ NUM_CUSTOM_BASIC_BTN = 4
|
||||
├── THEME 色彩主题
|
||||
├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框
|
||||
├── ADD_WAIFU 加一个live2d装饰
|
||||
├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性
|
||||
└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性
|
||||
|
||||
|
||||
插件在线服务配置依赖关系示意图
|
||||
@@ -298,7 +344,7 @@ NUM_CUSTOM_BASIC_BTN = 4
|
||||
│ ├── ALIYUN_ACCESSKEY
|
||||
│ └── ALIYUN_SECRET
|
||||
│
|
||||
├── PDF文档精准解析
|
||||
│ └── GROBID_URLS
|
||||
└── PDF文档精准解析
|
||||
└── GROBID_URLS
|
||||
|
||||
"""
|
||||
|
||||
@@ -345,7 +345,7 @@ def get_crazy_functions():
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
|
||||
"ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
|
||||
"Function": HotReload(同时问询_指定模型)
|
||||
},
|
||||
})
|
||||
@@ -354,9 +354,9 @@ def get_crazy_functions():
|
||||
print('Load function plugin failed')
|
||||
|
||||
try:
|
||||
from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3
|
||||
from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
|
||||
function_plugins.update({
|
||||
"图片生成_DALLE2 (先切换模型到openai或api2d)": {
|
||||
"图片生成_DALLE2 (先切换模型到gpt-*)": {
|
||||
"Group": "对话",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
@@ -367,16 +367,26 @@ def get_crazy_functions():
|
||||
},
|
||||
})
|
||||
function_plugins.update({
|
||||
"图片生成_DALLE3 (先切换模型到openai或api2d)": {
|
||||
"图片生成_DALLE3 (先切换模型到gpt-*)": {
|
||||
"Group": "对话",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792。如需生成高清图像,请输入 1024x1024-HD, 1792x1024-HD, 1024x1792-HD。", # 高级参数输入区的显示提示
|
||||
"ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
|
||||
"Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
|
||||
"Function": HotReload(图片生成_DALLE3)
|
||||
},
|
||||
})
|
||||
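The `ArgsReminder` above describes a「分辨率-质量(可选)-风格(可选)」string such as `1024x1024-hd-vivid`. A rough, hypothetical sketch of how such a string could be split into its three parts (not code from this diff):

```python
# Hypothetical helper: split a "1024x1024-hd-vivid" style string into
# (resolution, quality, style), falling back to the defaults named in the reminder.
def parse_dalle3_args(args: str):
    parts = [p for p in args.strip().split("-") if p]
    resolution = parts[0] if parts else "1024x1024"
    quality, style = "standard", "vivid"
    for p in parts[1:]:
        if p in ("standard", "hd"):
            quality = p
        elif p in ("vivid", "natural"):
            style = p
    return resolution, quality, style
```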
function_plugins.update({
|
||||
"图片修改_DALLE2 (先切换模型到gpt-*)": {
|
||||
"Group": "对话",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": False, # 调用时,唤起高级参数输入区(默认False)
|
||||
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
|
||||
"Function": HotReload(图片修改_DALLE2)
|
||||
},
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
print('Load function plugin failed')
|
||||
@@ -430,7 +440,7 @@ def get_crazy_functions():
|
||||
print('Load function plugin failed')
|
||||
|
||||
try:
|
||||
from crazy_functions.Langchain知识库 import 知识库问答
|
||||
from crazy_functions.知识库问答 import 知识库文件注入
|
||||
function_plugins.update({
|
||||
"构建知识库(先上传文件素材,再运行此插件)": {
|
||||
"Group": "对话",
|
||||
@@ -438,7 +448,7 @@ def get_crazy_functions():
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
|
||||
"Function": HotReload(知识库问答)
|
||||
"Function": HotReload(知识库文件注入)
|
||||
}
|
||||
})
|
||||
except:
|
||||
@@ -446,9 +456,9 @@ def get_crazy_functions():
|
||||
print('Load function plugin failed')
|
||||
|
||||
try:
|
||||
from crazy_functions.Langchain知识库 import 读取知识库作答
|
||||
from crazy_functions.知识库问答 import 读取知识库作答
|
||||
function_plugins.update({
|
||||
"知识库问答(构建知识库后,再运行此插件)": {
|
||||
"知识库文件注入(构建知识库后,再运行此插件)": {
|
||||
"Group": "对话",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
@@ -489,7 +499,7 @@ def get_crazy_functions():
|
||||
})
|
||||
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
|
||||
function_plugins.update({
|
||||
"Arixv论文精细翻译(输入arxivID)[需Latex]": {
|
||||
"Arxiv论文精细翻译(输入arxivID)[需Latex]": {
|
||||
"Group": "学术",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
@@ -580,6 +590,20 @@ def get_crazy_functions():
|
||||
print(trimmed_format_exc())
|
||||
print('Load function plugin failed')
|
||||
|
||||
try:
|
||||
from crazy_functions.互动小游戏 import 随机小游戏
|
||||
function_plugins.update({
|
||||
"随机互动小游戏(仅供测试)": {
|
||||
"Group": "智能体",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"Function": HotReload(随机小游戏)
|
||||
}
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
print('Load function plugin failed')
|
||||
|
||||
# try:
|
||||
# from crazy_functions.chatglm微调工具 import 微调数据集生成
|
||||
# function_plugins.update({
|
||||
|
||||
@@ -26,8 +26,8 @@ class PaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
|
||||
@@ -26,8 +26,8 @@ class PaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
|
||||
@@ -88,6 +88,9 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
|
||||
target_file = pj(translation_dir, 'translate_zh.pdf')
|
||||
if os.path.exists(target_file):
|
||||
promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
|
||||
target_file_compare = pj(translation_dir, 'comparison.pdf')
|
||||
if os.path.exists(target_file_compare):
|
||||
promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
|
||||
return target_file
|
||||
return False
|
||||
def is_float(s):
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
|
||||
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
|
||||
import threading
|
||||
import os
|
||||
import logging
|
||||
@@ -92,7 +92,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
|
||||
# 【选择处理】 尝试计算比例,尽可能多地保留文本
|
||||
from toolbox import get_reduce_token_percent
|
||||
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
|
||||
MAX_TOKEN = 4096
|
||||
MAX_TOKEN = get_max_token(llm_kwargs)
|
||||
EXCEED_ALLO = 512 + 512 * exceeded_cnt
|
||||
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
|
||||
mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
|
||||
@@ -139,6 +139,8 @@ def can_multi_process(llm):
|
||||
if llm.startswith('gpt-'): return True
|
||||
if llm.startswith('api2d-'): return True
|
||||
if llm.startswith('azure-'): return True
|
||||
if llm.startswith('spark'): return True
|
||||
if llm.startswith('zhipuai'): return True
|
||||
return False
|
||||
|
||||
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
@@ -224,7 +226,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
# 【选择处理】 尝试计算比例,尽可能多地保留文本
|
||||
from toolbox import get_reduce_token_percent
|
||||
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
|
||||
MAX_TOKEN = 4096
|
||||
MAX_TOKEN = get_max_token(llm_kwargs)
|
||||
EXCEED_ALLO = 512 + 512 * exceeded_cnt
|
||||
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
|
||||
gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
|
||||
@@ -312,95 +314,6 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
return gpt_response_collection
|
||||
|
||||
|
||||
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
|
||||
def cut(txt_tocut, must_break_at_empty_line): # 递归
|
||||
if get_token_fn(txt_tocut) <= limit:
|
||||
return [txt_tocut]
|
||||
else:
|
||||
lines = txt_tocut.split('\n')
|
||||
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
|
||||
estimated_line_cut = int(estimated_line_cut)
|
||||
for cnt in reversed(range(estimated_line_cut)):
|
||||
if must_break_at_empty_line:
|
||||
if lines[cnt] != "":
|
||||
continue
|
||||
print(cnt)
|
||||
prev = "\n".join(lines[:cnt])
|
||||
post = "\n".join(lines[cnt:])
|
||||
if get_token_fn(prev) < limit:
|
||||
break
|
||||
if cnt == 0:
|
||||
raise RuntimeError("存在一行极长的文本!")
|
||||
# print(len(post))
|
||||
# 列表递归接龙
|
||||
result = [prev]
|
||||
result.extend(cut(post, must_break_at_empty_line))
|
||||
return result
|
||||
try:
|
||||
return cut(txt, must_break_at_empty_line=True)
|
||||
except RuntimeError:
|
||||
return cut(txt, must_break_at_empty_line=False)
|
||||
|
||||
|
||||
def force_breakdown(txt, limit, get_token_fn):
|
||||
"""
|
||||
当无法用标点、空行分割时,我们用最暴力的方法切割
|
||||
"""
|
||||
for i in reversed(range(len(txt))):
|
||||
if get_token_fn(txt[:i]) < limit:
|
||||
return txt[:i], txt[i:]
|
||||
return "Tiktoken未知错误", "Tiktoken未知错误"
|
||||
|
||||
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
|
||||
# 递归
|
||||
def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
|
||||
if get_token_fn(txt_tocut) <= limit:
|
||||
return [txt_tocut]
|
||||
else:
|
||||
lines = txt_tocut.split('\n')
|
||||
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
|
||||
estimated_line_cut = int(estimated_line_cut)
|
||||
cnt = 0
|
||||
for cnt in reversed(range(estimated_line_cut)):
|
||||
if must_break_at_empty_line:
|
||||
if lines[cnt] != "":
|
||||
continue
|
||||
prev = "\n".join(lines[:cnt])
|
||||
post = "\n".join(lines[cnt:])
|
||||
if get_token_fn(prev) < limit:
|
||||
break
|
||||
if cnt == 0:
|
||||
if break_anyway:
|
||||
prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
|
||||
else:
|
||||
raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
|
||||
# print(len(post))
|
||||
# 列表递归接龙
|
||||
result = [prev]
|
||||
result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
|
||||
return result
|
||||
try:
|
||||
# 第1次尝试,将双空行(\n\n)作为切分点
|
||||
return cut(txt, must_break_at_empty_line=True)
|
||||
except RuntimeError:
|
||||
try:
|
||||
# 第2次尝试,将单空行(\n)作为切分点
|
||||
return cut(txt, must_break_at_empty_line=False)
|
||||
except RuntimeError:
|
||||
try:
|
||||
# 第3次尝试,将英文句号(.)作为切分点
|
||||
res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
|
||||
return [r.replace('。\n', '.') for r in res]
|
||||
except RuntimeError as e:
|
||||
try:
|
||||
# 第4次尝试,将中文句号(。)作为切分点
|
||||
res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
|
||||
return [r.replace('。。\n', '。') for r in res]
|
||||
except RuntimeError as e:
|
||||
# 第5次尝试,没办法了,随便切一下敷衍吧
|
||||
return cut(txt, must_break_at_empty_line=False, break_anyway=True)
|
||||
|
||||
|
||||
|
||||
def read_and_clean_pdf_text(fp):
|
||||
"""
|
||||
@@ -553,6 +466,9 @@ def read_and_clean_pdf_text(fp):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
# 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写
|
||||
if starts_with_lowercase_word(meta_txt[0]):
|
||||
meta_txt[0] = meta_txt[0].capitalize()
|
||||
for _ in range(100):
|
||||
for index, block_txt in enumerate(meta_txt):
|
||||
if starts_with_lowercase_word(block_txt):
|
||||
@@ -631,90 +547,6 @@ def get_files_from_everything(txt, type): # type='.md'
|
||||
|
||||
|
||||
|
||||
|
||||
def Singleton(cls):
|
||||
_instance = {}
|
||||
|
||||
def _singleton(*args, **kargs):
|
||||
if cls not in _instance:
|
||||
_instance[cls] = cls(*args, **kargs)
|
||||
return _instance[cls]
|
||||
|
||||
return _singleton
|
||||
|
||||
|
||||
@Singleton
|
||||
class knowledge_archive_interface():
|
||||
def __init__(self) -> None:
|
||||
self.threadLock = threading.Lock()
|
||||
self.current_id = ""
|
||||
self.kai_path = None
|
||||
self.qa_handle = None
|
||||
self.text2vec_large_chinese = None
|
||||
|
||||
def get_chinese_text2vec(self):
|
||||
if self.text2vec_large_chinese is None:
|
||||
# < -------------------预热文本向量化模组--------------- >
|
||||
from toolbox import ProxyNetworkActivate
|
||||
print('Checking Text2vec ...')
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
|
||||
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
|
||||
|
||||
return self.text2vec_large_chinese
|
||||
|
||||
|
||||
def feed_archive(self, file_manifest, id="default"):
|
||||
self.threadLock.acquire()
|
||||
# import uuid
|
||||
self.current_id = id
|
||||
from zh_langchain import construct_vector_store
|
||||
self.qa_handle, self.kai_path = construct_vector_store(
|
||||
vs_id=self.current_id,
|
||||
files=file_manifest,
|
||||
sentence_size=100,
|
||||
history=[],
|
||||
one_conent="",
|
||||
one_content_segmentation="",
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
self.threadLock.release()
|
||||
|
||||
def get_current_archive_id(self):
|
||||
return self.current_id
|
||||
|
||||
def get_loaded_file(self):
|
||||
return self.qa_handle.get_loaded_file()
|
||||
|
||||
def answer_with_archive_by_id(self, txt, id):
|
||||
self.threadLock.acquire()
|
||||
if not self.current_id == id:
|
||||
self.current_id = id
|
||||
from zh_langchain import construct_vector_store
|
||||
self.qa_handle, self.kai_path = construct_vector_store(
|
||||
vs_id=self.current_id,
|
||||
files=[],
|
||||
sentence_size=100,
|
||||
history=[],
|
||||
one_conent="",
|
||||
one_content_segmentation="",
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
VECTOR_SEARCH_SCORE_THRESHOLD = 0
|
||||
VECTOR_SEARCH_TOP_K = 4
|
||||
CHUNK_SIZE = 512
|
||||
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
|
||||
query = txt,
|
||||
vs_path = self.kai_path,
|
||||
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
|
||||
vector_search_top_k=VECTOR_SEARCH_TOP_K,
|
||||
chunk_conent=True,
|
||||
chunk_size=CHUNK_SIZE,
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
self.threadLock.release()
|
||||
return resp, prompt
|
||||
|
||||
@Singleton
|
||||
class nougat_interface():
|
||||
def __init__(self):
|
||||
|
||||
42  crazy_functions/game_fns/game_ascii_art.py (new file)
@@ -0,0 +1,42 @@
|
||||
from toolbox import CatchException, update_ui, update_ui_lastest_msg
|
||||
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
|
||||
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||
from request_llms.bridge_all import predict_no_ui_long_connection
|
||||
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
|
||||
import random
|
||||
|
||||
|
||||
class MiniGame_ASCII_Art(GptAcademicGameBaseState):
|
||||
def step(self, prompt, chatbot, history):
|
||||
if self.step_cnt == 0:
|
||||
chatbot.append(["我画你猜(动物)", "请稍等..."])
|
||||
else:
|
||||
if prompt.strip() == 'exit':
|
||||
self.delete_game = True
|
||||
yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
|
||||
return
|
||||
chatbot.append([prompt, ""])
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
|
||||
if self.step_cnt == 0:
|
||||
self.lock_plugin(chatbot)
|
||||
self.cur_task = 'draw'
|
||||
|
||||
if self.cur_task == 'draw':
|
||||
avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
|
||||
self.obj = random.choice(avail_obj)
|
||||
inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \
|
||||
f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. "
|
||||
raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
|
||||
self.cur_task = 'identify user guess'
|
||||
res = get_code_block(raw_res)
|
||||
history += ['', f'the answer is {self.obj}', inputs, res]
|
||||
yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
|
||||
|
||||
elif self.cur_task == 'identify user guess':
|
||||
if is_same_thing(self.obj, prompt, self.llm_kwargs):
|
||||
self.delete_game = True
|
||||
yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
|
||||
else:
|
||||
self.cur_task = 'identify user guess'
|
||||
yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
|
||||
212  crazy_functions/game_fns/game_interactive_story.py (new file)
@@ -0,0 +1,212 @@
|
||||
prompts_hs = """ 请以“{headstart}”为开头,编写一个小说的第一幕。
|
||||
|
||||
- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
|
||||
- 出现人物时,给出人物的名字。
|
||||
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
|
||||
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
|
||||
- 字数要求:第一幕的字数少于300字,且少于2个段落。
|
||||
"""
|
||||
|
||||
prompts_interact = """ 小说的前文回顾:
|
||||
「
|
||||
{previously_on_story}
|
||||
」
|
||||
|
||||
你是一个作家,根据以上的情节,给出4种不同的后续剧情发展方向,每个发展方向都精明扼要地用一句话说明。稍后,我将在这4个选择中,挑选一种剧情发展。
|
||||
|
||||
输出格式例如:
|
||||
1. 后续剧情发展1
|
||||
2. 后续剧情发展2
|
||||
3. 后续剧情发展3
|
||||
4. 后续剧情发展4
|
||||
"""
|
||||
|
||||
|
||||
prompts_resume = """小说的前文回顾:
|
||||
「
|
||||
{previously_on_story}
|
||||
」
|
||||
|
||||
你是一个作家,我们正在互相讨论,确定后续剧情的发展。
|
||||
在以下的剧情发展中,
|
||||
「
|
||||
{choice}
|
||||
」
|
||||
我认为更合理的是:{user_choice}。
|
||||
请在前文的基础上(不要重复前文),围绕我选定的剧情情节,编写小说的下一幕。
|
||||
|
||||
- 禁止杜撰不符合我选择的剧情。
|
||||
- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
|
||||
- 不要重复前文。
|
||||
- 出现人物时,给出人物的名字。
|
||||
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
|
||||
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
|
||||
- 小说的下一幕字数少于300字,且少于2个段落。
|
||||
"""
|
||||
|
||||
|
||||
prompts_terminate = """小说的前文回顾:
|
||||
「
|
||||
{previously_on_story}
|
||||
」
|
||||
|
||||
你是一个作家,我们正在互相讨论,确定后续剧情的发展。
|
||||
现在,故事该结束了,我认为最合理的故事结局是:{user_choice}。
|
||||
|
||||
请在前文的基础上(不要重复前文),编写小说的最后一幕。
|
||||
|
||||
- 不要重复前文。
|
||||
- 出现人物时,给出人物的名字。
|
||||
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
|
||||
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
|
||||
- 字数要求:最后一幕的字数少于1000字。
|
||||
"""
|
||||
|
||||
|
||||
from toolbox import CatchException, update_ui, update_ui_lastest_msg
|
||||
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
|
||||
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||
from request_llms.bridge_all import predict_no_ui_long_connection
|
||||
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
|
||||
import random
|
||||
|
||||
|
||||
class MiniGame_ResumeStory(GptAcademicGameBaseState):
|
||||
story_headstart = [
|
||||
'先行者知道,他现在是全宇宙中唯一的一个人了。',
|
||||
'深夜,一个年轻人穿过天安门广场向纪念堂走去。在二十二世纪编年史中,计算机把他的代号定为M102。',
|
||||
'他知道,这最后一课要提前讲了。又一阵剧痛从肝部袭来,几乎使他晕厥过去。',
|
||||
'在距地球五万光年的远方,在银河系的中心,一场延续了两万年的星际战争已接近尾声。那里的太空中渐渐隐现出一个方形区域,仿佛灿烂的群星的背景被剪出一个方口。',
|
||||
'伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行,他们的目的地是南极,如果几天后能顺利到达那里,他们将钻出地壳去看诗云。',
|
||||
'很多人生来就会莫名其妙地迷上一样东西,仿佛他的出生就是要和这东西约会似的,正是这样,圆圆迷上了肥皂泡。'
|
||||
]
|
||||
|
||||
|
||||
def begin_game_step_0(self, prompt, chatbot, history):
|
||||
# init game at step 0
|
||||
self.headstart = random.choice(self.story_headstart)
|
||||
self.story = []
|
||||
chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"])
|
||||
self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。'
|
||||
|
||||
|
||||
def generate_story_image(self, story_paragraph):
|
||||
try:
|
||||
from crazy_functions.图片生成 import gen_image
|
||||
prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求:80字以内。')
|
||||
image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
|
||||
return f'<br/><div align="center"><img src="file={image_path}"></div>'
|
||||
except:
|
||||
return ''
|
||||
|
||||
def step(self, prompt, chatbot, history):
|
||||
|
||||
"""
|
||||
首先,处理游戏初始化等特殊情况
|
||||
"""
|
||||
if self.step_cnt == 0:
|
||||
self.begin_game_step_0(prompt, chatbot, history)
|
||||
self.lock_plugin(chatbot)
|
||||
self.cur_task = 'head_start'
|
||||
else:
|
||||
if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
|
||||
# should we terminate game here?
|
||||
self.delete_game = True
|
||||
yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
|
||||
return
|
||||
if '剧情收尾' in prompt:
|
||||
self.cur_task = 'story_terminate'
|
||||
# # well, game resumes
|
||||
# chatbot.append([prompt, ""])
|
||||
# update ui, don't keep the user waiting
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
|
||||
|
||||
"""
|
||||
处理游戏的主体逻辑
|
||||
"""
|
||||
if self.cur_task == 'head_start':
|
||||
"""
|
||||
这是游戏的第一步
|
||||
"""
|
||||
inputs_ = prompts_hs.format(headstart=self.headstart)
|
||||
history_ = []
|
||||
story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs_, '故事开头', self.llm_kwargs,
|
||||
chatbot, history_, self.sys_prompt_
|
||||
)
|
||||
self.story.append(story_paragraph)
|
||||
# # 配图
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
|
||||
|
||||
# # 构建后续剧情引导
|
||||
previously_on_story = ""
|
||||
for s in self.story:
|
||||
previously_on_story += s + '\n'
|
||||
inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
|
||||
history_ = []
|
||||
self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs,
|
||||
chatbot,
|
||||
history_,
|
||||
self.sys_prompt_
|
||||
)
|
||||
self.cur_task = 'user_choice'
|
||||
|
||||
|
||||
elif self.cur_task == 'user_choice':
|
||||
"""
|
||||
根据用户的提示,确定故事的下一步
|
||||
"""
|
||||
if '请在以下几种故事走向中,选择一种' in chatbot[-1][0]: chatbot.pop(-1)
|
||||
previously_on_story = ""
|
||||
for s in self.story:
|
||||
previously_on_story += s + '\n'
|
||||
inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt)
|
||||
history_ = []
|
||||
story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs,
|
||||
chatbot, history_, self.sys_prompt_
|
||||
)
|
||||
self.story.append(story_paragraph)
|
||||
# # 配图
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
|
||||
|
||||
# # 构建后续剧情引导
|
||||
previously_on_story = ""
|
||||
for s in self.story:
|
||||
previously_on_story += s + '\n'
|
||||
inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
|
||||
history_ = []
|
||||
self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs_,
|
||||
'请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs,
|
||||
chatbot,
|
||||
history_,
|
||||
self.sys_prompt_
|
||||
)
|
||||
self.cur_task = 'user_choice'
|
||||
|
||||
|
||||
elif self.cur_task == 'story_terminate':
|
||||
"""
|
||||
根据用户的提示,确定故事的结局
|
||||
"""
|
||||
previously_on_story = ""
|
||||
for s in self.story:
|
||||
previously_on_story += s + '\n'
|
||||
inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt)
|
||||
history_ = []
|
||||
story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs,
|
||||
chatbot, history_, self.sys_prompt_
|
||||
)
|
||||
# # 配图
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
|
||||
yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
|
||||
|
||||
# terminate game
|
||||
self.delete_game = True
|
||||
return
|
||||
35
crazy_functions/game_fns/game_utils.py
Normal file
@@ -0,0 +1,35 @@
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
from request_llms.bridge_all import predict_no_ui_long_connection

def get_code_block(reply):
    import re
    pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
    matches = re.findall(pattern, reply) # find all code blocks in text
    if len(matches) == 1:
        return "```" + matches[0] + "```" # code block
    raise RuntimeError("GPT is not generating proper code.")

def is_same_thing(a, b, llm_kwargs):
    from pydantic import BaseModel, Field
    class IsSameThing(BaseModel):
        is_same_thing: bool = Field(description="determine whether two objects are same thing.", default=False)

    def run_gpt_fn(inputs, sys_prompt, history=[]):
        return predict_no_ui_long_connection(
            inputs=inputs, llm_kwargs=llm_kwargs,
            history=history, sys_prompt=sys_prompt, observe_window=[]
        )

    gpt_json_io = GptJsonIO(IsSameThing)
    inputs_01 = "Identify whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b)
    inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing."
    analyze_res_cot_01 = run_gpt_fn(inputs_01, "", [])

    inputs_02 = inputs_01 + gpt_json_io.format_instructions
    analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01])

    try:
        res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
        return res.is_same_thing
    except JsonStringError as e:
        return False
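# ---- Illustrative usage sketch (editor's addition, not part of the committed file) ----
# get_code_block() extracts the single fenced block from a model reply, and is_same_thing()
# runs a two-round query (free-form reasoning, then JSON-constrained output via GptJsonIO)
# to decide whether two descriptions denote the same object. The llm_kwargs argument below
# is assumed to be the usual model-parameter dict that the plugins pass around.
def _demo_game_utils(llm_kwargs):
    reply = "Sure, here you go:\n```\nprint('hello')\n```"
    block = get_code_block(reply)                    # -> "```\nprint('hello')\n```"
    same = is_same_thing('猫', 'cat', llm_kwargs)    # expected: True (cross-language match)
    return block, same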
37
crazy_functions/ipc_fns/mp.py
Normal file
@@ -0,0 +1,37 @@
import platform
import pickle
import multiprocessing

def run_in_subprocess_wrapper_func(v_args):
    func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args)
    import sys
    try:
        result = func(*args, **kwargs)
        return_dict['result'] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict['exception'] = exc_info

def run_in_subprocess_with_timeout(func, timeout=60):
    if platform.system() == 'Linux':
        def wrapper(*args, **kwargs):
            return_dict = multiprocessing.Manager().dict()
            exception_dict = multiprocessing.Manager().dict()
            v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict))
            process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,))
            process.start()
            process.join(timeout)
            if process.is_alive():
                process.terminate()
                raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务')
            process.close()
            if 'exception' in exception_dict:
                # ooops, the subprocess ran into an exception
                exc_info = exception_dict['exception']
                raise exc_info[1].with_traceback(exc_info[2])
            if 'result' in return_dict.keys():
                # If the subprocess ran successfully, return the result
                return return_dict['result']
        return wrapper
    else:
        return func
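# ---- Illustrative usage sketch (editor's addition, not part of the committed file) ----
# run_in_subprocess_with_timeout() returns a wrapper that, on Linux, executes the target
# function in a separate process and raises TimeoutError when the time budget is exceeded;
# on other platforms the original function is returned unchanged. The helper below is hypothetical.
def _slow_square(x):
    import time
    time.sleep(2)
    return x * x

_safe_square = run_in_subprocess_with_timeout(_slow_square, timeout=5)
# _safe_square(7)  -> 49, or TimeoutError if the call takes longer than 5 seconds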
@@ -175,7 +175,6 @@ class LatexPaperFileGroup():
|
||||
self.sp_file_contents = []
|
||||
self.sp_file_index = []
|
||||
self.sp_file_tag = []
|
||||
|
||||
# count_token
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
@@ -192,13 +191,12 @@ class LatexPaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from ..crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
|
||||
print('Segmentation: done')
|
||||
|
||||
def merge_result(self):
|
||||
self.file_result = ["" for _ in range(len(self.file_paths))]
|
||||
@@ -404,7 +402,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
|
||||
result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path
|
||||
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
|
||||
if modified_pdf_success:
|
||||
yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面
|
||||
yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
|
||||
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
|
||||
origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf') # get pdf path
|
||||
if os.path.exists(pj(work_folder, '..', 'translation')):
|
||||
@@ -416,8 +414,11 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
|
||||
from .latex_toolbox import merge_pdfs
|
||||
concat_pdf = pj(work_folder_modified, f'comparison.pdf')
|
||||
merge_pdfs(origin_pdf, result_pdf, concat_pdf)
|
||||
if os.path.exists(pj(work_folder, '..', 'translation')):
|
||||
shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf'))
|
||||
promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
return True # 成功啦
|
||||
else:
|
||||
|
||||
@@ -250,8 +250,8 @@ def find_main_tex_file(file_manifest, mode):
|
||||
else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
|
||||
canidates_score = []
|
||||
# 给出一些判定模板文档的词作为扣分项
|
||||
unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
|
||||
expected_words = ['\input', '\ref', '\cite']
|
||||
unexpected_words = ['\\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
|
||||
expected_words = ['\\input', '\\ref', '\\cite']
|
||||
for texf in canidates:
|
||||
canidates_score.append(0)
|
||||
with open(texf, 'r', encoding='utf8', errors='ignore') as f:
|
||||
@@ -493,11 +493,38 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
|
||||
return False
|
||||
return True
|
||||
|
||||
def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
|
||||
import sys
|
||||
try:
|
||||
result = func(*args, **kwargs)
|
||||
return_dict['result'] = result
|
||||
except Exception as e:
|
||||
exc_info = sys.exc_info()
|
||||
exception_dict['exception'] = exc_info
|
||||
|
||||
def run_in_subprocess(func):
|
||||
import multiprocessing
|
||||
def wrapper(*args, **kwargs):
|
||||
return_dict = multiprocessing.Manager().dict()
|
||||
exception_dict = multiprocessing.Manager().dict()
|
||||
process = multiprocessing.Process(target=run_in_subprocess_wrapper_func,
|
||||
args=(func, args, kwargs, return_dict, exception_dict))
|
||||
process.start()
|
||||
process.join()
|
||||
process.close()
|
||||
if 'exception' in exception_dict:
|
||||
# ooops, the subprocess ran into an exception
|
||||
exc_info = exception_dict['exception']
|
||||
raise exc_info[1].with_traceback(exc_info[2])
|
||||
if 'result' in return_dict.keys():
|
||||
# If the subprocess ran successfully, return the result
|
||||
return return_dict['result']
|
||||
return wrapper
|
||||
|
||||
def merge_pdfs(pdf1_path, pdf2_path, output_path):
|
||||
import PyPDF2
|
||||
def _merge_pdfs(pdf1_path, pdf2_path, output_path):
|
||||
import PyPDF2 # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放
|
||||
Percent = 0.95
|
||||
# raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
|
||||
# Open the first PDF file
|
||||
with open(pdf1_path, 'rb') as pdf1_file:
|
||||
pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
|
||||
@@ -531,3 +558,5 @@ def merge_pdfs(pdf1_path, pdf2_path, output_path):
|
||||
# Save the merged PDF file
|
||||
with open(output_path, 'wb') as output_file:
|
||||
output_writer.write(output_file)
|
||||
|
||||
merge_pdfs = run_in_subprocess(_merge_pdfs) # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List
|
||||
from toolbox import update_ui_lastest_msg, disable_auto_promotion
|
||||
from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
|
||||
from request_llms.bridge_all import predict_no_ui_long_connection
|
||||
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
|
||||
import time
|
||||
@@ -21,11 +22,7 @@ class GptAcademicState():
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def lock_plugin(self, chatbot):
|
||||
chatbot._cookies['plugin_state'] = pickle.dumps(self)
|
||||
|
||||
def unlock_plugin(self, chatbot):
|
||||
self.reset()
|
||||
def dump_state(self, chatbot):
|
||||
chatbot._cookies['plugin_state'] = pickle.dumps(self)
|
||||
|
||||
def set_state(self, chatbot, key, value):
|
||||
@@ -40,6 +37,57 @@ class GptAcademicState():
|
||||
state.chatbot = chatbot
|
||||
return state
|
||||
|
||||
class GatherMaterials():
|
||||
def __init__(self, materials) -> None:
|
||||
materials = ['image', 'prompt']
|
||||
|
||||
class GptAcademicGameBaseState():
|
||||
"""
|
||||
1. first init: __init__ ->
|
||||
"""
|
||||
def init_game(self, chatbot, lock_plugin):
|
||||
self.plugin_name = None
|
||||
self.callback_fn = None
|
||||
self.delete_game = False
|
||||
self.step_cnt = 0
|
||||
|
||||
def lock_plugin(self, chatbot):
|
||||
if self.callback_fn is None:
|
||||
raise ValueError("callback_fn is None")
|
||||
chatbot._cookies['lock_plugin'] = self.callback_fn
|
||||
self.dump_state(chatbot)
|
||||
|
||||
def get_plugin_name(self):
|
||||
if self.plugin_name is None:
|
||||
raise ValueError("plugin_name is None")
|
||||
return self.plugin_name
|
||||
|
||||
def dump_state(self, chatbot):
|
||||
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
|
||||
|
||||
def set_state(self, chatbot, key, value):
|
||||
setattr(self, key, value)
|
||||
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
|
||||
|
||||
@staticmethod
|
||||
def sync_state(chatbot, llm_kwargs, cls, plugin_name, callback_fn, lock_plugin=True):
|
||||
state = chatbot._cookies.get(f'plugin_state/{plugin_name}', None)
|
||||
if state is not None:
|
||||
state = pickle.loads(state)
|
||||
else:
|
||||
state = cls()
|
||||
state.init_game(chatbot, lock_plugin)
|
||||
state.plugin_name = plugin_name
|
||||
state.llm_kwargs = llm_kwargs
|
||||
state.chatbot = chatbot
|
||||
state.callback_fn = callback_fn
|
||||
return state
|
||||
|
||||
def continue_game(self, prompt, chatbot, history):
|
||||
# 游戏主体
|
||||
yield from self.step(prompt, chatbot, history)
|
||||
self.step_cnt += 1
|
||||
# 保存状态,收尾
|
||||
self.dump_state(chatbot)
|
||||
# 如果游戏结束,清理
|
||||
if self.delete_game:
|
||||
chatbot._cookies['lock_plugin'] = None
|
||||
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = None
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
|
||||
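# ---- Illustrative sketch (editor's addition, not part of the committed file) ----
# A plugin builds an interactive game by subclassing GptAcademicGameBaseState, implementing
# step(), and driving it through sync_state()/continue_game(). The class and module names
# below are hypothetical; the call pattern mirrors crazy_functions/互动小游戏.py.
class _MyTinyGame(GptAcademicGameBaseState):
    def step(self, prompt, chatbot, history):
        if self.step_cnt == 0:
            self.lock_plugin(chatbot)        # take over the conversation on the first turn
        chatbot.append([prompt, f"round {self.step_cnt}"])
        yield from update_ui(chatbot=chatbot, history=history)
        if prompt.strip() == 'exit':
            self.delete_game = True          # continue_game() will then release the lock

# state = _MyTinyGame.sync_state(chatbot, llm_kwargs, _MyTinyGame,
#                                plugin_name='MyTinyGame',
#                                callback_fn='crazy_functions.my_game->entry',
#                                lock_plugin=True)
# yield from state.continue_game(prompt, chatbot, history)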
125
crazy_functions/pdf_fns/breakdown_txt.py
Normal file
@@ -0,0 +1,125 @@
|
||||
from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout
|
||||
|
||||
def force_breakdown(txt, limit, get_token_fn):
|
||||
""" 当无法用标点、空行分割时,我们用最暴力的方法切割
|
||||
"""
|
||||
for i in reversed(range(len(txt))):
|
||||
if get_token_fn(txt[:i]) < limit:
|
||||
return txt[:i], txt[i:]
|
||||
return "Tiktoken未知错误", "Tiktoken未知错误"
|
||||
|
||||
|
||||
def maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage):
|
||||
""" 为了加速计算,我们采样一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage
|
||||
当 remain_txt_to_cut < `_min` 时,我们再把 remain_txt_to_cut_storage 中的部分文字取出
|
||||
"""
|
||||
_min = int(5e4)
|
||||
_max = int(1e5)
|
||||
# print(len(remain_txt_to_cut), len(remain_txt_to_cut_storage))
|
||||
if len(remain_txt_to_cut) < _min and len(remain_txt_to_cut_storage) > 0:
|
||||
remain_txt_to_cut = remain_txt_to_cut + remain_txt_to_cut_storage
|
||||
remain_txt_to_cut_storage = ""
|
||||
if len(remain_txt_to_cut) > _max:
|
||||
remain_txt_to_cut_storage = remain_txt_to_cut[_max:] + remain_txt_to_cut_storage
|
||||
remain_txt_to_cut = remain_txt_to_cut[:_max]
|
||||
return remain_txt_to_cut, remain_txt_to_cut_storage
|
||||
|
||||
|
||||
def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=False):
|
||||
""" 文本切分
|
||||
"""
|
||||
res = []
|
||||
total_len = len(txt_tocut)
|
||||
fin_len = 0
|
||||
remain_txt_to_cut = txt_tocut
|
||||
remain_txt_to_cut_storage = ""
|
||||
# 为了加速计算,我们采样一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage
|
||||
remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
|
||||
|
||||
while True:
|
||||
if get_token_fn(remain_txt_to_cut) <= limit:
|
||||
# 如果剩余文本的token数小于限制,那么就不用切了
|
||||
res.append(remain_txt_to_cut); fin_len+=len(remain_txt_to_cut)
|
||||
break
|
||||
else:
|
||||
# 如果剩余文本的token数大于限制,那么就切
|
||||
lines = remain_txt_to_cut.split('\n')
|
||||
|
||||
# 估计一个切分点
|
||||
estimated_line_cut = limit / get_token_fn(remain_txt_to_cut) * len(lines)
|
||||
estimated_line_cut = int(estimated_line_cut)
|
||||
|
||||
# 开始查找合适切分点的偏移(cnt)
|
||||
cnt = 0
|
||||
for cnt in reversed(range(estimated_line_cut)):
|
||||
if must_break_at_empty_line:
|
||||
# 首先尝试用双空行(\n\n)作为切分点
|
||||
if lines[cnt] != "":
|
||||
continue
|
||||
prev = "\n".join(lines[:cnt])
|
||||
post = "\n".join(lines[cnt:])
|
||||
if get_token_fn(prev) < limit:
|
||||
break
|
||||
|
||||
if cnt == 0:
|
||||
# 如果没有找到合适的切分点
|
||||
if break_anyway:
|
||||
# 是否允许暴力切分
|
||||
prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
|
||||
else:
|
||||
# 不允许直接报错
|
||||
raise RuntimeError(f"存在一行极长的文本!{remain_txt_to_cut}")
|
||||
|
||||
# 追加列表
|
||||
res.append(prev); fin_len+=len(prev)
|
||||
# 准备下一次迭代
|
||||
remain_txt_to_cut = post
|
||||
remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
|
||||
process = fin_len/total_len
|
||||
print(f'正在文本切分 {int(process*100)}%')
|
||||
if len(remain_txt_to_cut.strip()) == 0:
|
||||
break
|
||||
return res
|
||||
|
||||
|
||||
def breakdown_text_to_satisfy_token_limit_(txt, limit, llm_model="gpt-3.5-turbo"):
|
||||
""" 使用多种方式尝试切分文本,以满足 token 限制
|
||||
"""
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info[llm_model]['tokenizer']
|
||||
def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||
try:
|
||||
# 第1次尝试,将双空行(\n\n)作为切分点
|
||||
return cut(limit, get_token_fn, txt, must_break_at_empty_line=True)
|
||||
except RuntimeError:
|
||||
try:
|
||||
# 第2次尝试,将单空行(\n)作为切分点
|
||||
return cut(limit, get_token_fn, txt, must_break_at_empty_line=False)
|
||||
except RuntimeError:
|
||||
try:
|
||||
# 第3次尝试,将英文句号(.)作为切分点
|
||||
res = cut(limit, get_token_fn, txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
|
||||
return [r.replace('。\n', '.') for r in res]
|
||||
except RuntimeError as e:
|
||||
try:
|
||||
# 第4次尝试,将中文句号(。)作为切分点
|
||||
res = cut(limit, get_token_fn, txt.replace('。', '。。\n'), must_break_at_empty_line=False)
|
||||
return [r.replace('。。\n', '。') for r in res]
|
||||
except RuntimeError as e:
|
||||
# 第5次尝试,没办法了,随便切一下吧
|
||||
return cut(limit, get_token_fn, txt, must_break_at_empty_line=False, break_anyway=True)
|
||||
|
||||
breakdown_text_to_satisfy_token_limit = run_in_subprocess_with_timeout(breakdown_text_to_satisfy_token_limit_, timeout=60)
|
||||
|
||||
if __name__ == '__main__':
|
||||
from crazy_functions.crazy_utils import read_and_clean_pdf_text
|
||||
file_content, page_one = read_and_clean_pdf_text("build/assets/at.pdf")
|
||||
|
||||
from request_llms.bridge_all import model_info
|
||||
for i in range(5):
|
||||
file_content += file_content
|
||||
|
||||
print(len(file_content))
|
||||
TOKEN_LIMIT_PER_FRAGMENT = 2500
|
||||
res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT)
|
||||
|
||||
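# ---- Illustrative note (editor's addition, not part of the committed file) ----
# The splitter tries progressively weaker delimiters (blank lines, single newlines,
# '.'/'。', and finally a hard character-level cut), and the public entry point runs inside
# a 60-second subprocess so a pathological document cannot hang the UI. A minimal
# self-contained check of cut(), with len() standing in for the tokenizer (an assumption
# made only for this demo):
def _demo_cut():
    sample = "段落一。" * 50 + "\n\n" + "段落二。" * 50
    pieces = cut(limit=300, get_token_fn=len, txt_tocut=sample, must_break_at_empty_line=True)
    return pieces  # two fragments, split at the blank line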
@@ -74,7 +74,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat
|
||||
|
||||
def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
|
||||
from crazy_functions.pdf_fns.report_gen_html import construct_html
|
||||
from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
||||
|
||||
@@ -116,7 +116,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
|
||||
# find a smooth token limit to achieve even seperation
|
||||
count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
|
||||
token_limit_smooth = raw_token_num // count + count
|
||||
return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth)
|
||||
return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model'])
|
||||
|
||||
for section in article_dict.get('sections'):
|
||||
if len(section['text']) == 0: continue
|
||||
|
||||
0
crazy_functions/vector_fns/__init__.py
Normal file
70
crazy_functions/vector_fns/general_file_loader.py
Normal file
@@ -0,0 +1,70 @@
|
||||
# From project chatglm-langchain
|
||||
|
||||
|
||||
from langchain.document_loaders import UnstructuredFileLoader
|
||||
from langchain.text_splitter import CharacterTextSplitter
|
||||
import re
|
||||
from typing import List
|
||||
|
||||
class ChineseTextSplitter(CharacterTextSplitter):
|
||||
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.pdf = pdf
|
||||
self.sentence_size = sentence_size
|
||||
|
||||
def split_text1(self, text: str) -> List[str]:
|
||||
if self.pdf:
|
||||
text = re.sub(r"\n{3,}", "\n", text)
|
||||
text = re.sub('\s', ' ', text)
|
||||
text = text.replace("\n\n", "")
|
||||
sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :;
|
||||
sent_list = []
|
||||
for ele in sent_sep_pattern.split(text):
|
||||
if sent_sep_pattern.match(ele) and sent_list:
|
||||
sent_list[-1] += ele
|
||||
elif ele:
|
||||
sent_list.append(ele)
|
||||
return sent_list
|
||||
|
||||
def split_text(self, text: str) -> List[str]: ##此处需要进一步优化逻辑
|
||||
if self.pdf:
|
||||
text = re.sub(r"\n{3,}", r"\n", text)
|
||||
text = re.sub('\s', " ", text)
|
||||
text = re.sub("\n\n", "", text)
|
||||
|
||||
text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text) # 单字符断句符
|
||||
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # 英文省略号
|
||||
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # 中文省略号
|
||||
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
|
||||
# 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号
|
||||
text = text.rstrip() # 段尾如果有多余的\n就去掉它
|
||||
# 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。
|
||||
ls = [i for i in text.split("\n") if i]
|
||||
for ele in ls:
|
||||
if len(ele) > self.sentence_size:
|
||||
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
|
||||
ele1_ls = ele1.split("\n")
|
||||
for ele_ele1 in ele1_ls:
|
||||
if len(ele_ele1) > self.sentence_size:
|
||||
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
|
||||
ele2_ls = ele_ele2.split("\n")
|
||||
for ele_ele2 in ele2_ls:
|
||||
if len(ele_ele2) > self.sentence_size:
|
||||
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
|
||||
ele2_id = ele2_ls.index(ele_ele2)
|
||||
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
|
||||
ele2_id + 1:]
|
||||
ele_id = ele1_ls.index(ele_ele1)
|
||||
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
|
||||
|
||||
id = ls.index(ele)
|
||||
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
|
||||
return ls
|
||||
|
||||
def load_file(filepath, sentence_size):
|
||||
loader = UnstructuredFileLoader(filepath, mode="elements")
|
||||
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
|
||||
docs = loader.load_and_split(text_splitter=textsplitter)
|
||||
# write_check_file(filepath, docs)
|
||||
return docs
|
||||
|
||||
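# ---- Illustrative usage sketch (editor's addition, not part of the committed file) ----
# ChineseTextSplitter first cuts at sentence-final punctuation (。!?; etc.), then splits
# any sentence longer than sentence_size at commas and finally at whitespace.
def _demo_split():
    splitter = ChineseTextSplitter(pdf=False, sentence_size=100)
    return splitter.split_text("今天天气不错。我们去公园吧!好的。")
    # -> ['今天天气不错。', '我们去公园吧!', '好的。']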
338
crazy_functions/vector_fns/vector_database.py
Normal file
@@ -0,0 +1,338 @@
|
||||
# From project chatglm-langchain
|
||||
|
||||
import threading
|
||||
from toolbox import Singleton
|
||||
import os
|
||||
import shutil
|
||||
import os
|
||||
import uuid
|
||||
import tqdm
|
||||
from langchain.vectorstores import FAISS
|
||||
from langchain.docstore.document import Document
|
||||
from typing import List, Tuple
|
||||
import numpy as np
|
||||
from crazy_functions.vector_fns.general_file_loader import load_file
|
||||
|
||||
embedding_model_dict = {
|
||||
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
|
||||
"ernie-base": "nghuyong/ernie-3.0-base-zh",
|
||||
"text2vec-base": "shibing624/text2vec-base-chinese",
|
||||
"text2vec": "GanymedeNil/text2vec-large-chinese",
|
||||
}
|
||||
|
||||
# Embedding model name
|
||||
EMBEDDING_MODEL = "text2vec"
|
||||
|
||||
# Embedding running device
|
||||
EMBEDDING_DEVICE = "cpu"
|
||||
|
||||
# 基于上下文的prompt模版,请务必保留"{question}"和"{context}"
|
||||
PROMPT_TEMPLATE = """已知信息:
|
||||
{context}
|
||||
|
||||
根据上述已知信息,简洁和专业的来回答用户的问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”,不允许在答案中添加编造成分,答案请使用中文。 问题是:{question}"""
|
||||
|
||||
# 文本分句长度
|
||||
SENTENCE_SIZE = 100
|
||||
|
||||
# 匹配后单段上下文长度
|
||||
CHUNK_SIZE = 250
|
||||
|
||||
# LLM input history length
|
||||
LLM_HISTORY_LEN = 3
|
||||
|
||||
# return top-k text chunk from vector store
|
||||
VECTOR_SEARCH_TOP_K = 5
|
||||
|
||||
# 知识检索内容相关度 Score, 数值范围约为0-1100,如果为0,则不生效,经测试设置为小于500时,匹配结果更精准
|
||||
VECTOR_SEARCH_SCORE_THRESHOLD = 0
|
||||
|
||||
NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")
|
||||
|
||||
FLAG_USER_NAME = uuid.uuid4().hex
|
||||
|
||||
# 是否开启跨域,默认为False,如果需要开启,请设置为True
|
||||
# is open cross domain
|
||||
OPEN_CROSS_DOMAIN = False
|
||||
|
||||
def similarity_search_with_score_by_vector(
|
||||
self, embedding: List[float], k: int = 4
|
||||
) -> List[Tuple[Document, float]]:
|
||||
|
||||
def seperate_list(ls: List[int]) -> List[List[int]]:
|
||||
lists = []
|
||||
ls1 = [ls[0]]
|
||||
for i in range(1, len(ls)):
|
||||
if ls[i - 1] + 1 == ls[i]:
|
||||
ls1.append(ls[i])
|
||||
else:
|
||||
lists.append(ls1)
|
||||
ls1 = [ls[i]]
|
||||
lists.append(ls1)
|
||||
return lists
|
||||
|
||||
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
|
||||
docs = []
|
||||
id_set = set()
|
||||
store_len = len(self.index_to_docstore_id)
|
||||
for j, i in enumerate(indices[0]):
|
||||
if i == -1 or 0 < self.score_threshold < scores[0][j]:
|
||||
# This happens when not enough docs are returned.
|
||||
continue
|
||||
_id = self.index_to_docstore_id[i]
|
||||
doc = self.docstore.search(_id)
|
||||
if not self.chunk_conent:
|
||||
if not isinstance(doc, Document):
|
||||
raise ValueError(f"Could not find document for id {_id}, got {doc}")
|
||||
doc.metadata["score"] = int(scores[0][j])
|
||||
docs.append(doc)
|
||||
continue
|
||||
id_set.add(i)
|
||||
docs_len = len(doc.page_content)
|
||||
for k in range(1, max(i, store_len - i)):
|
||||
break_flag = False
|
||||
for l in [i + k, i - k]:
|
||||
if 0 <= l < len(self.index_to_docstore_id):
|
||||
_id0 = self.index_to_docstore_id[l]
|
||||
doc0 = self.docstore.search(_id0)
|
||||
if docs_len + len(doc0.page_content) > self.chunk_size:
|
||||
break_flag = True
|
||||
break
|
||||
elif doc0.metadata["source"] == doc.metadata["source"]:
|
||||
docs_len += len(doc0.page_content)
|
||||
id_set.add(l)
|
||||
if break_flag:
|
||||
break
|
||||
if not self.chunk_conent:
|
||||
return docs
|
||||
if len(id_set) == 0 and self.score_threshold > 0:
|
||||
return []
|
||||
id_list = sorted(list(id_set))
|
||||
id_lists = seperate_list(id_list)
|
||||
for id_seq in id_lists:
|
||||
for id in id_seq:
|
||||
if id == id_seq[0]:
|
||||
_id = self.index_to_docstore_id[id]
|
||||
doc = self.docstore.search(_id)
|
||||
else:
|
||||
_id0 = self.index_to_docstore_id[id]
|
||||
doc0 = self.docstore.search(_id0)
|
||||
doc.page_content += " " + doc0.page_content
|
||||
if not isinstance(doc, Document):
|
||||
raise ValueError(f"Could not find document for id {_id}, got {doc}")
|
||||
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
|
||||
doc.metadata["score"] = int(doc_score)
|
||||
docs.append(doc)
|
||||
return docs
|
||||
|
||||
|
||||
class LocalDocQA:
|
||||
llm: object = None
|
||||
embeddings: object = None
|
||||
top_k: int = VECTOR_SEARCH_TOP_K
|
||||
chunk_size: int = CHUNK_SIZE
|
||||
chunk_conent: bool = True
|
||||
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
|
||||
|
||||
def init_cfg(self,
|
||||
top_k=VECTOR_SEARCH_TOP_K,
|
||||
):
|
||||
|
||||
self.llm = None
|
||||
self.top_k = top_k
|
||||
|
||||
def init_knowledge_vector_store(self,
|
||||
filepath,
|
||||
vs_path: str or os.PathLike = None,
|
||||
sentence_size=SENTENCE_SIZE,
|
||||
text2vec=None):
|
||||
loaded_files = []
|
||||
failed_files = []
|
||||
if isinstance(filepath, str):
|
||||
if not os.path.exists(filepath):
|
||||
print("路径不存在")
|
||||
return None
|
||||
elif os.path.isfile(filepath):
|
||||
file = os.path.split(filepath)[-1]
|
||||
try:
|
||||
docs = load_file(filepath, SENTENCE_SIZE)
|
||||
print(f"{file} 已成功加载")
|
||||
loaded_files.append(filepath)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print(f"{file} 未能成功加载")
|
||||
return None
|
||||
elif os.path.isdir(filepath):
|
||||
docs = []
|
||||
for file in tqdm(os.listdir(filepath), desc="加载文件"):
|
||||
fullfilepath = os.path.join(filepath, file)
|
||||
try:
|
||||
docs += load_file(fullfilepath, SENTENCE_SIZE)
|
||||
loaded_files.append(fullfilepath)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
failed_files.append(file)
|
||||
|
||||
if len(failed_files) > 0:
|
||||
print("以下文件未能成功加载:")
|
||||
for file in failed_files:
|
||||
print(f"{file}\n")
|
||||
|
||||
else:
|
||||
docs = []
|
||||
for file in filepath:
|
||||
docs += load_file(file, SENTENCE_SIZE)
|
||||
print(f"{file} 已成功加载")
|
||||
loaded_files.append(file)
|
||||
|
||||
if len(docs) > 0:
|
||||
print("文件加载完毕,正在生成向量库")
|
||||
if vs_path and os.path.isdir(vs_path):
|
||||
try:
|
||||
self.vector_store = FAISS.load_local(vs_path, text2vec)
|
||||
self.vector_store.add_documents(docs)
|
||||
except:
|
||||
self.vector_store = FAISS.from_documents(docs, text2vec)
|
||||
else:
|
||||
self.vector_store = FAISS.from_documents(docs, text2vec) # docs 为Document列表
|
||||
|
||||
self.vector_store.save_local(vs_path)
|
||||
return vs_path, loaded_files
|
||||
else:
|
||||
raise RuntimeError("文件加载失败,请检查文件格式是否正确")
|
||||
|
||||
def get_loaded_file(self, vs_path):
|
||||
ds = self.vector_store.docstore
|
||||
return set([ds._dict[k].metadata['source'].split(vs_path)[-1] for k in ds._dict])
|
||||
|
||||
|
||||
# query 查询内容
|
||||
# vs_path 知识库路径
|
||||
# chunk_conent 是否启用上下文关联
|
||||
# score_threshold 搜索匹配score阈值
|
||||
# vector_search_top_k 搜索知识库内容条数,默认搜索5条结果
|
||||
# chunk_sizes 匹配单段内容的连接上下文长度
|
||||
def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
|
||||
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
|
||||
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
|
||||
text2vec=None):
|
||||
self.vector_store = FAISS.load_local(vs_path, text2vec)
|
||||
self.vector_store.chunk_conent = chunk_conent
|
||||
self.vector_store.score_threshold = score_threshold
|
||||
self.vector_store.chunk_size = chunk_size
|
||||
|
||||
embedding = self.vector_store.embedding_function.embed_query(query)
|
||||
related_docs_with_score = similarity_search_with_score_by_vector(self.vector_store, embedding, k=vector_search_top_k)
|
||||
|
||||
if not related_docs_with_score:
|
||||
response = {"query": query,
|
||||
"source_documents": []}
|
||||
return response, ""
|
||||
# prompt = f"{query}. You should answer this question using information from following documents: \n\n"
|
||||
prompt = f"{query}. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n"
|
||||
prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)])
|
||||
prompt += "\n\n---\n\n"
|
||||
prompt = prompt.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
|
||||
# print(prompt)
|
||||
response = {"query": query, "source_documents": related_docs_with_score}
|
||||
return response, prompt
|
||||
|
||||
|
||||
|
||||
|
||||
def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
|
||||
for file in files:
|
||||
assert os.path.exists(file), "输入文件不存在:" + file
|
||||
import nltk
|
||||
if NLTK_DATA_PATH not in nltk.data.path: nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
|
||||
local_doc_qa = LocalDocQA()
|
||||
local_doc_qa.init_cfg()
|
||||
filelist = []
|
||||
if not os.path.exists(os.path.join(vs_path, vs_id)):
|
||||
os.makedirs(os.path.join(vs_path, vs_id))
|
||||
for file in files:
|
||||
file_name = file.name if not isinstance(file, str) else file
|
||||
filename = os.path.split(file_name)[-1]
|
||||
shutil.copyfile(file_name, os.path.join(vs_path, vs_id, filename))
|
||||
filelist.append(os.path.join(vs_path, vs_id, filename))
|
||||
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, os.path.join(vs_path, vs_id), sentence_size, text2vec)
|
||||
|
||||
if len(loaded_files):
|
||||
file_status = f"已添加 {'、'.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问"
|
||||
else:
|
||||
pass
|
||||
# file_status = "文件未成功加载,请重新上传文件"
|
||||
# print(file_status)
|
||||
return local_doc_qa, vs_path
|
||||
|
||||
@Singleton
|
||||
class knowledge_archive_interface():
|
||||
def __init__(self) -> None:
|
||||
self.threadLock = threading.Lock()
|
||||
self.current_id = ""
|
||||
self.kai_path = None
|
||||
self.qa_handle = None
|
||||
self.text2vec_large_chinese = None
|
||||
|
||||
def get_chinese_text2vec(self):
|
||||
if self.text2vec_large_chinese is None:
|
||||
# < -------------------预热文本向量化模组--------------- >
|
||||
from toolbox import ProxyNetworkActivate
|
||||
print('Checking Text2vec ...')
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
|
||||
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
|
||||
|
||||
return self.text2vec_large_chinese
|
||||
|
||||
|
||||
def feed_archive(self, file_manifest, vs_path, id="default"):
|
||||
self.threadLock.acquire()
|
||||
# import uuid
|
||||
self.current_id = id
|
||||
self.qa_handle, self.kai_path = construct_vector_store(
|
||||
vs_id=self.current_id,
|
||||
vs_path=vs_path,
|
||||
files=file_manifest,
|
||||
sentence_size=100,
|
||||
history=[],
|
||||
one_conent="",
|
||||
one_content_segmentation="",
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
self.threadLock.release()
|
||||
|
||||
def get_current_archive_id(self):
|
||||
return self.current_id
|
||||
|
||||
def get_loaded_file(self, vs_path):
|
||||
return self.qa_handle.get_loaded_file(vs_path)
|
||||
|
||||
def answer_with_archive_by_id(self, txt, id, vs_path):
|
||||
self.threadLock.acquire()
|
||||
if not self.current_id == id:
|
||||
self.current_id = id
|
||||
self.qa_handle, self.kai_path = construct_vector_store(
|
||||
vs_id=self.current_id,
|
||||
vs_path=vs_path,
|
||||
files=[],
|
||||
sentence_size=100,
|
||||
history=[],
|
||||
one_conent="",
|
||||
one_content_segmentation="",
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
VECTOR_SEARCH_SCORE_THRESHOLD = 0
|
||||
VECTOR_SEARCH_TOP_K = 4
|
||||
CHUNK_SIZE = 512
|
||||
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
|
||||
query = txt,
|
||||
vs_path = self.kai_path,
|
||||
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
|
||||
vector_search_top_k=VECTOR_SEARCH_TOP_K,
|
||||
chunk_conent=True,
|
||||
chunk_size=CHUNK_SIZE,
|
||||
text2vec = self.get_chinese_text2vec(),
|
||||
)
|
||||
self.threadLock.release()
|
||||
return resp, prompt
|
||||
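# ---- Illustrative usage sketch (editor's addition, not part of the committed file) ----
# knowledge_archive_interface is a singleton: feed_archive() builds (or extends) a FAISS
# store under vs_path, and answer_with_archive_by_id() retrieves the top-k related chunks
# and returns them together with a ready-made prompt. The paths below are hypothetical.
# kai = knowledge_archive_interface()
# kai.feed_archive(file_manifest=['docs/a.md', 'docs/b.pdf'], vs_path='gpt_log/vec_store', id='default')
# resp, prompt = kai.answer_with_archive_by_id('这个项目怎么安装?', 'default', 'gpt_log/vec_store')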
40
crazy_functions/互动小游戏.py
Normal file
@@ -0,0 +1,40 @@
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing

@CatchException
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
    # 清空历史
    history = []
    # 选择游戏
    cls = MiniGame_ResumeStory
    # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化
    state = cls.sync_state(chatbot,
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ResumeStory',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)


@CatchException
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
    # 清空历史
    history = []
    # 选择游戏
    cls = MiniGame_ASCII_Art
    # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化
    state = cls.sync_state(chatbot,
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ASCII_Art',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏1',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)
@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log
|
||||
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState
|
||||
|
||||
|
||||
def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None):
|
||||
def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None, style=None):
|
||||
import requests, json, time, os
|
||||
from request_llms.bridge_all import model_info
|
||||
|
||||
@@ -25,7 +25,10 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", qual
|
||||
'model': model,
|
||||
'response_format': 'url'
|
||||
}
|
||||
if quality is not None: data.update({'quality': quality})
|
||||
if quality is not None:
|
||||
data['quality'] = quality
|
||||
if style is not None:
|
||||
data['style'] = style
|
||||
response = requests.post(url, headers=headers, json=data, proxies=proxies)
|
||||
print(response.content)
|
||||
try:
|
||||
@@ -54,19 +57,25 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da
|
||||
img_endpoint = chat_endpoint.replace('chat/completions','images/edits')
|
||||
# # Generate the image
|
||||
url = img_endpoint
|
||||
n = 1
|
||||
headers = {
|
||||
'Authorization': f"Bearer {api_key}",
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
data = {
|
||||
'image': open(image_path, 'rb'),
|
||||
'prompt': prompt,
|
||||
'n': 1,
|
||||
'size': resolution,
|
||||
'model': model,
|
||||
'response_format': 'url'
|
||||
}
|
||||
response = requests.post(url, headers=headers, json=data, proxies=proxies)
|
||||
make_transparent(image_path, image_path+'.tsp.png')
|
||||
make_square_image(image_path+'.tsp.png', image_path+'.tspsq.png')
|
||||
resize_image(image_path+'.tspsq.png', image_path+'.ready.png', max_size=1024)
|
||||
image_path = image_path+'.ready.png'
|
||||
with open(image_path, 'rb') as f:
|
||||
file_content = f.read()
|
||||
files = {
|
||||
'image': (os.path.basename(image_path), file_content),
|
||||
# 'mask': ('mask.png', open('mask.png', 'rb'))
|
||||
'prompt': (None, prompt),
|
||||
"n": (None, str(n)),
|
||||
'size': (None, resolution),
|
||||
}
|
||||
|
||||
response = requests.post(url, headers=headers, files=files, proxies=proxies)
|
||||
print(response.content)
|
||||
try:
|
||||
image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
|
||||
@@ -95,7 +104,11 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
||||
web_port 当前软件运行的端口号
|
||||
"""
|
||||
history = [] # 清空历史,以免输入溢出
|
||||
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
||||
if prompt.strip() == "":
|
||||
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
|
||||
return
|
||||
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
||||
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||
resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
|
||||
@@ -112,16 +125,25 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
||||
@CatchException
|
||||
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||
history = [] # 清空历史,以免输入溢出
|
||||
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
||||
if prompt.strip() == "":
|
||||
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
|
||||
return
|
||||
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
||||
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||
resolution = plugin_kwargs.get("advanced_arg", '1024x1024').lower()
|
||||
if resolution.endswith('-hd'):
|
||||
resolution = resolution.replace('-hd', '')
|
||||
quality = 'hd'
|
||||
else:
|
||||
quality = 'standard'
|
||||
image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality)
|
||||
resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
|
||||
parts = resolution_arg.split('-')
|
||||
resolution = parts[0] # 解析分辨率
|
||||
quality = 'standard' # 质量与风格默认值
|
||||
style = 'vivid'
|
||||
# 遍历检查是否有额外参数
|
||||
for part in parts[1:]:
|
||||
if part in ['hd', 'standard']:
|
||||
quality = part
|
||||
elif part in ['vivid', 'natural']:
|
||||
style = part
|
||||
image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality, style=style)
|
||||
chatbot.append([prompt,
|
||||
f'图像中转网址: <br/>`{image_url}`<br/>'+
|
||||
f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
|
||||
@@ -130,6 +152,7 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
||||
])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
|
||||
|
||||
|
||||
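# ---- Illustrative note (editor's addition, not part of the committed file) ----
# The DALL·E-3 advanced argument is a dash-separated string such as "1024x1792-hd-natural":
# the first field is the resolution, later fields optionally override quality
# ('hd'/'standard') and style ('vivid'/'natural'). A standalone restatement of that parsing rule:
def _parse_dalle3_arg(arg='1024x1024-standard-vivid'):
    parts = arg.lower().split('-')
    resolution, quality, style = parts[0], 'standard', 'vivid'
    for part in parts[1:]:
        if part in ('hd', 'standard'):
            quality = part
        elif part in ('vivid', 'natural'):
            style = part
    return resolution, quality, style
# _parse_dalle3_arg('1024x1792-hd-natural')  -> ('1024x1792', 'hd', 'natural')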
class ImageEditState(GptAcademicState):
|
||||
# 尚未完成
|
||||
def get_image_file(self, x):
|
||||
@@ -142,18 +165,27 @@ class ImageEditState(GptAcademicState):
|
||||
file = None if not confirm else file_manifest[0]
|
||||
return confirm, file
|
||||
|
||||
def lock_plugin(self, chatbot):
|
||||
chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2'
|
||||
self.dump_state(chatbot)
|
||||
|
||||
def unlock_plugin(self, chatbot):
|
||||
self.reset()
|
||||
chatbot._cookies['lock_plugin'] = None
|
||||
self.dump_state(chatbot)
|
||||
|
||||
def get_resolution(self, x):
|
||||
return (x in ['256x256', '512x512', '1024x1024']), x
|
||||
|
||||
|
||||
def get_prompt(self, x):
|
||||
confirm = (len(x)>=5) and (not self.get_resolution(x)[0]) and (not self.get_image_file(x)[0])
|
||||
return confirm, x
|
||||
|
||||
|
||||
def reset(self):
|
||||
self.req = [
|
||||
{'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file},
|
||||
{'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution},
|
||||
{'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt},
|
||||
{'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file},
|
||||
{'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024, 然后再次点击本插件', 'verify_fn': self.get_resolution},
|
||||
{'value':None, 'description': '请输入修改需求,建议您使用英文提示词, 然后再次点击本插件', 'verify_fn': self.get_prompt},
|
||||
]
|
||||
self.info = ""
|
||||
|
||||
@@ -163,7 +195,7 @@ class ImageEditState(GptAcademicState):
|
||||
confirm, res = r['verify_fn'](prompt)
|
||||
if confirm:
|
||||
r['value'] = res
|
||||
self.set_state(chatbot, 'dummy_key', 'dummy_value')
|
||||
self.dump_state(chatbot)
|
||||
break
|
||||
return self
|
||||
|
||||
@@ -182,23 +214,63 @@ def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
||||
history = [] # 清空历史
|
||||
state = ImageEditState.get_state(chatbot, ImageEditState)
|
||||
state = state.feed(prompt, chatbot)
|
||||
state.lock_plugin(chatbot)
|
||||
if not state.already_obtained_all_materials():
|
||||
chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()])
|
||||
chatbot.append(["图片修改\n\n1. 上传图片(图片中需要修改的位置用橡皮擦擦除为纯白色,即RGB=255,255,255)\n2. 输入分辨率 \n3. 输入修改需求", state.next_req()])
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
return
|
||||
|
||||
image_path = state.req[0]
|
||||
resolution = state.req[1]
|
||||
prompt = state.req[2]
|
||||
image_path = state.req[0]['value']
|
||||
resolution = state.req[1]['value']
|
||||
prompt = state.req[2]['value']
|
||||
chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`<br/>分辨率:`{resolution}`<br/>修改需求:`{prompt}`"])
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
|
||||
image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution)
|
||||
chatbot.append([state.prompt,
|
||||
chatbot.append([prompt,
|
||||
f'图像中转网址: <br/>`{image_url}`<br/>'+
|
||||
f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
|
||||
f'本地文件地址: <br/>`{image_path}`<br/>'+
|
||||
f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
|
||||
])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
|
||||
state.unlock_plugin(chatbot)
|
||||
|
||||
def make_transparent(input_image_path, output_image_path):
|
||||
from PIL import Image
|
||||
image = Image.open(input_image_path)
|
||||
image = image.convert("RGBA")
|
||||
data = image.getdata()
|
||||
new_data = []
|
||||
for item in data:
|
||||
if item[0] == 255 and item[1] == 255 and item[2] == 255:
|
||||
new_data.append((255, 255, 255, 0))
|
||||
else:
|
||||
new_data.append(item)
|
||||
image.putdata(new_data)
|
||||
image.save(output_image_path, "PNG")
|
||||
|
||||
def resize_image(input_path, output_path, max_size=1024):
|
||||
from PIL import Image
|
||||
with Image.open(input_path) as img:
|
||||
width, height = img.size
|
||||
if width > max_size or height > max_size:
|
||||
if width >= height:
|
||||
new_width = max_size
|
||||
new_height = int((max_size / width) * height)
|
||||
else:
|
||||
new_height = max_size
|
||||
new_width = int((max_size / height) * width)
|
||||
|
||||
resized_img = img.resize(size=(new_width, new_height))
|
||||
resized_img.save(output_path)
|
||||
else:
|
||||
img.save(output_path)
|
||||
|
||||
def make_square_image(input_path, output_path):
|
||||
from PIL import Image
|
||||
with Image.open(input_path) as img:
|
||||
width, height = img.size
|
||||
size = max(width, height)
|
||||
new_img = Image.new("RGBA", (size, size), color="black")
|
||||
new_img.paste(img, ((size - width) // 2, (size - height) // 2))
|
||||
new_img.save(output_path)
|
||||
|
||||
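# ---- Illustrative usage sketch (editor's addition, not part of the committed file) ----
# edit_image() preprocesses the uploaded picture in three steps before calling the API:
# pure-white pixels become transparent (the erased region to repaint), the canvas is padded
# to a square, and the result is capped at 1024 px. The same chain, stand-alone
# (the input path is hypothetical):
def _prepare_for_edit(image_path='input.png'):
    make_transparent(image_path, image_path + '.tsp.png')
    make_square_image(image_path + '.tsp.png', image_path + '.tspsq.png')
    resize_image(image_path + '.tspsq.png', image_path + '.ready.png', max_size=1024)
    return image_path + '.ready.png'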
@@ -29,17 +29,12 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
|
||||
except:
|
||||
raise RuntimeError('请先将.doc文档转换为.docx文档。')
|
||||
|
||||
print(file_content)
|
||||
# private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
from request_llms.bridge_all import model_info
|
||||
max_token = model_info[llm_kwargs['llm_model']]['max_token']
|
||||
TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
|
||||
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=file_content,
|
||||
get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
|
||||
limit=TOKEN_LIMIT_PER_FRAGMENT
|
||||
)
|
||||
paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
||||
this_paper_history = []
|
||||
for i, paper_frag in enumerate(paper_fragments):
|
||||
i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'
|
||||
|
||||
@@ -28,8 +28,8 @@ class PaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
|
||||
@@ -20,14 +20,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
|
||||
|
||||
TOKEN_LIMIT_PER_FRAGMENT = 2500
|
||||
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
|
||||
page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
||||
page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
|
||||
# 为了更好的效果,我们剥离Introduction之后的部分(如果有)
|
||||
paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
|
||||
|
||||
|
||||
@@ -91,14 +91,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
|
||||
page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
|
||||
|
||||
# 递归地切割PDF文件
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
|
||||
page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=page_one, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
||||
page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=page_one, limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
|
||||
|
||||
# 为了更好的效果,我们剥离Introduction之后的部分(如果有)
|
||||
paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
|
||||
|
||||
@@ -18,14 +18,9 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
|
||||
TOKEN_LIMIT_PER_FRAGMENT = 2500
|
||||
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
|
||||
page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
||||
page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
|
||||
# 为了更好的效果,我们剥离Introduction之后的部分(如果有)
|
||||
paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
|
||||
|
||||
@@ -45,7 +40,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
for i in range(n_fragment):
|
||||
NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
|
||||
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
|
||||
i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
|
||||
i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]} ...."
|
||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
|
||||
llm_kwargs, chatbot,
|
||||
history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果
|
||||
|
||||
@@ -1,10 +1,19 @@
|
||||
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
|
||||
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
|
||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
|
||||
|
||||
install_msg ="""
|
||||
|
||||
1. python -m pip install torch --index-url https://download.pytorch.org/whl/cpu
|
||||
|
||||
2. python -m pip install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
|
||||
|
||||
3. python -m pip install unstructured[all-docs] --upgrade
|
||||
|
||||
4. python -c 'import nltk; nltk.download("punkt")'
|
||||
"""
|
||||
|
||||
@CatchException
|
||||
def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||
"""
|
||||
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
||||
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
||||
@@ -25,15 +34,15 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
|
||||
# resolve deps
|
||||
try:
|
||||
from zh_langchain import construct_vector_store
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from .crazy_utils import knowledge_archive_interface
|
||||
# from zh_langchain import construct_vector_store
|
||||
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
|
||||
except Exception as e:
|
||||
chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
|
||||
chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
from .crazy_utils import try_install_deps
|
||||
try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
|
||||
yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
|
||||
# from .crazy_utils import try_install_deps
|
||||
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
|
||||
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
|
||||
return
|
||||
|
||||
# < --------------------读取文件--------------- >
|
||||
@@ -42,7 +51,7 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
for sp in spl:
|
||||
_, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}')
|
||||
file_manifest += file_manifest_tmp
|
||||
|
||||
|
||||
if len(file_manifest) == 0:
|
||||
chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
@@ -62,13 +71,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
print('Establishing knowledge archive ...')
|
||||
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
|
||||
kai = knowledge_archive_interface()
|
||||
kai.feed_archive(file_manifest=file_manifest, id=kai_id)
|
||||
kai_files = kai.get_loaded_file()
|
||||
vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
|
||||
kai.feed_archive(file_manifest=file_manifest, vs_path=vs_path, id=kai_id)
|
||||
kai_files = kai.get_loaded_file(vs_path=vs_path)
|
||||
kai_files = '<br/>'.join(kai_files)
|
||||
# chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
|
||||
# yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
# chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
|
||||
# chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
|
||||
# chatbot._cookies['lock_plugin'] = 'crazy_functions.知识库文件注入->读取知识库作答'
|
||||
# chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
|
||||
chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
||||
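The hunk above moves the vector store out of a browser cookie and into a per-user log folder (`get_log_folder(user=..., plugin_name='vec_store')`), so each user's uploads feed an isolated archive identified by the pair (vs_path, kai_id). A sketch of what such a vs_path-scoped feed interface could look like; the class below is an illustrative stand-in, not the project's actual `knowledge_archive_interface`.

```python
import os, pickle

class VsPathScopedArchive:
    """Illustrative stand-in: one vector store per (vs_path, id)."""

    def feed_archive(self, file_manifest, vs_path, id="default"):
        store_dir = os.path.join(vs_path, id)
        os.makedirs(store_dir, exist_ok=True)
        # A real implementation would chunk the files, embed the chunks and
        # persist a FAISS index here; this sketch only records what was ingested.
        with open(os.path.join(store_dir, "loaded_files.pkl"), "wb") as f:
            pickle.dump(list(file_manifest), f)

    def get_loaded_file(self, vs_path, id="default"):
        with open(os.path.join(vs_path, id, "loaded_files.pkl"), "rb") as f:
            return pickle.load(f)
```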
@@ -77,15 +87,15 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
|
||||
# resolve deps
|
||||
try:
|
||||
from zh_langchain import construct_vector_store
|
||||
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from .crazy_utils import knowledge_archive_interface
|
||||
# from zh_langchain import construct_vector_store
|
||||
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
|
||||
except Exception as e:
|
||||
chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
|
||||
chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
from .crazy_utils import try_install_deps
|
||||
try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
|
||||
yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
|
||||
# from .crazy_utils import try_install_deps
|
||||
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
|
||||
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
|
||||
return
|
||||
|
||||
# < ------------------- --------------- >
|
||||
@@ -93,7 +103,8 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
||||
|
||||
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||
kai_id = plugin_kwargs.get("advanced_arg", 'default')
|
||||
resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
|
||||
vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
|
||||
resp, prompt = kai.answer_with_archive_by_id(txt, kai_id, vs_path)
|
||||
|
||||
chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
||||
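On the query side, `answer_with_archive_by_id` now receives the same per-user `vs_path`, so 读取知识库作答 reads from the archive that 知识库文件注入 just built and returns both the model reply and the retrieval-augmented prompt that produced it. A hedged sketch of that retrieve-then-prompt step, assuming a FAISS index, its chunk texts and an embedding function have already been loaded from vs_path (all names illustrative, not the project's code):

```python
def answer_with_archive(question, index, chunks, embed, k=3):
    """Illustrative retrieval-augmented prompt builder, not the project's code."""
    q = embed([question])                        # (1, dim) float32 query vector
    _, hits = index.search(q, k)                 # indices of the k nearest chunks
    context = "\n\n".join(chunks[i] for i in hits[0])
    prompt = ("Answer the question using only the material below.\n\n"
              f"{context}\n\nQuestion: {question}")
    return prompt                                # handed to the LLM as usual
```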
@@ -12,13 +12,6 @@ class PaperFileGroup():
|
||||
self.sp_file_index = []
|
||||
self.sp_file_tag = []
|
||||
|
||||
# count_token
|
||||
from request_llms.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
def get_token_num(txt): return len(
|
||||
enc.encode(txt, disallowed_special=()))
|
||||
self.get_token_num = get_token_num
|
||||
|
||||
def run_file_split(self, max_token_limit=1900):
|
||||
"""
|
||||
将长文本分离开来
|
||||
@@ -29,9 +22,8 @@ class PaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
||||
file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
|
||||
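This hunk replaces the PDF-specific splitter with the shared `breakdown_text_to_satisfy_token_limit`, which is also why the gpt-3.5-turbo tokenizer boilerplate disappears from `PaperFileGroup.__init__`: the new helper resolves its own token counter. The underlying idea is recursive bisection until every piece fits the token budget; a minimal sketch, using tiktoken for counting (the project resolves its tokenizer through `model_info` instead):

```python
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")        # gpt-3.5-turbo token family

def count_tokens(txt: str) -> int:
    return len(enc.encode(txt, disallowed_special=()))

def breakdown_to_token_limit(txt: str, limit: int) -> list[str]:
    """Split txt into pieces that each stay within `limit` tokens (bisection)."""
    if len(txt) <= 1 or count_tokens(txt) <= limit:
        return [txt]
    # Prefer cutting at a line break near the middle; otherwise cut hard.
    mid = txt.rfind("\n", 0, len(txt) // 2)
    if mid <= 0:
        mid = len(txt) // 2
    return (breakdown_to_token_limit(txt[:mid], limit)
            + breakdown_to_token_limit(txt[mid:], limit))
```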
@@ -129,7 +129,7 @@ services:
|
||||
runtime: nvidia
|
||||
devices:
|
||||
- /dev/nvidia0:/dev/nvidia0
|
||||
|
||||
|
||||
# 与宿主的网络融合
|
||||
network_mode: "host"
|
||||
command: >
|
||||
@@ -163,7 +163,7 @@ services:
|
||||
runtime: nvidia
|
||||
devices:
|
||||
- /dev/nvidia0:/dev/nvidia0
|
||||
|
||||
|
||||
# 与宿主的网络融合
|
||||
network_mode: "host"
|
||||
|
||||
@@ -229,4 +229,3 @@ services:
|
||||
# 不使用代理网络拉取最新代码
|
||||
command: >
|
||||
bash -c "python3 -u main.py"
|
||||
|
||||
|
||||
@@ -1,2 +1 @@
|
||||
# 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs
|
||||
# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs
|
||||
|
||||
53
docs/GithubAction+AllCapacityBeta
Normal file
@@ -0,0 +1,53 @@
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 .
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host .
# docker run -it --net=host gpt-academic-all-capacity bash

# Build from an NVIDIA base image for GPU support (the host's nvidia-smi must report CUDA >= 11.3)
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

# # Optional step: switch to a pip mirror (the following three lines can be removed)
# RUN echo '[global]' > /etc/pip.conf && \
# echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
# echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

# Install PyTorch
RUN python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
# Prepare pip dependencies
RUN python3 -m pip install openai numpy arxiv rich
RUN python3 -m pip install colorama Markdown pygments pymupdf
RUN python3 -m pip install python-docx moviepy pdfminer
RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
RUN python3 -m pip install rarfile py7zr
RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# Clone the repository branch
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss

RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llms/requirements_moss.txt
RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr

# Warm up the Tiktoken module
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Extra dependencies for the knowledge-base plugin
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
RUN rm -rf /usr/local/lib/python3.8/dist-packages/tests


# COPY .cache /root/.cache
# COPY config_private.py config_private.py
# Launch
CMD ["python3", "-u", "main.py"]
|
||||
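The two `RUN python3 -c '... warm_up_...'` steps execute at build time so that tokenizer and embedding downloads are baked into an image layer instead of happening on the first user request. What exactly `check_proxy.warm_up_modules` and `warm_up_vectordb` do is not shown here; the sketch below is an assumption of the general pattern, using tiktoken and sentence-transformers as stand-ins.

```python
# Illustrative warm-up: pre-download caches during `docker build` so the
# first request does not pay the download cost. Function bodies are assumptions.
def warm_up_modules():
    import tiktoken
    tiktoken.get_encoding("cl100k_base")        # caches the BPE files

def warm_up_vectordb():
    from sentence_transformers import SentenceTransformer
    SentenceTransformer("all-MiniLM-L6-v2")     # caches embedding weights

if __name__ == "__main__":
    warm_up_modules()
    warm_up_vectordb()
```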
@@ -15,7 +15,7 @@ WORKDIR /gpt
|
||||
|
||||
RUN pip3 install openai numpy arxiv rich
|
||||
RUN pip3 install colorama Markdown pygments pymupdf
|
||||
RUN pip3 install python-docx pdfminer
|
||||
RUN pip3 install python-docx pdfminer
|
||||
RUN pip3 install nougat-ocr
|
||||
|
||||
# 装载项目文件
|
||||
|
||||
26
docs/GithubAction+NoLocal+Vectordb
Normal file
@@ -0,0 +1,26 @@
# This Dockerfile targets environments without local models; to use local models such as ChatGLM, see docs/Dockerfile+ChatGLM
# How to build: edit `config.py` first, then: docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
# How to run: docker run --rm -it --net=host gpt-academic-nolocal-vs
FROM python:3.11

# Set the working directory
WORKDIR /gpt

# Copy the project files
COPY . .

# Install dependencies
RUN pip3 install -r requirements.txt

# Extra dependencies for the knowledge-base plugin
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'

# Optional step: warm up modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Launch
CMD ["python3", "-u", "main.py"]
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **ملحوظة**
|
||||
>
|
||||
>
|
||||
> تمت ترجمة هذا الملف README باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع) وقد لا تكون الترجمة 100٪ موثوقة، يُرجى التمييز بعناية بنتائج الترجمة.
|
||||
>
|
||||
>
|
||||
> 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدار المُحدد في `requirements.txt`. الأمر للتثبيت: `pip install -r requirements.txt`.
|
||||
|
||||
# <div align=center><img src="logo.png" width="40"> GPT الأكاديمي</div>
|
||||
@@ -12,14 +12,14 @@
|
||||
**إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star. لترجمة هذا المشروع إلى لغة عشوائية باستخدام GPT، قم بقراءة وتشغيل [`multi_language.py`](multi_language.py) (تجريبي).
|
||||
|
||||
> **ملحوظة**
|
||||
>
|
||||
>
|
||||
> 1. يُرجى ملاحظة أنها الإضافات (الأزرار) المميزة فقط التي تدعم قراءة الملفات، وبعض الإضافات توجد في قائمة منسدلة في منطقة الإضافات. بالإضافة إلى ذلك، نرحب بأي Pull Request جديد بأعلى أولوية لأي إضافة جديدة.
|
||||
>
|
||||
>
|
||||
> 2. تُوضّح كل من الملفات في هذا المشروع وظيفتها بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). يمكنك في أي وقت أن تنقر على إضافة وظيفة ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [نصب بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
|
||||
>
|
||||
>
|
||||
> 3. يتم توافق هذا المشروع مع ودعم توصيات اللغة البيجائية الأكبر شمولًا وشجاعة لمثل ChatGLM. يمكنك توفير العديد من مفاتيح Api المشتركة في تكوين الملف، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. عند تبديل مؤقت لـ `API_KEY`، قم بإدخال `API_KEY` المؤقت في منطقة الإدخال ثم اضغط على زر "إدخال" لجعله ساري المفعول.
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
⭐إضغط على وكيل "شارلوت الذكي" | [وظائف] استكمال الذكاء للكأس الأول للذكاء المكتسب من مايكروسوفت، اكتشاف وتطوير عالمي العميل
|
||||
تبديل الواجهة المُظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح
|
||||
دعم المزيد من نماذج LLM | دعم لجميع GPT3.5 وGPT4 و[ChatGLM2 في جامعة ثوه في لين](https://github.com/THUDM/ChatGLM2-6B) و[MOSS في جامعة فودان](https://github.com/OpenLMLab/MOSS)
|
||||
⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
|
||||
⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
|
||||
دعم المزيد من نماذج "LLM"، دعم [نشر الحديس](https://huggingface.co/spaces/qingxu98/gpt-academic) | انضم إلى واجهة "Newbing" (Bing الجديدة)،نقدم نماذج Jittorllms الجديدة تؤيدهم [LLaMA](https://github.com/facebookresearch/llama) و [盘古α](https://openi.org.cn/pangu/)
|
||||
⭐حزمة "void-terminal" للشبكة (pip) | قم بطلب كافة وظائف إضافة هذا المشروع في python بدون واجهة رسومية (قيد التطوير)
|
||||
⭐PCI-Express لإعلام (PCI) | [وظائف] باللغة الطبيعية، قم بتنفيذ المِهام الأخرى في المشروع
|
||||
@@ -200,8 +200,8 @@ docker-compose up
|
||||
```
|
||||
"ترجمة سوبر الإنجليزية إلى العربية": {
|
||||
# البادئة، ستتم إضافتها قبل إدخالاتك. مثلاً، لوصف ما تريده مثل ترجمة أو شرح كود أو تلوين وهلم جرا
|
||||
"بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
|
||||
|
||||
"بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
|
||||
|
||||
# اللاحقة، سيتم إضافتها بعد إدخالاتك. يمكن استخدامها لوضع علامات اقتباس حول إدخالك.
|
||||
"لاحقة": "",
|
||||
},
|
||||
@@ -341,4 +341,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# المزيد:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -18,11 +18,11 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l
|
||||
> 1.Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process any new plugins with the **highest priority** through PRs.
|
||||
>
|
||||
> 2.The functionalities of each file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As the version iterates, you can also click on the relevant function plugin at any time to call GPT to regenerate the project's self-analysis report. Common questions are in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
|
||||
>
|
||||
>
|
||||
> 3.This project is compatible with and encourages the use of domestic large-scale language models such as ChatGLM. Multiple api-keys can be used together. You can fill in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"` to temporarily switch `API_KEY` during input, enter the temporary `API_KEY`, and then press enter to apply it.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -126,7 +126,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
|
||||
【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration):
|
||||
```sh
|
||||
# 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# 【Optional Step II】Support Fudan MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -204,8 +204,8 @@ For example:
|
||||
```
|
||||
"Super Translation": {
|
||||
# Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc.
|
||||
"Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
|
||||
|
||||
"Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
|
||||
|
||||
# Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix.
|
||||
"Suffix": "",
|
||||
},
|
||||
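Each entry in the dictionary above defines one custom shortcut button: the `Prefix` states the task and is prepended to your input, the `Suffix` is appended after it (for instance to wrap the input in quotation marks). A second, hypothetical entry in the same shape, purely for illustration:

```python
# Hypothetical additional entry, same structure as the example above.
custom_buttons = {
    "Academic Proofreading": {
        # Prefix: added before your input, states the task.
        "Prefix": "Proofread the following paragraph and list every change in a markdown table:\n\n",
        # Suffix: added after your input, here left empty.
        "Suffix": "",
    },
}
```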
@@ -355,4 +355,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# More:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **Remarque**
|
||||
>
|
||||
>
|
||||
> Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction.
|
||||
>
|
||||
>
|
||||
> 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`.
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
**Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !**
|
||||
|
||||
Si vous aimez ce projet, veuillez lui donner une étoile.
|
||||
Si vous aimez ce projet, veuillez lui donner une étoile.
|
||||
Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
|
||||
|
||||
> **Remarque**
|
||||
@@ -22,7 +22,7 @@ Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez
|
||||
> 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)..
|
||||
>
|
||||
> 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci.
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
|
||||
[Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration):
|
||||
```sh
|
||||
# [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# [Optional Step II] Support Fudan MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -201,7 +201,7 @@ Par exemple:
|
||||
"Traduction avancée de l'anglais vers le français": {
|
||||
# Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc.
|
||||
"Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n",
|
||||
|
||||
|
||||
# Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets.
|
||||
"Suffix": "",
|
||||
},
|
||||
@@ -354,4 +354,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# Plus:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **Hinweis**
|
||||
>
|
||||
> Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
|
||||
>
|
||||
>
|
||||
> Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
|
||||
>
|
||||
> 7. November 2023: Beim Installieren der Abhängigkeiten bitte nur die in der `requirements.txt` **angegebenen Versionen** auswählen. Installationsbefehl: `pip install -r requirements.txt`.
|
||||
|
||||
|
||||
@@ -12,19 +12,19 @@
|
||||
|
||||
**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. Wenn Sie praktische Tastenkombinationen oder Plugins entwickelt haben, sind Pull-Anfragen willkommen!**
|
||||
|
||||
Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
|
||||
Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
|
||||
Um dieses Projekt mit GPT in eine beliebige Sprache zu übersetzen, lesen Sie [`multi_language.py`](multi_language.py) (experimentell).
|
||||
|
||||
> **Hinweis**
|
||||
>
|
||||
> 1. Beachten Sie bitte, dass nur die mit **hervorgehobenen** Plugins (Schaltflächen) Dateien lesen können. Einige Plugins befinden sich im **Drop-down-Menü** des Plugin-Bereichs. Außerdem freuen wir uns über jede neue Plugin-PR mit **höchster Priorität**.
|
||||
>
|
||||
>
|
||||
> 2. Die Funktionen jeder Datei in diesem Projekt sind im [Selbstanalysebericht `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht) ausführlich erläutert. Sie können jederzeit auf die relevanten Funktions-Plugins klicken und GPT aufrufen, um den Selbstanalysebericht des Projekts neu zu generieren. Häufig gestellte Fragen finden Sie im [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standardinstallationsmethode](#installation) | [Ein-Klick-Installationsskript](https://github.com/binary-husky/gpt_academic/releases) | [Konfigurationsanleitung](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung).
|
||||
>
|
||||
>
|
||||
> 3. Dieses Projekt ist kompatibel mit und unterstützt auch die Verwendung von inländischen Sprachmodellen wie ChatGLM. Die gleichzeitige Verwendung mehrerer API-Schlüssel ist möglich, indem Sie sie in der Konfigurationsdatei wie folgt angeben: `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Wenn Sie den `API_KEY` vorübergehend ändern möchten, geben Sie vorübergehend den temporären `API_KEY` im Eingabebereich ein und drücken Sie die Eingabetaste, um die Änderung wirksam werden zu lassen.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -93,7 +93,7 @@ Weitere Funktionen anzeigen (z. B. Bildgenerierung) …… | Siehe das Ende dies
|
||||
</div>
|
||||
|
||||
# Installation
|
||||
### Installation Method I: Run directly (Windows, Linux or MacOS)
|
||||
### Installation Method I: Run directly (Windows, Linux or MacOS)
|
||||
|
||||
1. Download the project
|
||||
```sh
|
||||
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as installing
|
||||
[Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration):
|
||||
```sh
|
||||
# [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# [Optional Step II] Support Fudan MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -207,8 +207,8 @@ Beispiel:
|
||||
```
|
||||
"Übersetzung von Englisch nach Chinesisch": {
|
||||
# Präfix, wird vor Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Anforderungen zu beschreiben, z.B. Übersetzen, Code erklären, verbessern usw.
|
||||
"Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
|
||||
|
||||
"Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
|
||||
|
||||
# Suffix, wird nach Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Eingabe in Anführungszeichen zu setzen.
|
||||
"Suffix": "",
|
||||
},
|
||||
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# Weitere:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
**Se ti piace questo progetto, per favore dagli una stella; se hai idee o plugin utili, fai una pull request!**
|
||||
|
||||
Se ti piace questo progetto, dagli una stella.
|
||||
Se ti piace questo progetto, dagli una stella.
|
||||
Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`multi_language.py`](multi_language.py) (sperimentale).
|
||||
|
||||
> **Nota**
|
||||
@@ -20,11 +20,11 @@ Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`mult
|
||||
> 1. Fai attenzione che solo i plugin (pulsanti) **evidenziati** supportano la lettura dei file, alcuni plugin si trovano nel **menu a tendina** nell'area dei plugin. Inoltre, accogliamo e gestiamo con **massima priorità** qualsiasi nuovo plugin attraverso pull request.
|
||||
>
|
||||
> 2. Le funzioni di ogni file in questo progetto sono descritte in dettaglio nel [rapporto di traduzione automatica del progetto `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Con l'iterazione della versione, puoi anche fare clic sui plugin delle funzioni rilevanti in qualsiasi momento per richiamare GPT e rigenerare il rapporto di auto-analisi del progetto. Domande frequenti [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Metodo di installazione standard](#installazione) | [Script di installazione one-click](https://github.com/binary-husky/gpt_academic/releases) | [Configurazione](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
|
||||
>
|
||||
>
|
||||
> 3. Questo progetto è compatibile e incoraggia l'uso di modelli di linguaggio di grandi dimensioni nazionali, come ChatGLM. Supporto per la coesistenza di più chiavi API, puoi compilare nel file di configurazione come `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando è necessario sostituire temporaneamente `API_KEY`, inserisci temporaneamente `API_KEY` nell'area di input e premi Invio per confermare.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # Questo passaggio è identico alla pr
|
||||
[Optional] Se desideri utilizzare ChatGLM2 di Tsinghua/Fudan MOSS come backend, è necessario installare ulteriori dipendenze (Requisiti: conoscenza di Python + esperienza con Pytorch + hardware potente):
|
||||
```sh
|
||||
# [Optional Step I] Supporto per ChatGLM2 di Tsinghua. Note di ChatGLM di Tsinghua: Se si verifica l'errore "Call ChatGLM fail non può caricare i parametri di ChatGLM", fare riferimento a quanto segue: 1: L'installazione predefinita è la versione torch+cpu, per usare cuda è necessario disinstallare torch ed installare nuovamente la versione con torch+cuda; 2: Se il modello non può essere caricato a causa di una configurazione insufficiente, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, sostituendo AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) con AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# [Optional Step II] Supporto per Fudan MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -206,8 +206,8 @@ Ad esempio,
|
||||
```
|
||||
"Traduzione avanzata Cinese-Inglese": {
|
||||
# Prefisso, sarà aggiunto prima del tuo input. Ad esempio, utilizzato per descrivere la tua richiesta, come traduzione, spiegazione del codice, rifinitura, ecc.
|
||||
"Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
|
||||
|
||||
"Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
|
||||
|
||||
# Suffisso, sarà aggiunto dopo il tuo input. Ad esempio, in combinazione con il prefisso, puoi circondare il tuo input con virgolette.
|
||||
"Suffisso": "",
|
||||
},
|
||||
@@ -224,7 +224,7 @@ La scrittura di plugin per questo progetto è facile e richiede solo conoscenze
|
||||
# Aggiornamenti
|
||||
### I: Aggiornamenti
|
||||
|
||||
1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
|
||||
1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
|
||||
Inoltre, nella stessa area del plugin (menu a tendina) chiamare `Carica la cronologia della conversazione` per ripristinare una conversazione precedente.
|
||||
Suggerimento: fare clic su `Carica la cronologia della conversazione` senza specificare un file per visualizzare la tua cronologia di archiviazione HTML.
|
||||
<div align="center">
|
||||
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# Altre risorse:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **注意**
|
||||
>
|
||||
>
|
||||
> 此READMEはGPTによる翻訳で生成されました(このプロジェクトのプラグインによって実装されています)、翻訳結果は100%正確ではないため、注意してください。
|
||||
>
|
||||
>
|
||||
> 2023年11月7日: 依存関係をインストールする際は、`requirements.txt`で**指定されたバージョン**を選択してください。 インストールコマンド: `pip install -r requirements.txt`。
|
||||
|
||||
|
||||
@@ -18,11 +18,11 @@ GPTを使用してこのプロジェクトを任意の言語に翻訳するに
|
||||
> 1. **強調された** プラグイン(ボタン)のみがファイルを読み込むことができることに注意してください。一部のプラグインは、プラグインエリアのドロップダウンメニューにあります。また、新しいプラグインのPRを歓迎し、最優先で対応します。
|
||||
>
|
||||
> 2. このプロジェクトの各ファイルの機能は、[自己分析レポート`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82)で詳しく説明されています。バージョンが進化するにつれて、関連する関数プラグインをクリックして、プロジェクトの自己分析レポートをGPTで再生成することもできます。よくある質問については、[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)をご覧ください。[標準的なインストール方法](#installation) | [ワンクリックインストールスクリプト](https://github.com/binary-husky/gpt_academic/releases) | [構成の説明](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain)。
|
||||
>
|
||||
>
|
||||
> 3. このプロジェクトは、[ChatGLM](https://www.chatglm.dev/)などの中国製の大規模言語モデルも互換性があり、試してみることを推奨しています。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`のように記入できます。`API_KEY`を一時的に変更する必要がある場合は、入力エリアに一時的な`API_KEY`を入力し、Enterキーを押して提出すると有効になります。
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -189,7 +189,7 @@ Python環境に詳しくないWindowsユーザーは、[リリース](https://gi
|
||||
"超级英译中": {
|
||||
# プレフィックス、入力の前に追加されます。例えば、要求を記述するために使用されます。翻訳、コードの解説、校正など
|
||||
"プレフィックス": "下記の内容を中国語に翻訳し、専門用語を一つずつマークダウンテーブルで解説してください:\n\n"、
|
||||
|
||||
|
||||
# サフィックス、入力の後に追加されます。プレフィックスと一緒に使用して、入力内容を引用符で囲むことができます。
|
||||
"サフィックス": ""、
|
||||
}、
|
||||
@@ -342,4 +342,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# その他:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ GPT를 사용하여 이 프로젝트를 임의의 언어로 번역하려면 [`mu
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -130,7 +130,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
|
||||
[Optional Step] If you need support for Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used Pytorch + Sufficient computer configuration):
|
||||
```sh
|
||||
# [Optional Step I] Support for Tsinghua ChatGLM2. Note for Tsinghua ChatGLM: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# [Optional Step II] Support for Fudan MOSS
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -208,8 +208,8 @@ Please visit the [cloud server remote deployment wiki](https://github.com/binary
|
||||
```
|
||||
"초급영문 번역": {
|
||||
# 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등
|
||||
"Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
|
||||
|
||||
"Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
|
||||
|
||||
# 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다.
|
||||
"Suffix": "",
|
||||
},
|
||||
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# 더보기:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **Nota**
|
||||
>
|
||||
>
|
||||
> Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução.
|
||||
>
|
||||
>
|
||||
> 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`.
|
||||
|
||||
# <div align=center><img src="logo.png" width="40"> GPT Acadêmico</div>
|
||||
@@ -15,12 +15,12 @@ Para traduzir este projeto para qualquer idioma utilizando o GPT, leia e execute
|
||||
> **Nota**
|
||||
>
|
||||
> 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos, alguns plugins estão localizados no **menu suspenso** do plugin area. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR.
|
||||
>
|
||||
>
|
||||
> 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT para regerar o relatório de autonálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
|
||||
>
|
||||
> 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo.
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição
|
||||
@@ -89,7 +89,7 @@ Apresentação de mais novas funcionalidades (geração de imagens, etc.) ... |
|
||||
</div>
|
||||
|
||||
# Instalação
|
||||
### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
|
||||
### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
|
||||
|
||||
1. Baixe o projeto
|
||||
```sh
|
||||
@@ -124,7 +124,7 @@ python -m pip install -r requirements.txt # Este passo é igual ao da instalaç
|
||||
[Opcional] Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
|
||||
```sh
|
||||
# [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llm/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
python -m pip install -r request_llms/requirements_chatglm.txt
|
||||
|
||||
# [Opcional Passo II] Suporte para MOSS do Fudan
|
||||
python -m pip install -r request_llms/requirements_moss.txt
|
||||
@@ -202,8 +202,8 @@ Por exemplo:
|
||||
```
|
||||
"超级英译中": {
|
||||
# Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc.
|
||||
"Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
|
||||
|
||||
"Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
|
||||
|
||||
# Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas.
|
||||
"Suffix": "",
|
||||
},
|
||||
@@ -355,4 +355,3 @@ https://github.com/oobabooga/instaladores-de-um-clique
|
||||
# Mais:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
|
||||
> **Примечание**
|
||||
>
|
||||
>
|
||||
> Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
|
||||
>
|
||||
>
|
||||
> 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
|
||||
|
||||
|
||||
@@ -17,12 +17,12 @@
|
||||
>
|
||||
> 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**.
|
||||
>
|
||||
> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
|
||||
> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
|
||||
>
|
||||
> 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
@@ -204,8 +204,8 @@ docker-compose up
|
||||
```
|
||||
"Супер-англо-русский перевод": {
|
||||
# Префикс, который будет добавлен перед вашим вводом. Например, используется для описания вашего запроса, например, перевода, объяснения кода, редактирования и т.д.
|
||||
"Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
|
||||
|
||||
"Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
|
||||
|
||||
# Суффикс, который будет добавлен после вашего ввода. Например, можно использовать с префиксом, чтобы заключить ваш ввод в кавычки.
|
||||
"Суффикс": "",
|
||||
},
|
||||
@@ -335,7 +335,7 @@ GPT Academic Группа QQ разработчиков: `610599535`
|
||||
```
|
||||
В коде использовались многие функции, представленные в других отличных проектах, поэтому их порядок не имеет значения:
|
||||
|
||||
# ChatGLM2-6B от Тиньхуа:
|
||||
# ChatGLM2-6B от Тиньхуа:
|
||||
https://github.com/THUDM/ChatGLM2-6B
|
||||
|
||||
# Линейные модели с ограниченной памятью от Тиньхуа:
|
||||
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
|
||||
# Больше:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
|
||||
@@ -17,18 +17,18 @@ nano config.py
|
||||
|
||||
- # 如果需要在二级路径下运行
|
||||
- # CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
||||
- # if CUSTOM_PATH != "/":
|
||||
- # if CUSTOM_PATH != "/":
|
||||
- # from toolbox import run_gradio_in_subpath
|
||||
- # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
||||
- # else:
|
||||
- # else:
|
||||
- # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
|
||||
|
||||
+ 如果需要在二级路径下运行
|
||||
+ CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
||||
+ if CUSTOM_PATH != "/":
|
||||
+ if CUSTOM_PATH != "/":
|
||||
+ from toolbox import run_gradio_in_subpath
|
||||
+ run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
||||
+ else:
|
||||
+ else:
|
||||
+ demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
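This hunk un-comments the sub-path instructions: when `CUSTOM_PATH` is anything other than "/", the Gradio app is mounted under that prefix via `run_gradio_in_subpath`, otherwise it launches directly. Reassembled as code (the duplicated `if`/`else` lines above are rendering artifacts; `demo`, `PORT`, `AUTHENTICATION` and `get_conf` come from the surrounding main.py):

```python
# Run the Gradio app under a sub-path when CUSTOM_PATH is configured,
# e.g. CUSTOM_PATH = "/gpt_academic/" behind a reverse proxy.
CUSTOM_PATH = get_conf('CUSTOM_PATH')
if CUSTOM_PATH != "/":
    from toolbox import run_gradio_in_subpath
    run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
else:
    demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION,
                favicon_path="docs/logo.png")
```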
@@ -7,13 +7,27 @@ sample = """
|
||||
"""
|
||||
import re
|
||||
|
||||
|
||||
def preprocess_newbing_out(s):
|
||||
pattern = r'\^(\d+)\^' # 匹配^数字^
|
||||
pattern2 = r'\[(\d+)\]' # 匹配^数字^
|
||||
sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
|
||||
result = re.sub(pattern, sub, s) # 替换操作
|
||||
if '[1]' in result:
|
||||
result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
|
||||
pattern = r"\^(\d+)\^" # 匹配^数字^
|
||||
pattern2 = r"\[(\d+)\]" # 匹配^数字^
|
||||
|
||||
def sub(m):
|
||||
return "\\[" + m.group(1) + "\\]" # 将匹配到的数字作为替换值
|
||||
|
||||
result = re.sub(pattern, sub, s) # 替换操作
|
||||
if "[1]" in result:
|
||||
result += (
|
||||
'<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>'
|
||||
+ "<br/>".join(
|
||||
[
|
||||
re.sub(pattern2, sub, r)
|
||||
for r in result.split("\n")
|
||||
if r.startswith("[")
|
||||
]
|
||||
)
|
||||
+ "</small>"
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
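For reference, `preprocess_newbing_out` rewrites NewBing's `^n^` citation markers as escaped `\[n\]` and, whenever citations are present, appends a small footnote block built from the lines that start with "[". The reformatting above does not change that behaviour; a quick usage example:

```python
reply = "FAISS is a similarity-search library^1^\n[1]: https://github.com/facebookresearch/faiss"
print(preprocess_newbing_out(reply))
# The ^1^ marker becomes \[1\], and because a "[1]: ..." reference line exists,
# an <hr>-separated <small> footnote block repeating that reference is appended.
```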
@@ -28,37 +42,39 @@ def close_up_code_segment_during_stream(gpt_reply):
|
||||
str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
|
||||
|
||||
"""
|
||||
if '```' not in gpt_reply:
|
||||
if "```" not in gpt_reply:
|
||||
return gpt_reply
|
||||
if gpt_reply.endswith('```'):
|
||||
if gpt_reply.endswith("```"):
|
||||
return gpt_reply
|
||||
|
||||
# 排除了以上两个情况,我们
|
||||
segments = gpt_reply.split('```')
|
||||
segments = gpt_reply.split("```")
|
||||
n_mark = len(segments) - 1
|
||||
if n_mark % 2 == 1:
|
||||
# print('输出代码片段中!')
|
||||
return gpt_reply+'\n```'
|
||||
return gpt_reply + "\n```"
|
||||
else:
|
||||
return gpt_reply
|
||||
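`close_up_code_segment_during_stream` exists because a streamed reply can stop in the middle of a fenced code block: when the number of ``` markers is odd, a closing fence is appended so the partial Markdown still renders. For example:

```python
partial = "Here is the fix:\n```python\nprint('hi')"
print(close_up_code_segment_during_stream(partial))
# The open fence is closed by appending "\n```"; replies whose fences are
# already balanced come back unchanged.
```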
|
||||
|
||||
|
||||
import markdown
|
||||
from latex2mathml.converter import convert as tex2mathml
|
||||
from functools import wraps, lru_cache
|
||||
|
||||
|
||||
def markdown_convertion(txt):
|
||||
"""
|
||||
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
|
||||
"""
|
||||
pre = '<div class="markdown-body">'
|
||||
suf = '</div>'
|
||||
suf = "</div>"
|
||||
if txt.startswith(pre) and txt.endswith(suf):
|
||||
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
|
||||
return txt # 已经被转化过,不需要再次转化
|
||||
|
||||
return txt # 已经被转化过,不需要再次转化
|
||||
|
||||
markdown_extension_configs = {
|
||||
'mdx_math': {
|
||||
'enable_dollar_delimiter': True,
|
||||
'use_gitlab_delimiters': False,
|
||||
"mdx_math": {
|
||||
"enable_dollar_delimiter": True,
|
||||
"use_gitlab_delimiters": False,
|
||||
},
|
||||
}
|
||||
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
|
||||
@@ -72,19 +88,19 @@ def markdown_convertion(txt):
|
||||
|
||||
def replace_math_no_render(match):
|
||||
content = match.group(1)
|
||||
if 'mode=display' in match.group(0):
|
||||
content = content.replace('\n', '</br>')
|
||||
return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
|
||||
if "mode=display" in match.group(0):
|
||||
content = content.replace("\n", "</br>")
|
||||
return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
|
||||
else:
|
||||
return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
|
||||
return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'
|
||||
|
||||
def replace_math_render(match):
|
||||
content = match.group(1)
|
||||
if 'mode=display' in match.group(0):
|
||||
if '\\begin{aligned}' in content:
|
||||
content = content.replace('\\begin{aligned}', '\\begin{array}')
|
||||
content = content.replace('\\end{aligned}', '\\end{array}')
|
||||
content = content.replace('&', ' ')
|
||||
if "mode=display" in match.group(0):
|
||||
if "\\begin{aligned}" in content:
|
||||
content = content.replace("\\begin{aligned}", "\\begin{array}")
|
||||
content = content.replace("\\end{aligned}", "\\end{array}")
|
||||
content = content.replace("&", " ")
|
||||
content = tex2mathml_catch_exception(content, display="block")
|
||||
return content
|
||||
else:
|
||||
@@ -94,37 +110,58 @@ def markdown_convertion(txt):
|
||||
"""
|
||||
解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
|
||||
"""
|
||||
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
|
||||
content = content.replace('</script>\n</script>', '</script>')
|
||||
content = content.replace(
|
||||
'<script type="math/tex">\n<script type="math/tex; mode=display">',
|
||||
'<script type="math/tex; mode=display">',
|
||||
)
|
||||
content = content.replace("</script>\n</script>", "</script>")
|
||||
return content
|
||||
|
||||
|
||||
if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
|
||||
if ("$" in txt) and ("```" not in txt): # 有$标识的公式符号,且没有代码段```的标识
|
||||
# convert everything to html format
|
||||
split = markdown.markdown(text='---')
|
||||
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
|
||||
split = markdown.markdown(text="---")
|
||||
convert_stage_1 = markdown.markdown(
|
||||
text=txt,
|
||||
extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
|
||||
extension_configs=markdown_extension_configs,
|
||||
)
|
||||
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
|
||||
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
|
||||
# 1. convert to easy-to-copy tex (do not render math)
|
||||
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
|
||||
convert_stage_2_1, n = re.subn(
|
||||
find_equation_pattern,
|
||||
replace_math_no_render,
|
||||
convert_stage_1,
|
||||
flags=re.DOTALL,
|
||||
)
|
||||
# 2. convert to rendered equation
|
||||
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
|
||||
convert_stage_2_2, n = re.subn(
|
||||
find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
|
||||
)
|
||||
# cat them together
|
||||
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
|
||||
return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
|
||||
else:
|
||||
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
|
||||
return (
|
||||
pre
|
||||
+ markdown.markdown(
|
||||
txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
|
||||
)
|
||||
+ suf
|
||||
)
|
||||
|
||||
|
||||
sample = preprocess_newbing_out(sample)
|
||||
sample = close_up_code_segment_during_stream(sample)
|
||||
sample = markdown_convertion(sample)
|
||||
with open('tmp.html', 'w', encoding='utf8') as f:
|
||||
f.write("""
|
||||
with open("tmp.html", "w", encoding="utf8") as f:
|
||||
f.write(
|
||||
"""
|
||||
|
||||
<head>
|
||||
<title>My Website</title>
|
||||
<link rel="stylesheet" type="text/css" href="style.css">
|
||||
</head>
|
||||
|
||||
""")
|
||||
"""
|
||||
)
|
||||
f.write(sample)
|
||||
|
||||
@@ -923,7 +923,7 @@
|
||||
"的第": "The",
|
||||
"个片段": "fragment",
|
||||
"总结文章": "Summarize the article",
|
||||
"根据以上的对话": "According to the above dialogue",
|
||||
"根据以上的对话": "According to the conversation above",
|
||||
"的主要内容": "The main content of",
|
||||
"所有文件都总结完成了吗": "Are all files summarized?",
|
||||
"如果是.doc文件": "If it is a .doc file",
|
||||
@@ -1501,7 +1501,7 @@
|
||||
"发送请求到OpenAI后": "After sending the request to OpenAI",
|
||||
"上下布局": "Vertical Layout",
|
||||
"左右布局": "Horizontal Layout",
|
||||
"对话窗的高度": "Height of the Dialogue Window",
|
||||
"对话窗的高度": "Height of the Conversation Window",
|
||||
"重试的次数限制": "Retry Limit",
|
||||
"gpt4现在只对申请成功的人开放": "GPT-4 is now only open to those who have successfully applied",
|
||||
"提高限制请查询": "Please check for higher limits",
|
||||
@@ -2183,9 +2183,8 @@
|
||||
"找不到合适插件执行该任务": "Cannot find a suitable plugin to perform this task",
|
||||
"接驳VoidTerminal": "Connect to VoidTerminal",
|
||||
"**很好": "**Very good",
|
||||
"对话|编程": "Conversation|Programming",
|
||||
"对话|编程|学术": "Conversation|Programming|Academic",
|
||||
"4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
|
||||
"对话|编程": "Conversation&ImageGenerating|Programming",
|
||||
"对话|编程|学术": "Conversation&ImageGenerating|Programming|Academic", "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
|
||||
"「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper",
|
||||
"3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.",
|
||||
"以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper",
|
||||
@@ -2630,7 +2629,7 @@
|
||||
"已经被记忆": "Already memorized",
|
||||
"默认用英文的": "Default to English",
|
||||
"错误追踪": "Error tracking",
|
||||
"对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent",
|
||||
"对话&编程|编程|学术|智能体": "Conversation&ImageGenerating|Programming|Academic|Intelligent agent",
|
||||
"请检查": "Please check",
|
||||
"检测到被滞留的缓存文档": "Detected cached documents being left behind",
|
||||
"还有哪些场合允许使用代理": "What other occasions allow the use of proxies",
|
||||
@@ -2864,7 +2863,7 @@
|
||||
"加载API_KEY": "Loading API_KEY",
|
||||
"协助您编写代码": "Assist you in writing code",
|
||||
"我可以为您提供以下服务": "I can provide you with the following services",
|
||||
"排队中请稍后 ...": "Please wait in line ...",
|
||||
"排队中请稍候 ...": "Please wait in line ...",
|
||||
"建议您使用英文提示词": "It is recommended to use English prompts",
|
||||
"不能支撑AutoGen运行": "Cannot support AutoGen operation",
|
||||
"帮助您解决编程问题": "Help you solve programming problems",
|
||||
@@ -2903,5 +2902,107 @@
|
||||
"高优先级": "High priority",
|
||||
"请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY",
|
||||
"单个azure模型": "Single Azure model",
|
||||
"预留参数 context 未实现": "Reserved parameter 'context' not implemented"
|
||||
}
|
||||
"预留参数 context 未实现": "Reserved parameter 'context' not implemented",
|
||||
"在输入区输入临时API_KEY后提交": "Submit after entering temporary API_KEY in the input area",
|
||||
"鸟": "Bird",
|
||||
"图片中需要修改的位置用橡皮擦擦除为纯白色": "Erase the areas in the image that need to be modified with an eraser to pure white",
|
||||
"└── PDF文档精准解析": "└── Accurate parsing of PDF documents",
|
||||
"└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "└── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description",
|
||||
"等待指令": "Waiting for instructions",
|
||||
"不存在": "Does not exist",
|
||||
"选择游戏": "Select game",
|
||||
"本地大模型示意图": "Local large model diagram",
|
||||
"无视此消息即可": "You can ignore this message",
|
||||
"即RGB=255": "That is, RGB=255",
|
||||
"如需追问": "If you have further questions",
|
||||
"也可以是具体的模型路径": "It can also be a specific model path",
|
||||
"才会起作用": "Will take effect",
|
||||
"下载失败": "Download failed",
|
||||
"网页刷新后失效": "Invalid after webpage refresh",
|
||||
"crazy_functions.互动小游戏-": "crazy_functions.Interactive mini game-",
|
||||
"右对齐": "Right alignment",
|
||||
"您可以调用下拉菜单中的“LoadConversationHistoryArchive”还原当下的对话": "You can use the 'LoadConversationHistoryArchive' in the drop-down menu to restore the current conversation",
|
||||
"左对齐": "Left alignment",
|
||||
"使用默认的 FP16": "Use default FP16",
|
||||
"一小时": "One hour",
|
||||
"从而方便内存的释放": "Thus facilitating memory release",
|
||||
"如何临时更换API_KEY": "How to temporarily change API_KEY",
|
||||
"请输入 1024x1024-HD": "Please enter 1024x1024-HD",
|
||||
"使用 INT8 量化": "Use INT8 quantization",
|
||||
"3. 输入修改需求": "3. Enter modification requirements",
|
||||
"刷新界面 由于请求gpt需要一段时间": "Refreshing the interface takes some time due to the request for gpt",
|
||||
"随机小游戏": "Random mini game",
|
||||
"那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型": "So please specify the specific model in QWEN_MODEL_SELECTION below",
|
||||
"表值": "Table value",
|
||||
"我画你猜": "I draw, you guess",
|
||||
"狗": "Dog",
|
||||
"2. 输入分辨率": "2. Enter resolution",
|
||||
"鱼": "Fish",
|
||||
"尚未完成": "Not yet completed",
|
||||
"表头": "Table header",
|
||||
"填localhost或者127.0.0.1": "Fill in localhost or 127.0.0.1",
|
||||
"请上传jpg格式的图片": "Please upload images in jpg format",
|
||||
"API_URL_REDIRECT填写格式是错误的": "The format of API_URL_REDIRECT is incorrect",
|
||||
"├── RWKV的支持见Wiki": "Support for RWKV is available in the Wiki",
|
||||
"如果中文Prompt效果不理想": "If the Chinese prompt is not effective",
|
||||
"/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix": "/SEAFILE_LOCAL/50503047/My Library/Degree/paperlatex/aaai/Fu_8368_with_appendix",
|
||||
"只有当AVAIL_LLM_MODELS包含了对应本地模型时": "Only when AVAIL_LLM_MODELS contains the corresponding local model",
|
||||
"选择本地模型变体": "Choose the local model variant",
|
||||
"如果您确信自己没填错": "If you are sure you haven't made a mistake",
|
||||
"PyPDF2这个库有严重的内存泄露问题": "PyPDF2 library has serious memory leak issues",
|
||||
"整理文件集合 输出消息": "Organize file collection and output message",
|
||||
"没有检测到任何近期上传的图像文件": "No recently uploaded image files detected",
|
||||
"游戏结束": "Game over",
|
||||
"调用结束": "Call ended",
|
||||
"猫": "Cat",
|
||||
"请及时切换模型": "Please switch models in time",
|
||||
"次中": "In the meantime",
|
||||
"如需生成高清图像": "If you need to generate high-definition images",
|
||||
"CPU 模式": "CPU mode",
|
||||
"项目目录": "Project directory",
|
||||
"动物": "Animal",
|
||||
"居中对齐": "Center alignment",
|
||||
"请注意拓展名需要小写": "Please note that the extension name needs to be lowercase",
|
||||
"重试第": "Retry",
|
||||
"实验性功能": "Experimental feature",
|
||||
"猜错了": "Wrong guess",
|
||||
"打开你的代理软件查看代理协议": "Open your proxy software to view the proxy agreement",
|
||||
"您不需要再重复强调该文件的路径了": "You don't need to emphasize the file path again",
|
||||
"请阅读": "Please read",
|
||||
"请直接输入您的问题": "Please enter your question directly",
|
||||
"API_URL_REDIRECT填错了": "API_URL_REDIRECT is filled incorrectly",
|
||||
"谜底是": "The answer is",
|
||||
"第一个模型": "The first model",
|
||||
"你猜对了!": "You guessed it right!",
|
||||
"已经接收到您上传的文件": "The file you uploaded has been received",
|
||||
"您正在调用“图像生成”插件": "You are calling the 'Image Generation' plugin",
|
||||
"刷新界面 界面更新": "Refresh the interface, interface update",
|
||||
"如果之前已经初始化了游戏实例": "If the game instance has been initialized before",
|
||||
"文件": "File",
|
||||
"老鼠": "Mouse",
|
||||
"列2": "Column 2",
|
||||
"等待图片": "Waiting for image",
|
||||
"使用 INT4 量化": "Use INT4 quantization",
|
||||
"from crazy_functions.互动小游戏 import 随机小游戏": "TranslatedText",
|
||||
"游戏主体": "TranslatedText",
|
||||
"该模型不具备上下文对话能力": "TranslatedText",
|
||||
"列3": "TranslatedText",
|
||||
"清理": "TranslatedText",
|
||||
"检查量化配置": "TranslatedText",
|
||||
"如果游戏结束": "TranslatedText",
|
||||
"蛇": "TranslatedText",
|
||||
"则继续该实例;否则重新初始化": "TranslatedText",
|
||||
"e.g. cat and 猫 are the same thing": "TranslatedText",
|
||||
"第三个模型": "TranslatedText",
|
||||
"如果你选择Qwen系列的模型": "TranslatedText",
|
||||
"列4": "TranslatedText",
|
||||
"输入“exit”获取答案": "TranslatedText",
|
||||
"把它放到子进程中运行": "TranslatedText",
|
||||
"列1": "TranslatedText",
|
||||
"使用该模型需要额外依赖": "TranslatedText",
|
||||
"再试试": "TranslatedText",
|
||||
"1. 上传图片": "TranslatedText",
|
||||
"保存状态": "TranslatedText",
|
||||
"GPT-Academic对话存档": "TranslatedText",
|
||||
"Arxiv论文精细翻译": "TranslatedText"
|
||||
}
|
||||
|
||||
@@ -2106,4 +2106,4 @@
|
||||
"改变输入参数的顺序与结构": "入力パラメータの順序と構造を変更する",
|
||||
"正在精细切分latex文件": "LaTeXファイルを細かく分割しています",
|
||||
"读取文件": "ファイルを読み込んでいます"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,4 +98,4 @@
|
||||
"图片生成_DALLE2": "ImageGeneration_DALLE2",
|
||||
"图片生成_DALLE3": "ImageGeneration_DALLE3",
|
||||
"图片修改_DALLE2": "ImageModification_DALLE2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1043,9 +1043,9 @@
|
||||
"jittorllms响应异常": "jittorllms response exception",
|
||||
"在项目根目录运行这两个指令": "Run these two commands in the project root directory",
|
||||
"获取tokenizer": "Get tokenizer",
|
||||
"chatbot 为WebUI中显示的对话列表": "chatbot is the list of dialogues displayed in WebUI",
|
||||
"chatbot 为WebUI中显示的对话列表": "chatbot is the list of conversations displayed in WebUI",
|
||||
"test_解析一个Cpp项目": "test_parse a Cpp project",
|
||||
"将对话记录history以Markdown格式写入文件中": "Write the dialogue record history to a file in Markdown format",
|
||||
"将对话记录history以Markdown格式写入文件中": "Write the conversations record history to a file in Markdown format",
|
||||
"装饰器函数": "Decorator function",
|
||||
"玫瑰色": "Rose color",
|
||||
"将单空行": "刪除單行空白",
|
||||
@@ -2270,4 +2270,4 @@
|
||||
"标注节点的行数范围": "標註節點的行數範圍",
|
||||
"默认 True": "默認 True",
|
||||
"将两个PDF拼接": "將兩個PDF拼接"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,4 +61,3 @@ VI 两种音频监听模式切换时,需要刷新页面才有效。
|
||||
VII 非localhost运行+非https情况下无法打开录音功能的坑:https://blog.csdn.net/weixin_39461487/article/details/109594434
|
||||
|
||||
## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能
|
||||
|
||||
|
||||
@@ -8,8 +8,8 @@ try {
|
||||
live2d_settings['modelId'] = 5; // 默认模型 ID
|
||||
live2d_settings['modelTexturesId'] = 1; // 默认材质 ID
|
||||
live2d_settings['modelStorage'] = false; // 不储存模型 ID
|
||||
live2d_settings['waifuSize'] = '210x187';
|
||||
live2d_settings['waifuTipsSize'] = '187x52';
|
||||
live2d_settings['waifuSize'] = '210x187';
|
||||
live2d_settings['waifuTipsSize'] = '187x52';
|
||||
live2d_settings['canSwitchModel'] = true;
|
||||
live2d_settings['canSwitchTextures'] = true;
|
||||
live2d_settings['canSwitchHitokoto'] = false;
|
||||
|
||||
@@ -123,4 +123,4 @@
|
||||
<glyph unicode="" d="M512 748.8l211.2 179.2 300.8-198.4-204.8-166.4-307.2 185.6zM1024 396.8l-300.8-198.4-211.2 172.8 300.8 185.6 211.2-160zM300.8 198.4l-300.8 198.4 204.8 166.4 307.2-192-211.2-172.8zM0 729.6l300.8 198.4 211.2-179.2-300.8-192-211.2 172.8zM512 332.8l211.2-179.2 89.6 57.6v-64l-300.8-179.2-300.8 179.2v64l89.6-51.2 211.2 172.8z" />
|
||||
<glyph unicode="" d="M864 249.6c-38.4 0-64 32-64 64v256c0 38.4 32 64 64 64 38.4 0 64-32 64-64v-256c0-32-25.6-64-64-64zM697.6 102.4h-38.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-70.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-32c-19.2 0-38.4 19.2-38.4 44.8v428.8h448v-422.4c0-32-12.8-51.2-38.4-51.2zM736 633.6h-448c0 89.6 32 153.6 76.8 192l-70.4 83.2c-6.4 12.8-6.4 25.6 0 38.4 12.8 12.8 25.6 12.8 38.4 0l83.2-96c32 12.8 64 19.2 96 19.2s70.4-6.4 96-19.2l83.2 96c12.8 12.8 25.6 12.8 38.4 0s12.8-32 0-38.4l-70.4-83.2c44.8-32 76.8-102.4 76.8-192zM441.6 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 12.8 25.6 32-12.8 32-25.6 32zM582.4 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 19.2 25.6 32-12.8 32-25.6 32zM160 249.6c-38.4 0-64 32-64 64v256c0 38.4 25.6 64 64 64s64-32 64-64v-256c0-32-25.6-64-64-64z" />
|
||||
<glyph unicode="" d="M921.6 211.2c-32-153.6-115.2-211.2-147.2-249.6-32-25.6-121.6-25.6-153.6-6.4-38.4 25.6-134.4 25.6-166.4 0-44.8-32-115.2-19.2-128-12.8-256 179.2-352 716.8 12.8 774.4 64 12.8 134.4-32 134.4-32 51.2-25.6 70.4-12.8 115.2 6.4 96 44.8 243.2 44.8 313.6-76.8-147.2-96-153.6-294.4 19.2-403.2zM716.8 960c12.8-70.4-64-224-204.8-230.4-12.8 38.4 32 217.6 204.8 230.4z" />
|
||||
</font></defs></svg>
|
||||
</font></defs></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 56 KiB After Width: | Height: | Size: 56 KiB |
2
docs/waifu_plugin/jquery-ui.min.js
vendored
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
|
||||
@@ -5,11 +5,11 @@ window.live2d_settings = Array(); /*
|
||||
/`ー' L//`ヽ、 Live2D 看板娘 参数设置
|
||||
/ /, /| , , ', Version 1.4.2
|
||||
イ / /-‐/ i L_ ハ ヽ! i Update 2018.11.12
|
||||
レ ヘ 7イ`ト レ'ァ-ト、!ハ| |
|
||||
レ ヘ 7イ`ト レ'ァ-ト、!ハ| |
|
||||
!,/7 '0' ´0iソ| |
|
||||
|.从" _ ,,,, / |./ | 网页添加 Live2D 看板娘
|
||||
レ'| i>.、,,__ _,.イ / .i | https://www.fghrsh.net/post/123.html
|
||||
レ'| | / k_7_/レ'ヽ, ハ. |
|
||||
レ'| | / k_7_/レ'ヽ, ハ. |
|
||||
| |/i 〈|/ i ,.ヘ | i | Thanks
|
||||
.|/ / i: ヘ! \ | journey-ad / https://github.com/journey-ad/live2d_src
|
||||
kヽ>、ハ _,.ヘ、 /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
|
||||
@@ -77,11 +77,11 @@ String.prototype.render = function(context) {
|
||||
|
||||
return this.replace(tokenReg, function (word, slash1, token, slash2) {
|
||||
if (slash1 || slash2) { return word.replace('\\', ''); }
|
||||
|
||||
|
||||
var variables = token.replace(/\s/g, '').split('.');
|
||||
var currentObject = context;
|
||||
var i, length, variable;
|
||||
|
||||
|
||||
for (i = 0, length = variables.length; i < length; ++i) {
|
||||
variable = variables[i];
|
||||
currentObject = currentObject[variable];
|
||||
@@ -101,9 +101,9 @@ function showMessage(text, timeout, flag) {
|
||||
if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
|
||||
if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
|
||||
if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
|
||||
|
||||
|
||||
if(flag) sessionStorage.setItem('waifu-text', text);
|
||||
|
||||
|
||||
$('.waifu-tips').stop();
|
||||
$('.waifu-tips').html(text).fadeTo(200, 1);
|
||||
if (timeout === undefined) timeout = 5000;
|
||||
@@ -121,15 +121,15 @@ function hideMessage(timeout) {
|
||||
function initModel(waifuPath, type) {
|
||||
/* console welcome message */
|
||||
eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
|
||||
|
||||
|
||||
/* 判断 JQuery */
|
||||
if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
|
||||
|
||||
|
||||
/* 加载看板娘样式 */
|
||||
live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
|
||||
live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
|
||||
live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
|
||||
|
||||
|
||||
$("#live2d").attr("width",live2d_settings.waifuSize[0]);
|
||||
$("#live2d").attr("height",live2d_settings.waifuSize[1]);
|
||||
$(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
|
||||
@@ -138,32 +138,32 @@ function initModel(waifuPath, type) {
|
||||
$(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
|
||||
$(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
|
||||
$(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
|
||||
|
||||
|
||||
if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
|
||||
else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
|
||||
|
||||
|
||||
window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
|
||||
if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
|
||||
|
||||
|
||||
try {
|
||||
if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
|
||||
else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
|
||||
else $(".waifu").css("transition", 'all .3s ease-in-out');
|
||||
} catch(err) { console.log('[Error] JQuery UI is not defined.') }
|
||||
|
||||
|
||||
live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
|
||||
if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
|
||||
|
||||
|
||||
$('.waifu-tool .fui-home').click(function (){
|
||||
//window.location = 'https://www.fghrsh.net/';
|
||||
window.location = live2d_settings.homePageUrl;
|
||||
});
|
||||
|
||||
|
||||
$('.waifu-tool .fui-info-circle').click(function (){
|
||||
//window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
|
||||
window.open(live2d_settings.aboutPageUrl);
|
||||
});
|
||||
|
||||
|
||||
if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
|
||||
$.ajax({
|
||||
cache: true,
|
||||
@@ -172,7 +172,7 @@ function initModel(waifuPath, type) {
|
||||
success: function (result){ loadTipsMessage(result); }
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
|
||||
if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
|
||||
if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
|
||||
@@ -185,7 +185,7 @@ function initModel(waifuPath, type) {
|
||||
if (waifuPath === undefined) waifuPath = '';
|
||||
var modelId = localStorage.getItem('modelId');
|
||||
var modelTexturesId = localStorage.getItem('modelTexturesId');
|
||||
|
||||
|
||||
if (!live2d_settings.modelStorage || modelId == null) {
|
||||
var modelId = live2d_settings.modelId;
|
||||
var modelTexturesId = live2d_settings.modelTexturesId;
|
||||
@@ -204,7 +204,7 @@ function loadModel(modelId, modelTexturesId=0) {
|
||||
|
||||
function loadTipsMessage(result) {
|
||||
window.waifu_tips = result;
|
||||
|
||||
|
||||
$.each(result.mouseover, function (index, tips){
|
||||
$(document).on("mouseover", tips.selector, function (){
|
||||
var text = getRandText(tips.text);
|
||||
@@ -223,82 +223,50 @@ function loadTipsMessage(result) {
|
||||
var now = new Date();
|
||||
var after = tips.date.split('-')[0];
|
||||
var before = tips.date.split('-')[1] || after;
|
||||
|
||||
if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
|
||||
|
||||
if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
|
||||
(after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
|
||||
var text = getRandText(tips.text);
|
||||
text = text.render({year: now.getFullYear()});
|
||||
showMessage(text, 6000, true);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
if (live2d_settings.showF12OpenMsg) {
|
||||
re.toString = function() {
|
||||
showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
|
||||
return '';
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
if (live2d_settings.showCopyMessage) {
|
||||
$(document).on('copy', function() {
|
||||
showMessage(getRandText(result.waifu.copy_message), 5000, true);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
$('.waifu-tool .fui-photo').click(function(){
|
||||
showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
|
||||
window.Live2D.captureName = live2d_settings.screenshotCaptureName;
|
||||
window.Live2D.captureFrame = true;
|
||||
});
|
||||
|
||||
|
||||
$('.waifu-tool .fui-cross').click(function(){
|
||||
sessionStorage.setItem('waifu-dsiplay', 'none');
|
||||
showMessage(getRandText(result.waifu.hidden_message), 1300, true);
|
||||
window.setTimeout(function() {$('.waifu').hide();}, 1300);
|
||||
});
|
||||
|
||||
|
||||
window.showWelcomeMessage = function(result) {
|
||||
var text;
|
||||
if (window.location.href == live2d_settings.homePageUrl) {
|
||||
var now = (new Date()).getHours();
|
||||
if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']);
|
||||
else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']);
|
||||
else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']);
|
||||
else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']);
|
||||
else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']);
|
||||
else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']);
|
||||
else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']);
|
||||
else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']);
|
||||
else text = getRandText(result.waifu.hour_tips.default);
|
||||
} else {
|
||||
var referrer_message = result.waifu.referrer_message;
|
||||
if (document.referrer !== '') {
|
||||
var referrer = document.createElement('a');
|
||||
referrer.href = document.referrer;
|
||||
var domain = referrer.hostname.split('.')[1];
|
||||
if (window.location.hostname == referrer.hostname)
|
||||
text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1];
|
||||
else if (domain == 'baidu')
|
||||
text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1];
|
||||
else if (domain == 'so')
|
||||
text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1];
|
||||
else if (domain == 'google')
|
||||
text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1];
|
||||
else {
|
||||
$.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)});
|
||||
text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1];
|
||||
}
|
||||
} else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1];
|
||||
}
|
||||
showMessage(text, 6000);
|
||||
showMessage('欢迎使用GPT-Academic', 6000);
|
||||
}; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
|
||||
|
||||
|
||||
var waifu_tips = result.waifu;
|
||||
|
||||
|
||||
function loadOtherModel() {
|
||||
var modelId = modelStorageGetItem('modelId');
|
||||
var modelRandMode = live2d_settings.modelRandMode;
|
||||
|
||||
|
||||
$.ajax({
|
||||
cache: modelRandMode == 'switch' ? true : false,
|
||||
url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
|
||||
@@ -311,12 +279,12 @@ function loadTipsMessage(result) {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
function loadRandTextures() {
|
||||
var modelId = modelStorageGetItem('modelId');
|
||||
var modelTexturesId = modelStorageGetItem('modelTexturesId');
|
||||
var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
|
||||
|
||||
|
||||
$.ajax({
|
||||
cache: modelTexturesRandMode == 'switch' ? true : false,
|
||||
url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
|
||||
@@ -329,32 +297,32 @@ function loadTipsMessage(result) {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
|
||||
|
||||
|
||||
/* 检测用户活动状态,并在空闲时显示一言 */
|
||||
if (live2d_settings.showHitokoto) {
|
||||
window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
|
||||
$(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
|
||||
setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
|
||||
}
|
||||
|
||||
|
||||
function ifActed() {
|
||||
if (!hitokotoInterval) {
|
||||
hitokotoInterval = true;
|
||||
hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function elseActed() {
|
||||
getActed = hitokotoInterval = false;
|
||||
window.clearInterval(hitokotoTimer);
|
||||
}
|
||||
|
||||
|
||||
function showHitokotoActed() {
|
||||
if ($(document)[0].visibilityState == 'visible') showHitokoto();
|
||||
}
|
||||
|
||||
|
||||
function showHitokoto() {
|
||||
switch(live2d_settings.hitokotoAPI) {
|
||||
case 'lwl12.com':
|
||||
@@ -398,7 +366,7 @@ function loadTipsMessage(result) {
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
$('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
|
||||
$('.waifu-tool .fui-user').click(function (){loadRandTextures()});
|
||||
$('.waifu-tool .fui-chat').click(function (){showHitokoto()});
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
},
|
||||
"model_message": {
|
||||
"1": ["来自 Potion Maker 的 Pio 酱 ~"],
|
||||
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
|
||||
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
|
||||
},
|
||||
"hitokoto_api_message": {
|
||||
"lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
|
||||
@@ -83,8 +83,8 @@
|
||||
"很多强大的函数插件隐藏在下拉菜单中呢。",
|
||||
"红色的插件,使用之前需要把文件上传进去哦。",
|
||||
"想添加功能按钮吗?读读readme很容易就学会啦。",
|
||||
"敏感或机密的信息,不可以问chatGPT的哦!",
|
||||
"chatGPT究竟是划时代的创新,还是扼杀创造力的毒药呢?"
|
||||
"敏感或机密的信息,不可以问AI的哦!",
|
||||
"LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?"
|
||||
] }
|
||||
],
|
||||
"click": [
|
||||
@@ -92,8 +92,6 @@
|
||||
"selector": ".waifu #live2d",
|
||||
"text": [
|
||||
"是…是不小心碰到了吧",
|
||||
"萝莉控是什么呀",
|
||||
"你看到我的小熊了吗",
|
||||
"再摸的话我可要报警了!⌇●﹏●⌇",
|
||||
"110吗,这里有个变态一直在摸我(ó﹏ò。)"
|
||||
]
|
||||
@@ -113,4 +111,4 @@
|
||||
{ "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
|
||||
{ "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -287,4 +287,4 @@
|
||||
}
|
||||
.fui-user:before {
|
||||
content: "\e631";
|
||||
}
|
||||
}
|
||||
|
||||
128
main.py
@@ -1,14 +1,25 @@
|
||||
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
||||
import pickle
|
||||
import base64
|
||||
|
||||
help_menu_description = \
|
||||
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
|
||||
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
|
||||
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
|
||||
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
|
||||
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
|
||||
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
|
||||
</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
|
||||
</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端
|
||||
</br></br>如何保存对话: 点击保存当前的对话按钮
|
||||
</br></br>如何语音对话: 请阅读Wiki
|
||||
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
|
||||
|
||||
def main():
|
||||
import gradio as gr
|
||||
if gr.__version__ not in ['3.32.6']:
|
||||
if gr.__version__ not in ['3.32.6', '3.32.7']:
|
||||
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
||||
from request_llms.bridge_all import predict
|
||||
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
||||
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
||||
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
|
||||
@@ -18,21 +29,11 @@ def main():
|
||||
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
||||
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
||||
from check_proxy import get_current_version
|
||||
from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme
|
||||
|
||||
from themes.theme import adjust_theme, advanced_css, theme_declaration
|
||||
from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
||||
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
|
||||
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
||||
description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
|
||||
description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
|
||||
description += "</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), "
|
||||
description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)."
|
||||
description += "</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交"
|
||||
description += "</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮"
|
||||
description += "</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮"
|
||||
description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
|
||||
description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
|
||||
description += "</br></br>如何语音对话: 请阅读Wiki"
|
||||
description += "</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"
|
||||
|
||||
|
||||
# 问询记录, python 版本建议3.9+(越新越好)
|
||||
import logging, uuid
|
||||
os.makedirs(PATH_LOGGING, exist_ok=True)
|
||||
@@ -85,7 +86,7 @@ def main():
|
||||
with gr_L2(scale=1, elem_id="gpt-panel"):
|
||||
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
|
||||
with gr.Row():
|
||||
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
|
||||
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
|
||||
with gr.Row():
|
||||
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
|
||||
with gr.Row():
|
||||
@@ -138,17 +139,17 @@ def main():
|
||||
with gr.Row():
|
||||
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
|
||||
with gr.Row():
|
||||
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
|
||||
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
|
||||
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
|
||||
|
||||
|
||||
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
|
||||
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
|
||||
with gr.Row():
|
||||
with gr.Tab("上传文件", elem_id="interact-panel"):
|
||||
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
|
||||
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
|
||||
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
|
||||
|
||||
with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
|
||||
with gr.Tab("更换模型", elem_id="interact-panel"):
|
||||
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
||||
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
||||
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
||||
@@ -160,41 +161,25 @@ def main():
|
||||
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
|
||||
value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
|
||||
checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
|
||||
value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
|
||||
value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
|
||||
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
|
||||
dark_mode_btn.click(None, None, None, _js="""() => {
|
||||
if (document.querySelectorAll('.dark').length) {
|
||||
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
||||
} else {
|
||||
document.querySelector('body').classList.add('dark');
|
||||
}
|
||||
}""",
|
||||
)
|
||||
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
|
||||
with gr.Tab("帮助", elem_id="interact-panel"):
|
||||
gr.Markdown(description)
|
||||
gr.Markdown(help_menu_description)
|
||||
|
||||
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
|
||||
with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
|
||||
with gr.Row() as row:
|
||||
row.style(equal_height=True)
|
||||
with gr.Column(scale=10):
|
||||
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", lines=8, label="输入区2").style(container=False)
|
||||
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.",
|
||||
elem_id='user_input_float', lines=8, label="输入区2").style(container=False)
|
||||
with gr.Column(scale=1, min_width=40):
|
||||
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
|
||||
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
|
||||
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
|
||||
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
|
||||
|
||||
def to_cookie_str(d):
|
||||
# Pickle the dictionary and encode it as a string
|
||||
pickled_dict = pickle.dumps(d)
|
||||
cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
|
||||
return cookie_value
|
||||
|
||||
def from_cookie_str(c):
|
||||
# Decode the base64-encoded string and unpickle it into a dictionary
|
||||
pickled_dict = base64.b64decode(c.encode('utf-8'))
|
||||
return pickle.loads(pickled_dict)
|
||||
|
||||
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
||||
with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
|
||||
@@ -226,11 +211,11 @@ def main():
|
||||
else:
|
||||
ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
|
||||
ret.update({cookies: cookies_})
|
||||
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||
except: persistent_cookie_ = {}
|
||||
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
||||
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||
ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
|
||||
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
||||
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||
ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
|
||||
return ret
|
||||
|
||||
def reflesh_btn(persistent_cookie_, cookies_):
|
||||
@@ -251,10 +236,11 @@ def main():
|
||||
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
||||
return ret
|
||||
|
||||
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies],[cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||
h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
||||
[persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""") # save persistent cookie
|
||||
# save persistent cookie
|
||||
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")
|
||||
|
||||
# 功能区显示开关与功能区的互动
|
||||
def fn_area_visibility(a):
|
||||
@@ -304,8 +290,8 @@ def main():
|
||||
click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
|
||||
cancel_handles.append(click_handle)
|
||||
# 文件上传区,接收文件后与chatbot的互动
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
|
||||
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
|
||||
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
|
||||
# 函数插件-固定按钮区
|
||||
for k in plugins:
|
||||
if not plugins[k].get("AsButton", True): continue
|
||||
@@ -341,18 +327,7 @@ def main():
|
||||
None,
|
||||
[secret_css],
|
||||
None,
|
||||
_js="""(css) => {
|
||||
var existingStyles = document.querySelectorAll("style[data-loaded-css]");
|
||||
for (var i = 0; i < existingStyles.length; i++) {
|
||||
var style = existingStyles[i];
|
||||
style.parentNode.removeChild(style);
|
||||
}
|
||||
var styleElement = document.createElement('style');
|
||||
styleElement.setAttribute('data-loaded-css', css);
|
||||
styleElement.innerHTML = css;
|
||||
document.head.appendChild(styleElement);
|
||||
}
|
||||
"""
|
||||
_js=js_code_for_css_changing
|
||||
)
|
||||
# 随变按钮的回调函数注册
|
||||
def route(request: gr.Request, k, *args, **kwargs):
|
||||
@@ -384,27 +359,10 @@ def main():
|
||||
rad.feed(cookies['uuid'].hex, audio)
|
||||
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
||||
|
||||
def init_cookie(cookies, chatbot):
|
||||
# 为每一位访问的用户赋予一个独一无二的uuid编码
|
||||
cookies.update({'uuid': uuid.uuid4()})
|
||||
return cookies
|
||||
|
||||
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
|
||||
darkmode_js = """(dark) => {
|
||||
dark = dark == "True";
|
||||
if (document.querySelectorAll('.dark').length) {
|
||||
if (!dark){
|
||||
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
||||
}
|
||||
} else {
|
||||
if (dark){
|
||||
document.querySelector('body').classList.add('dark');
|
||||
}
|
||||
}
|
||||
}"""
|
||||
load_cookie_js = """(persistent_cookie) => {
|
||||
return getCookie("persistent_cookie");
|
||||
}"""
|
||||
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
|
||||
darkmode_js = js_code_for_darkmode_init
|
||||
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
|
||||
demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
|
||||
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
||||
|
||||
@@ -417,7 +375,7 @@ def main():
|
||||
|
||||
def auto_updates(): time.sleep(0); auto_update()
|
||||
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
|
||||
def warm_up_mods(): time.sleep(4); warm_up_modules()
|
||||
def warm_up_mods(): time.sleep(6); warm_up_modules()
|
||||
|
||||
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
||||
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
||||
|
||||
@@ -182,12 +182,12 @@ cached_translation = read_map_from_json(language=LANG)
|
||||
def trans(word_to_translate, language, special=False):
|
||||
if len(word_to_translate) == 0: return {}
|
||||
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
||||
from toolbox import get_conf, ChatBotWithCookies
|
||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
|
||||
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
|
||||
from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
|
||||
|
||||
cookies = load_chat_cookies()
|
||||
llm_kwargs = {
|
||||
'api_key': API_KEY,
|
||||
'llm_model': LLM_MODEL,
|
||||
'api_key': cookies['api_key'],
|
||||
'llm_model': cookies['llm_model'],
|
||||
'top_p':1.0,
|
||||
'max_length': None,
|
||||
'temperature':0.4,
|
||||
@@ -245,15 +245,15 @@ def trans(word_to_translate, language, special=False):
|
||||
def trans_json(word_to_translate, language, special=False):
|
||||
if len(word_to_translate) == 0: return {}
|
||||
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
||||
from toolbox import get_conf, ChatBotWithCookies
|
||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
|
||||
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
|
||||
from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
|
||||
|
||||
cookies = load_chat_cookies()
|
||||
llm_kwargs = {
|
||||
'api_key': API_KEY,
|
||||
'llm_model': LLM_MODEL,
|
||||
'api_key': cookies['api_key'],
|
||||
'llm_model': cookies['llm_model'],
|
||||
'top_p':1.0,
|
||||
'max_length': None,
|
||||
'temperature':0.1,
|
||||
'temperature':0.4,
|
||||
}
|
||||
import random
|
||||
N_EACH_REQ = random.randint(16, 32)
|
||||
|
||||
@@ -1,79 +1,35 @@
|
||||
# 如何使用其他大语言模型
|
||||
|
||||
## ChatGLM
|
||||
|
||||
- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt`
|
||||
- 修改配置,在config.py中将LLM_MODEL的值改为"chatglm"
|
||||
|
||||
``` sh
|
||||
LLM_MODEL = "chatglm"
|
||||
```
|
||||
- 运行!
|
||||
``` sh
|
||||
python main.py
|
||||
```
|
||||
|
||||
## Claude-Stack
|
||||
|
||||
- 请参考此教程获取 https://zhuanlan.zhihu.com/p/627485689
|
||||
- 1、SLACK_CLAUDE_BOT_ID
|
||||
- 2、SLACK_CLAUDE_USER_TOKEN
|
||||
|
||||
- 把token加入config.py
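
As a rough illustration only (the two setting names come from the list above; the values shown are hypothetical placeholders, obtain the real ones from the linked tutorial), the corresponding config.py entries might look like:

```python
# config.py -- hypothetical placeholder values
SLACK_CLAUDE_BOT_ID = "B05XXXXXXXX"        # member ID of the Claude bot in your Slack workspace
SLACK_CLAUDE_USER_TOKEN = "xoxp-XXXXXXXX"  # your Slack user token
```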
|
||||
|
||||
## Newbing
|
||||
|
||||
- 使用cookie editor获取cookie(json)
|
||||
- 把cookie(json)加入config.py (NEWBING_COOKIES)
|
||||
|
||||
## Moss
|
||||
- 使用docker-compose
|
||||
|
||||
## RWKV
|
||||
- 使用docker-compose
|
||||
|
||||
## LLAMA
|
||||
- 使用docker-compose
|
||||
|
||||
## 盘古
|
||||
- 使用docker-compose
|
||||
P.S. 如果您按照以下步骤成功接入了新的大模型,欢迎发Pull Requests(如果您在自己接入新模型的过程中遇到困难,欢迎加README底部QQ群联系群主)
|
||||
|
||||
|
||||
---
|
||||
## Text-Generation-UI (TGUI,调试中,暂不可用)
|
||||
# 如何接入其他本地大语言模型
|
||||
|
||||
### 1. 部署TGUI
|
||||
``` sh
|
||||
# 1 下载模型
|
||||
git clone https://github.com/oobabooga/text-generation-webui.git
|
||||
# 2 这个仓库的最新代码有问题,回滚到几周之前
|
||||
git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d
|
||||
# 3 切换路径
|
||||
cd text-generation-webui
|
||||
# 4 安装text-generation的额外依赖
|
||||
pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
|
||||
# 5 下载模型
|
||||
python download-model.py facebook/galactica-1.3b
|
||||
# 其他可选如 facebook/opt-1.3b
|
||||
# facebook/galactica-1.3b
|
||||
# facebook/galactica-6.7b
|
||||
# facebook/galactica-120b
|
||||
# facebook/pygmalion-1.3b 等
|
||||
# 详情见 https://github.com/oobabooga/text-generation-webui
|
||||
1. 复制`request_llms/bridge_llama2.py`,重命名为你喜欢的名字
|
||||
|
||||
# 6 启动text-generation
|
||||
python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b
|
||||
```
|
||||
2. 修改`load_model_and_tokenizer`方法,加载你的模型和分词器(去该模型官网找demo,复制粘贴即可)
|
||||
|
||||
### 2. 修改config.py
|
||||
3. 修改`llm_stream_generator`方法,定义推理模型(去该模型官网找demo,复制粘贴即可)
|
||||
|
||||
``` sh
|
||||
# LLM_MODEL格式: tgui:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致
|
||||
LLM_MODEL = "tgui:galactica-1.3b@localhost:7860"
|
||||
```
|
||||
4. 命令行测试
|
||||
- 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了)
|
||||
- 运行`python tests/test_llms.py`
|
||||
|
||||
### 3. 运行!
|
||||
``` sh
|
||||
cd chatgpt-academic
|
||||
python main.py
|
||||
```
|
||||
5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)
|
||||
|
||||
6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
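
To make steps 1-3 above concrete, below is a minimal, non-authoritative sketch of such a bridge module. It follows the LocalLLMHandle pattern used by the existing bridges under request_llms (compare request_llms/bridge_deepseekcoder.py further down this page); the module name, model name, HuggingFace repo id and generation parameters are hypothetical placeholders:

```python
# request_llms/bridge_my_model.py  (hypothetical file name, minimal sketch only)
model_name = "my-local-model"
cmd_to_install = "pip install -r request_llms/requirements_my_model.txt"  # hint shown when imports fail

from threading import Thread
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


class GetMyModelHandle(LocalLLMHandle):

    def load_model_info(self):
        # runs in the child process
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # step 2: load the model and tokenizer, usually copy-pasted from the model's official demo
        from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
        repo = "some-org/my-local-model"  # hypothetical HuggingFace repo id
        tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
        self._streamer = TextIteratorStreamer(tokenizer)
        return model, tokenizer

    def llm_stream_generator(self, **kwargs):
        # step 3: run generation in a background thread and yield partial text as it arrives
        query = kwargs['query']
        inputs = self._tokenizer(query, return_tensors="pt").to(self._model.device)
        Thread(target=self._model.generate, daemon=True,
               kwargs=dict(**inputs, max_new_tokens=512, streamer=self._streamer)).start()
        generated = ""
        for new_text in self._streamer:
            generated += new_text
            yield generated

    def try_to_import_special_deps(self, **kwargs):
        pass  # import anything here that fails without the extra requirements file


# expose the two entry points that request_llms/bridge_all.py expects
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetMyModelHandle, model_name)
```

Once it streams text from the command line (step 4, via tests/test_llms.py), steps 5 and 6 only register the new model key in request_llms/bridge_all.py and config.py.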
|
||||
|
||||
|
||||
# 如何接入其他在线大语言模型
|
||||
|
||||
1. 复制`request_llms/bridge_zhipu.py`,重命名为你喜欢的名字
|
||||
|
||||
2. 修改`predict_no_ui_long_connection`
|
||||
|
||||
3. 修改`predict`
|
||||
|
||||
4. 命令行测试
|
||||
- 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了)
|
||||
- 运行`python tests/test_llms.py`
|
||||
|
||||
5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)
|
||||
|
||||
6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
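
For step 5, the registration in request_llms/bridge_all.py mirrors the existing entries (the gemini-pro and qwen blocks in the diff below show the real pattern); a hedged sketch with a hypothetical model key:

```python
# inside request_llms/bridge_all.py -- "my-online-llm" and the bridge module name are hypothetical
if "my-online-llm" in AVAIL_LLM_MODELS:
    try:
        from .bridge_my_online_llm import predict_no_ui_long_connection as my_noui
        from .bridge_my_online_llm import predict as my_ui
        model_info.update({
            "my-online-llm": {
                "fn_with_ui": my_ui,              # streaming handler used by the WebUI
                "fn_without_ui": my_noui,         # blocking handler used by plugins
                "endpoint": None,                 # or the provider's chat-completion URL
                "max_token": 4096,                # assumed context window
                "tokenizer": tokenizer_gpt35,     # reuse the gpt-3.5 tokenizer for token counting
                "token_cnt": get_token_num_gpt35,
            }
        })
    except:
        print(trimmed_format_exc())
```

Step 6 then amounts to adding "my-online-llm" to AVAIL_LLM_MODELS (and, if desired, LLM_MODEL) in config.py before running python main.py.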
|
||||
|
||||
@@ -28,6 +28,9 @@ from .bridge_chatglm3 import predict as chatglm3_ui
|
||||
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
|
||||
from .bridge_qianfan import predict as qianfan_ui
|
||||
|
||||
from .bridge_google_gemini import predict as genai_ui
|
||||
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
|
||||
|
||||
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
|
||||
|
||||
class LazyloadTiktoken(object):
|
||||
@@ -246,6 +249,22 @@ model_info = {
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
"gemini-pro": {
|
||||
"fn_with_ui": genai_ui,
|
||||
"fn_without_ui": genai_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 1024 * 32,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
"gemini-pro-vision": {
|
||||
"fn_with_ui": genai_ui,
|
||||
"fn_without_ui": genai_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 1024 * 32,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
}
|
||||
|
||||
# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
|
||||
@@ -431,14 +450,14 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
if "qwen" in AVAIL_LLM_MODELS:
|
||||
if "qwen-local" in AVAIL_LLM_MODELS:
|
||||
try:
|
||||
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
||||
from .bridge_qwen import predict as qwen_ui
|
||||
from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
|
||||
from .bridge_qwen_local import predict as qwen_local_ui
|
||||
model_info.update({
|
||||
"qwen": {
|
||||
"fn_with_ui": qwen_ui,
|
||||
"fn_without_ui": qwen_noui,
|
||||
"qwen-local": {
|
||||
"fn_with_ui": qwen_local_ui,
|
||||
"fn_without_ui": qwen_local_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 4096,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
@@ -447,16 +466,32 @@ if "qwen" in AVAIL_LLM_MODELS:
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
|
||||
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
|
||||
try:
|
||||
from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
|
||||
from .bridge_chatgpt_website import predict as chatgpt_website_ui
|
||||
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
||||
from .bridge_qwen import predict as qwen_ui
|
||||
model_info.update({
|
||||
"chatgpt_website": {
|
||||
"fn_with_ui": chatgpt_website_ui,
|
||||
"fn_without_ui": chatgpt_website_noui,
|
||||
"endpoint": openai_endpoint,
|
||||
"max_token": 4096,
|
||||
"qwen-turbo": {
|
||||
"fn_with_ui": qwen_ui,
|
||||
"fn_without_ui": qwen_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 6144,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
"qwen-plus": {
|
||||
"fn_with_ui": qwen_ui,
|
||||
"fn_without_ui": qwen_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 30720,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
"qwen-max": {
|
||||
"fn_with_ui": qwen_ui,
|
||||
"fn_without_ui": qwen_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 28672,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
}
|
||||
@@ -543,6 +578,22 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
||||
try:
|
||||
from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
|
||||
from .bridge_deepseekcoder import predict as deepseekcoder_ui
|
||||
model_info.update({
|
||||
"deepseekcoder": {
|
||||
"fn_with_ui": deepseekcoder_ui,
|
||||
"fn_without_ui": deepseekcoder_noui,
|
||||
"endpoint": None,
|
||||
"max_token": 2048,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
}
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
|
||||
# <-- 用于定义和切换多个azure模型 -->
|
||||
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
|
||||
|
||||
@@ -51,7 +51,8 @@ def decode_chunk(chunk):
|
||||
chunkjson = json.loads(chunk_decoded[6:])
|
||||
has_choices = 'choices' in chunkjson
|
||||
if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
|
||||
if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"]
|
||||
if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
|
||||
if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
|
||||
if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
|
||||
except:
|
||||
pass
|
||||
@@ -101,20 +102,25 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
result = ''
|
||||
json_data = None
|
||||
while True:
|
||||
try: chunk = next(stream_response).decode()
|
||||
try: chunk = next(stream_response)
|
||||
except StopIteration:
|
||||
break
|
||||
except requests.exceptions.ConnectionError:
|
||||
chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
|
||||
if len(chunk)==0: continue
|
||||
if not chunk.startswith('data:'):
|
||||
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
|
||||
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||
chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
|
||||
if len(chunk_decoded)==0: continue
|
||||
if not chunk_decoded.startswith('data:'):
|
||||
error_msg = get_full_error(chunk, stream_response).decode()
|
||||
if "reduce the length" in error_msg:
|
||||
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
|
||||
else:
|
||||
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
|
||||
if ('data: [DONE]' in chunk): break # api2d 正常完成
|
||||
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
|
||||
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
|
||||
# 提前读取一些信息 (用于判断异常)
|
||||
if has_choices and not choice_valid:
|
||||
# 一些垃圾第三方接口的出现这样的错误
|
||||
continue
|
||||
json_data = chunkjson['choices'][0]
|
||||
delta = json_data["delta"]
|
||||
if len(delta) == 0: break
|
||||
if "role" in delta: continue
|
||||
|
||||
@@ -15,29 +15,16 @@ import requests
|
||||
import base64
|
||||
import os
|
||||
import glob
|
||||
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \
|
||||
update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files
|
||||
|
||||
|
||||
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, update_ui_lastest_msg, get_max_token
|
||||
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
||||
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
|
||||
|
||||
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
||||
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
||||
|
||||
def have_any_recent_upload_image_files(chatbot):
|
||||
_5min = 5 * 60
|
||||
if chatbot is None: return False, None # chatbot is None
|
||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||
if not most_recent_uploaded: return False, None # most_recent_uploaded is None
|
||||
if time.time() - most_recent_uploaded["time"] < _5min:
|
||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||
path = most_recent_uploaded['path']
|
||||
file_manifest = [f for f in glob.glob(f'{path}/**/*.jpg', recursive=True)]
|
||||
file_manifest += [f for f in glob.glob(f'{path}/**/*.jpeg', recursive=True)]
|
||||
file_manifest += [f for f in glob.glob(f'{path}/**/*.png', recursive=True)]
|
||||
if len(file_manifest) == 0: return False, None
|
||||
return True, file_manifest # most_recent_uploaded is new
|
||||
else:
|
||||
return False, None # most_recent_uploaded is too old
|
||||
|
||||
def report_invalid_key(key):
|
||||
if get_conf("BLOCK_INVALID_APIKEY"):
|
||||
@@ -258,10 +245,6 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg,
|
||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
|
||||
return chatbot, history
|
||||
|
||||
# Function to encode the image
|
||||
def encode_image(image_path):
|
||||
with open(image_path, "rb") as image_file:
|
||||
return base64.b64encode(image_file.read()).decode('utf-8')
|
||||
|
||||
def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):
|
||||
"""
|
||||
|
||||
129
request_llms/bridge_deepseekcoder.py
Normal file
@@ -0,0 +1,129 @@
|
||||
model_name = "deepseek-coder-6.7b-instruct"
|
||||
cmd_to_install = "未知" # "`pip install -r request_llms/requirements_qwen.txt`"
|
||||
|
||||
import os
|
||||
from toolbox import ProxyNetworkActivate
|
||||
from toolbox import get_conf
|
||||
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
|
||||
from threading import Thread
|
||||
import torch
|
||||
|
||||
def download_huggingface_model(model_name, max_retry, local_dir):
|
||||
from huggingface_hub import snapshot_download
|
||||
for i in range(1, max_retry):
|
||||
try:
|
||||
snapshot_download(repo_id=model_name, local_dir=local_dir, resume_download=True)
|
||||
break
|
||||
except Exception as e:
|
||||
print(f'\n\n下载失败,重试第{i}次中...\n\n')
|
||||
return local_dir
|
||||
# ------------------------------------------------------------------------------------------------------------------------
|
||||
# 🔌💻 Local Model
|
||||
# ------------------------------------------------------------------------------------------------------------------------
|
||||
class GetCoderLMHandle(LocalLLMHandle):
|
||||
|
||||
def load_model_info(self):
|
||||
# 🏃♂️🏃♂️🏃♂️ 子进程执行
|
||||
self.model_name = model_name
|
||||
self.cmd_to_install = cmd_to_install
|
||||
|
||||
def load_model_and_tokenizer(self):
|
||||
# 🏃♂️🏃♂️🏃♂️ 子进程执行
|
||||
with ProxyNetworkActivate('Download_LLM'):
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
|
||||
model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"
|
||||
# local_dir = f"~/.cache/{model_name}"
|
||||
# if not os.path.exists(local_dir):
|
||||
# tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
|
||||
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        self._streamer = TextIteratorStreamer(tokenizer)
        device_map = {
            "transformer.word_embeddings": 0,
            "transformer.word_embeddings_layernorm": 0,
            "lm_head": 0,
            "transformer.h": 0,
            "transformer.ln_f": 0,
            "model.embed_tokens": 0,
            "model.layers": 0,
            "model.norm": 0,
        }

        # 检查量化配置
        quantization_type = get_conf('LOCAL_MODEL_QUANT')

        if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
            if quantization_type == "INT8":
                from transformers import BitsAndBytesConfig
                # 使用 INT8 量化
                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
                                                             device_map=device_map)
            elif quantization_type == "INT4":
                from transformers import BitsAndBytesConfig
                # 使用 INT4 量化
                bnb_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_use_double_quant=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.bfloat16
                )
                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                             quantization_config=bnb_config, device_map=device_map)
            else:
                # 使用默认的 FP16
                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                             torch_dtype=torch.bfloat16, device_map=device_map)
        else:
            # CPU 模式
            model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                         torch_dtype=torch.bfloat16)

        return model, tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)
        history.append({ 'role': 'user', 'content': query})
        messages = history
        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
        if inputs.shape[1] > max_length:
            inputs = inputs[:, -max_length:]
        inputs = inputs.to(self._model.device)
        generation_kwargs = dict(
            inputs=inputs,
            max_new_tokens=max_length,
            do_sample=False,
            top_p=top_p,
            streamer = self._streamer,
            top_k=50,
            temperature=temperature,
            num_return_sequences=1,
            eos_token_id=32021,
        )
        thread = Thread(target=self._model.generate, kwargs=generation_kwargs, daemon=True)
        thread.start()
        generated_text = ""
        for new_text in self._streamer:
            generated_text += new_text
            # print(generated_text)
            yield generated_text

    def try_to_import_special_deps(self, **kwargs): pass
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃♂️🏃♂️🏃♂️ 主进程执行
        # import importlib
        # importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetCoderLMHandle, model_name, history_format='chatglm3')
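A minimal sketch of how the quantization branch above is expected to be driven from the user config; the key names follow the get_conf calls in the code, while the concrete values and default are assumptions:

# config.py (illustrative):
LOCAL_MODEL_QUANT = "INT4"     # "FP16" (assumed default), "INT8" or "INT4"; any other value falls through to the FP16 branch
LOCAL_MODEL_DEVICE = "cuda"    # quantized loading above is skipped when this is "cpu"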
109  request_llms/bridge_google_gemini.py  Normal file
@@ -0,0 +1,109 @@
# encoding: utf-8
# @Time   : 2023/12/21
# @Author : Spike
# @Descr   :
import json
import re
import os
import time
from request_llms.com_google import GoogleChatInit
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc

proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                  console_slience=False):
    # 检查API_KEY
    if get_conf("GEMINI_API_KEY") == "":
        raise ValueError(f"请配置 GEMINI_API_KEY。")

    genai = GoogleChatInit()
    watch_dog_patience = 5  # 看门狗的耐心, 设置5秒即可
    gpt_replying_buffer = ''
    stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
    for response in stream_response:
        results = response.decode()
        match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
        error_match = re.search(r'\"message\":\s*\"(.*?)\"', results, flags=re.DOTALL)
        if match:
            try:
                paraphrase = json.loads('{"text": "%s"}' % match.group(1))
            except:
                raise ValueError(f"解析GEMINI消息出错。")
            buffer = paraphrase['text']
            gpt_replying_buffer += buffer
            if len(observe_window) >= 1:
                observe_window[0] = gpt_replying_buffer
            if len(observe_window) >= 2:
                if (time.time() - observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
        if error_match:
            raise RuntimeError(f'{gpt_replying_buffer} 对话错误')
    return gpt_replying_buffer


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    # 检查API_KEY
    if get_conf("GEMINI_API_KEY") == "":
        yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
        return

    if "vision" in llm_kwargs["llm_model"]:
        have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
        def make_media_input(inputs, image_paths):
            for image_path in image_paths:
                inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
            return inputs
        if have_recent_file:
            inputs = make_media_input(inputs, image_paths)

    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history)
    genai = GoogleChatInit()
    retry = 0
    while True:
        try:
            stream_response = genai.generate_chat(inputs, llm_kwargs, history, system_prompt)
            break
        except Exception as e:
            retry += 1
            chatbot[-1] = ((chatbot[-1][0], trimmed_format_exc()))
            yield from update_ui(chatbot=chatbot, history=history, msg="请求失败")  # 刷新界面
            return
    gpt_replying_buffer = ""
    gpt_security_policy = ""
    history.extend([inputs, ''])
    for response in stream_response:
        results = response.decode("utf-8")  # 被这个解码给耍了。。
        gpt_security_policy += results
        match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
        error_match = re.search(r'\"message\":\s*\"(.*)\"', results, flags=re.DOTALL)
        if match:
            try:
                paraphrase = json.loads('{"text": "%s"}' % match.group(1))
            except:
                raise ValueError(f"解析GEMINI消息出错。")
            gpt_replying_buffer += paraphrase['text']  # 使用 json 解析库进行处理
            chatbot[-1] = (inputs, gpt_replying_buffer)
            history[-1] = gpt_replying_buffer
            yield from update_ui(chatbot=chatbot, history=history)
        if error_match:
            history = history[-2]  # 错误的不纳入对话
            chatbot[-1] = (inputs, gpt_replying_buffer + f"对话错误,请查看message\n\n```\n{error_match.group(1)}\n```")
            yield from update_ui(chatbot=chatbot, history=history)
            raise RuntimeError('对话错误')
    if not gpt_replying_buffer:
        history = history[-2]  # 错误的不纳入对话
        chatbot[-1] = (inputs, gpt_replying_buffer + f"触发了Google的安全访问策略,没有回答\n\n```\n{gpt_security_policy}\n```")
        yield from update_ui(chatbot=chatbot, history=history)



if __name__ == '__main__':
    import sys
    llm_kwargs = {'llm_model': 'gemini-pro'}
    result = predict('Write long a story about a magic backpack.', llm_kwargs, llm_kwargs, [])
    for i in result:
        print(i)
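A minimal standalone sketch of the streamed-chunk parsing used in both functions above, assuming a raw fragment shaped like Gemini's streamGenerateContent output (the fragment value is illustrative):

import json, re
fragment = '"text": "Hello\\nworld"'                                            # assumed raw chunk content
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', fragment, flags=re.DOTALL)  # capture the still-escaped payload
paraphrase = json.loads('{"text": "%s"}' % match.group(1))                      # json.loads unescapes \n, \" etc.
print(paraphrase['text'])                                                       # prints "Hello" and "world" on separate lines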
@@ -12,7 +12,7 @@ from threading import Thread
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetONNXGLMHandle(LocalLLMHandle):
class GetLlamaHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
@@ -87,4 +87,4 @@ class GetONNXGLMHandle(LocalLLMHandle):
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetLlamaHandle, model_name)
@@ -1,67 +1,62 @@
model_name = "Qwen"
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"


from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception

model_name = 'Qwen'

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
    """
    ⭐多线程方法
    函数的说明请见 request_llms/bridge_all.py
    """
    watch_dog_patience = 5
    response = ""

# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetONNXGLMHandle(LocalLLMHandle):
    from .com_qwenapi import QwenRequestInstance
    sri = QwenRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
    return response

    def load_model_info(self):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    ⭐单线程方法
    函数的说明请见 request_llms/bridge_all.py
    """
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history)

    def load_model_and_tokenizer(self):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        import os, glob
        import os
        import platform
        from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        check_packages(["dashscope"])
    except:
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
                                         chatbot=chatbot, history=history, delay=0)
        return

        with ProxyNetworkActivate('Download_LLM'):
            model_id = 'qwen/Qwen-7B-Chat'
            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
            # use fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # 可指定不同的生成长度、top_p等相关超参
            self._model = model
    # 检查DASHSCOPE_API_KEY
    if get_conf("DASHSCOPE_API_KEY") == "":
        yield from update_ui_lastest_msg(f"请配置 DASHSCOPE_API_KEY。",
                                         chatbot=chatbot, history=history, delay=0)
        return

        return self._model, self._tokenizer
    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    def llm_stream_generator(self, **kwargs):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history
    # 开始接收回复
    from .com_qwenapi import QwenRequestInstance
    sri = QwenRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        for response in self._model.chat(self._tokenizer, query, history=history, stream=True):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃♂️🏃♂️🏃♂️ 主进程执行
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
    # 总结输出
    if response == f"[Local Message] 等待{model_name}响应中 ...":
        response = f"[Local Message] {model_name}响应异常 ..."
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
59  request_llms/bridge_qwen_local.py  Normal file
@@ -0,0 +1,59 @@
model_name = "Qwen_Local"
cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"

from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from transformers.generation import GenerationConfig
        with ProxyNetworkActivate('Download_LLM'):
            model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
            # use fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # 可指定不同的生成长度、top_p等相关超参
            self._model = model

        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃♂️🏃♂️🏃♂️ 子进程执行
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        for response in self._model.chat_stream(self._tokenizer, query, history=history):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃♂️🏃♂️🏃♂️ 主进程执行
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
@@ -26,7 +26,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

    from .com_sparkapi import SparkRequestInstance
    sri = SparkRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:
@@ -52,7 +52,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
    # 开始接收回复
    from .com_sparkapi import SparkRequestInstance
    sri = SparkRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

@@ -1,6 +1,7 @@

import time
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception

model_name = '智谱AI大模型'

@@ -37,6 +38,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history)

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        check_packages(["zhipuai"])
    except:
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
                                         chatbot=chatbot, history=history, delay=0)
        return

    if validate_key() is False:
        yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
        return

228  request_llms/com_google.py  Normal file
@@ -0,0 +1,228 @@
# encoding: utf-8
|
||||
# @Time : 2023/12/25
|
||||
# @Author : Spike
|
||||
# @Descr :
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import requests
|
||||
from typing import List, Dict, Tuple
|
||||
from toolbox import get_conf, encode_image, get_pictures_list
|
||||
|
||||
proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
|
||||
|
||||
"""
|
||||
========================================================================
|
||||
第五部分 一些文件处理方法
|
||||
files_filter_handler 根据type过滤文件
|
||||
input_encode_handler 提取input中的文件,并解析
|
||||
file_manifest_filter_html 根据type过滤文件, 并解析为html or md 文本
|
||||
link_mtime_to_md 文件增加本地时间参数,避免下载到缓存文件
|
||||
html_view_blank 超链接
|
||||
html_local_file 本地文件取相对路径
|
||||
to_markdown_tabs 文件list 转换为 md tab
|
||||
"""
|
||||
|
||||
|
||||
def files_filter_handler(file_list):
|
||||
new_list = []
|
||||
filter_ = [
|
||||
"png",
|
||||
"jpg",
|
||||
"jpeg",
|
||||
"bmp",
|
||||
"svg",
|
||||
"webp",
|
||||
"ico",
|
||||
"tif",
|
||||
"tiff",
|
||||
"raw",
|
||||
"eps",
|
||||
]
|
||||
for file in file_list:
|
||||
file = str(file).replace("file=", "")
|
||||
if os.path.exists(file):
|
||||
if str(os.path.basename(file)).split(".")[-1] in filter_:
|
||||
new_list.append(file)
|
||||
return new_list
|
||||
|
||||
|
||||
def input_encode_handler(inputs, llm_kwargs):
|
||||
if llm_kwargs["most_recent_uploaded"].get("path"):
|
||||
image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
|
||||
md_encode = []
|
||||
for md_path in image_paths:
|
||||
type_ = os.path.splitext(md_path)[1].replace(".", "")
|
||||
type_ = "jpeg" if type_ == "jpg" else type_
|
||||
md_encode.append({"data": encode_image(md_path), "type": type_})
|
||||
return inputs, md_encode
|
||||
|
||||
|
||||
def file_manifest_filter_html(file_list, filter_: list = None, md_type=False):
|
||||
new_list = []
|
||||
if not filter_:
|
||||
filter_ = [
|
||||
"png",
|
||||
"jpg",
|
||||
"jpeg",
|
||||
"bmp",
|
||||
"svg",
|
||||
"webp",
|
||||
"ico",
|
||||
"tif",
|
||||
"tiff",
|
||||
"raw",
|
||||
"eps",
|
||||
]
|
||||
for file in file_list:
|
||||
if str(os.path.basename(file)).split(".")[-1] in filter_:
|
||||
new_list.append(html_local_img(file, md=md_type))
|
||||
elif os.path.exists(file):
|
||||
new_list.append(link_mtime_to_md(file))
|
||||
else:
|
||||
new_list.append(file)
|
||||
return new_list
|
||||
|
||||
|
||||
def link_mtime_to_md(file):
|
||||
link_local = html_local_file(file)
|
||||
link_name = os.path.basename(file)
|
||||
a = f"[{link_name}]({link_local}?{os.path.getmtime(file)})"
|
||||
return a
|
||||
|
||||
|
||||
def html_local_file(file):
|
||||
base_path = os.path.dirname(__file__) # 项目目录
|
||||
if os.path.exists(str(file)):
|
||||
file = f'file={file.replace(base_path, ".")}'
|
||||
return file
|
||||
|
||||
|
||||
def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
|
||||
style = ""
|
||||
if max_width is not None:
|
||||
style += f"max-width: {max_width};"
|
||||
if max_height is not None:
|
||||
style += f"max-height: {max_height};"
|
||||
__file = html_local_file(__file)
|
||||
a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
|
||||
if md:
|
||||
a = f""
|
||||
return a
|
||||
|
||||
|
||||
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
|
||||
"""
|
||||
Args:
|
||||
head: 表头:[]
|
||||
tabs: 表值:[[列1], [列2], [列3], [列4]]
|
||||
alignment: :--- 左对齐, :---: 居中对齐, ---: 右对齐
|
||||
column: True to keep data in columns, False to keep data in rows (default).
|
||||
Returns:
|
||||
A string representation of the markdown table.
|
||||
"""
|
||||
if column:
|
||||
transposed_tabs = list(map(list, zip(*tabs)))
|
||||
else:
|
||||
transposed_tabs = tabs
|
||||
# Find the maximum length among the columns
|
||||
max_len = max(len(column) for column in transposed_tabs)
|
||||
|
||||
tab_format = "| %s "
|
||||
tabs_list = "".join([tab_format % i for i in head]) + "|\n"
|
||||
tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
|
||||
|
||||
for i in range(max_len):
|
||||
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
|
||||
row_data = file_manifest_filter_html(row_data, filter_=None)
|
||||
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
|
||||
|
||||
return tabs_list
|
||||
|
||||
|
||||
class GoogleChatInit:
|
||||
def __init__(self):
|
||||
self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
|
||||
|
||||
def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
|
||||
headers, payload = self.generate_message_payload(
|
||||
inputs, llm_kwargs, history, system_prompt
|
||||
)
|
||||
response = requests.post(
|
||||
url=self.url_gemini,
|
||||
headers=headers,
|
||||
data=json.dumps(payload),
|
||||
stream=True,
|
||||
proxies=proxies,
|
||||
timeout=TIMEOUT_SECONDS,
|
||||
)
|
||||
return response.iter_lines()
|
||||
|
||||
def __conversation_user(self, user_input, llm_kwargs):
|
||||
what_i_have_asked = {"role": "user", "parts": []}
|
||||
if "vision" not in self.url_gemini:
|
||||
input_ = user_input
|
||||
encode_img = []
|
||||
else:
|
||||
input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
|
||||
what_i_have_asked["parts"].append({"text": input_})
|
||||
if encode_img:
|
||||
for data in encode_img:
|
||||
what_i_have_asked["parts"].append(
|
||||
{
|
||||
"inline_data": {
|
||||
"mime_type": f"image/{data['type']}",
|
||||
"data": data["data"],
|
||||
}
|
||||
}
|
||||
)
|
||||
return what_i_have_asked
|
||||
|
||||
def __conversation_history(self, history, llm_kwargs):
|
||||
messages = []
|
||||
conversation_cnt = len(history) // 2
|
||||
if conversation_cnt:
|
||||
for index in range(0, 2 * conversation_cnt, 2):
|
||||
what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
|
||||
what_gpt_answer = {
|
||||
"role": "model",
|
||||
"parts": [{"text": history[index + 1]}],
|
||||
}
|
||||
messages.append(what_i_have_asked)
|
||||
messages.append(what_gpt_answer)
|
||||
return messages
|
||||
|
||||
def generate_message_payload(
|
||||
self, inputs, llm_kwargs, history, system_prompt
|
||||
) -> Tuple[Dict, Dict]:
|
||||
messages = [
|
||||
# {"role": "system", "parts": [{"text": system_prompt}]}, # gemini 不允许对话轮次为偶数,所以这个没有用,看后续支持吧。。。
|
||||
# {"role": "user", "parts": [{"text": ""}]},
|
||||
# {"role": "model", "parts": [{"text": ""}]}
|
||||
]
|
||||
self.url_gemini = self.url_gemini.replace(
|
||||
"%m", llm_kwargs["llm_model"]
|
||||
).replace("%k", get_conf("GEMINI_API_KEY"))
|
||||
header = {"Content-Type": "application/json"}
|
||||
if "vision" not in self.url_gemini: # 不是vision 才处理history
|
||||
messages.extend(
|
||||
self.__conversation_history(history, llm_kwargs)
|
||||
) # 处理 history
|
||||
messages.append(self.__conversation_user(inputs, llm_kwargs)) # 处理用户对话
|
||||
payload = {
|
||||
"contents": messages,
|
||||
"generationConfig": {
|
||||
# "maxOutputTokens": 800,
|
||||
"stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
|
||||
"temperature": llm_kwargs.get("temperature", 1),
|
||||
"topP": llm_kwargs.get("top_p", 0.8),
|
||||
"topK": 10,
|
||||
},
|
||||
}
|
||||
return header, payload
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
google = GoogleChatInit()
|
||||
# print(gootle.generate_message_payload('你好呀', {}, ['123123', '3123123'], ''))
|
||||
# gootle.input_encode_handle('123123[123123](./123123), ')
|
||||
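A minimal usage sketch for the to_markdown_tabs helper defined above, with illustrative values (paths that do not exist on disk pass through file_manifest_filter_html unchanged):

head = ["name", "size"]
tabs = [["a.png", "b.txt"], ["10KB", "2KB"]]   # column-major, as the docstring describes
print(to_markdown_tabs(head, tabs))
# | name | size |
# | :---: | :---: |
# | a.png | 10KB |
# | b.txt | 2KB |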
94  request_llms/com_qwenapi.py  Normal file
@@ -0,0 +1,94 @@
from http import HTTPStatus
|
||||
from toolbox import get_conf
|
||||
import threading
|
||||
import logging
|
||||
|
||||
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
|
||||
|
||||
class QwenRequestInstance():
|
||||
def __init__(self):
|
||||
import dashscope
|
||||
self.time_to_yield_event = threading.Event()
|
||||
self.time_to_exit_event = threading.Event()
|
||||
self.result_buf = ""
|
||||
|
||||
def validate_key():
|
||||
DASHSCOPE_API_KEY = get_conf("DASHSCOPE_API_KEY")
|
||||
if DASHSCOPE_API_KEY == '': return False
|
||||
return True
|
||||
|
||||
if not validate_key():
|
||||
raise RuntimeError('请配置 DASHSCOPE_API_KEY')
|
||||
dashscope.api_key = get_conf("DASHSCOPE_API_KEY")
|
||||
|
||||
|
||||
def generate(self, inputs, llm_kwargs, history, system_prompt):
|
||||
# import _thread as thread
|
||||
from dashscope import Generation
|
||||
QWEN_MODEL = {
|
||||
'qwen-turbo': Generation.Models.qwen_turbo,
|
||||
'qwen-plus': Generation.Models.qwen_plus,
|
||||
'qwen-max': Generation.Models.qwen_max,
|
||||
}[llm_kwargs['llm_model']]
|
||||
top_p = llm_kwargs.get('top_p', 0.8)
|
||||
if top_p == 0: top_p += 1e-5
|
||||
if top_p == 1: top_p -= 1e-5
|
||||
|
||||
self.result_buf = ""
|
||||
responses = Generation.call(
|
||||
model=QWEN_MODEL,
|
||||
messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
|
||||
top_p=top_p,
|
||||
temperature=llm_kwargs.get('temperature', 1.0),
|
||||
result_format='message',
|
||||
stream=True,
|
||||
incremental_output=True
|
||||
)
|
||||
|
||||
for response in responses:
|
||||
if response.status_code == HTTPStatus.OK:
|
||||
if response.output.choices[0].finish_reason == 'stop':
|
||||
yield self.result_buf
|
||||
break
|
||||
elif response.output.choices[0].finish_reason == 'length':
|
||||
self.result_buf += "[Local Message] 生成长度过长,后续输出被截断"
|
||||
yield self.result_buf
|
||||
break
|
||||
else:
|
||||
self.result_buf += response.output.choices[0].message.content
|
||||
yield self.result_buf
|
||||
else:
|
||||
self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
|
||||
yield self.result_buf
|
||||
break
|
||||
logging.info(f'[raw_input] {inputs}')
|
||||
logging.info(f'[response] {self.result_buf}')
|
||||
return self.result_buf
|
||||
|
||||
|
||||
def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
|
||||
conversation_cnt = len(history) // 2
|
||||
if system_prompt == '': system_prompt = 'Hello!'
|
||||
messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}]
|
||||
if conversation_cnt:
|
||||
for index in range(0, 2*conversation_cnt, 2):
|
||||
what_i_have_asked = {}
|
||||
what_i_have_asked["role"] = "user"
|
||||
what_i_have_asked["content"] = history[index]
|
||||
what_gpt_answer = {}
|
||||
what_gpt_answer["role"] = "assistant"
|
||||
what_gpt_answer["content"] = history[index+1]
|
||||
if what_i_have_asked["content"] != "":
|
||||
if what_gpt_answer["content"] == "":
|
||||
continue
|
||||
if what_gpt_answer["content"] == timeout_bot_msg:
|
||||
continue
|
||||
messages.append(what_i_have_asked)
|
||||
messages.append(what_gpt_answer)
|
||||
else:
|
||||
messages[-1]['content'] = what_gpt_answer['content']
|
||||
what_i_ask_now = {}
|
||||
what_i_ask_now["role"] = "user"
|
||||
what_i_ask_now["content"] = inputs
|
||||
messages.append(what_i_ask_now)
|
||||
return messages
|
||||
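A minimal sketch of the message list that generate_message_payload (above) builds, assuming a one-round history; llm_kwargs is accepted but unused by this helper, and the inputs/history strings are illustrative:

messages = generate_message_payload(
    inputs="什么是质子?",
    llm_kwargs={},
    history=["你好", "你好!有什么可以帮你?"],
    system_prompt="You are a helpful assistant.",
)
# -> [{"role": "user", "content": "You are a helpful assistant."},
#     {"role": "assistant", "content": "Certainly!"},
#     {"role": "user", "content": "你好"},
#     {"role": "assistant", "content": "你好!有什么可以帮你?"},
#     {"role": "user", "content": "什么是质子?"}]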
@@ -1,4 +1,4 @@
|
||||
from toolbox import get_conf
|
||||
from toolbox import get_conf, get_pictures_list, encode_image
|
||||
import base64
|
||||
import datetime
|
||||
import hashlib
|
||||
@@ -65,18 +65,19 @@ class SparkRequestInstance():
|
||||
self.gpt_url = "ws://spark-api.xf-yun.com/v1.1/chat"
|
||||
self.gpt_url_v2 = "ws://spark-api.xf-yun.com/v2.1/chat"
|
||||
self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
|
||||
self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
|
||||
|
||||
self.time_to_yield_event = threading.Event()
|
||||
self.time_to_exit_event = threading.Event()
|
||||
|
||||
self.result_buf = ""
|
||||
|
||||
def generate(self, inputs, llm_kwargs, history, system_prompt):
|
||||
def generate(self, inputs, llm_kwargs, history, system_prompt, use_image_api=False):
|
||||
llm_kwargs = llm_kwargs
|
||||
history = history
|
||||
system_prompt = system_prompt
|
||||
import _thread as thread
|
||||
thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt))
|
||||
thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt, use_image_api))
|
||||
while True:
|
||||
self.time_to_yield_event.wait(timeout=1)
|
||||
if self.time_to_yield_event.is_set():
|
||||
@@ -85,14 +86,20 @@ class SparkRequestInstance():
|
||||
return self.result_buf
|
||||
|
||||
|
||||
def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt):
|
||||
def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt, use_image_api):
|
||||
if llm_kwargs['llm_model'] == 'sparkv2':
|
||||
gpt_url = self.gpt_url_v2
|
||||
elif llm_kwargs['llm_model'] == 'sparkv3':
|
||||
gpt_url = self.gpt_url_v3
|
||||
else:
|
||||
gpt_url = self.gpt_url
|
||||
|
||||
file_manifest = []
|
||||
if use_image_api and llm_kwargs.get('most_recent_uploaded'):
|
||||
if llm_kwargs['most_recent_uploaded'].get('path'):
|
||||
file_manifest = get_pictures_list(llm_kwargs['most_recent_uploaded']['path'])
|
||||
if len(file_manifest) > 0:
|
||||
print('正在使用讯飞图片理解API')
|
||||
gpt_url = self.gpt_url_img
|
||||
wsParam = Ws_Param(self.appid, self.api_key, self.api_secret, gpt_url)
|
||||
websocket.enableTrace(False)
|
||||
wsUrl = wsParam.create_url()
|
||||
@@ -101,9 +108,8 @@ class SparkRequestInstance():
|
||||
def on_open(ws):
|
||||
import _thread as thread
|
||||
thread.start_new_thread(run, (ws,))
|
||||
|
||||
def run(ws, *args):
|
||||
data = json.dumps(gen_params(ws.appid, *ws.all_args))
|
||||
data = json.dumps(gen_params(ws.appid, *ws.all_args, file_manifest))
|
||||
ws.send(data)
|
||||
|
||||
# 收到websocket消息的处理
|
||||
@@ -142,9 +148,18 @@ class SparkRequestInstance():
|
||||
ws.all_args = (inputs, llm_kwargs, history, system_prompt)
|
||||
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
|
||||
|
||||
def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
|
||||
def generate_message_payload(inputs, llm_kwargs, history, system_prompt, file_manifest):
|
||||
conversation_cnt = len(history) // 2
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
messages = []
|
||||
if file_manifest:
|
||||
base64_images = []
|
||||
for image_path in file_manifest:
|
||||
base64_images.append(encode_image(image_path))
|
||||
for img_s in base64_images:
|
||||
if img_s not in str(messages):
|
||||
messages.append({"role": "user", "content": img_s, "content_type": "image"})
|
||||
else:
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
if conversation_cnt:
|
||||
for index in range(0, 2*conversation_cnt, 2):
|
||||
what_i_have_asked = {}
|
||||
@@ -167,7 +182,7 @@ def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
|
||||
return messages
|
||||
|
||||
|
||||
def gen_params(appid, inputs, llm_kwargs, history, system_prompt):
|
||||
def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest):
|
||||
"""
|
||||
通过appid和用户的提问来生成请参数
|
||||
"""
|
||||
@@ -176,6 +191,8 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt):
|
||||
"sparkv2": "generalv2",
|
||||
"sparkv3": "generalv3",
|
||||
}
|
||||
domains_select = domains[llm_kwargs['llm_model']]
|
||||
if file_manifest: domains_select = 'image'
|
||||
data = {
|
||||
"header": {
|
||||
"app_id": appid,
|
||||
@@ -183,7 +200,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt):
|
||||
},
|
||||
"parameter": {
|
||||
"chat": {
|
||||
"domain": domains[llm_kwargs['llm_model']],
|
||||
"domain": domains_select,
|
||||
"temperature": llm_kwargs["temperature"],
|
||||
"random_threshold": 0.5,
|
||||
"max_tokens": 4096,
|
||||
@@ -192,7 +209,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt):
|
||||
},
|
||||
"payload": {
|
||||
"message": {
|
||||
"text": generate_message_payload(inputs, llm_kwargs, history, system_prompt)
|
||||
"text": generate_message_payload(inputs, llm_kwargs, history, system_prompt, file_manifest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ class ZhipuRequestInstance():
|
||||
)
|
||||
for event in response.events():
|
||||
if event.event == "add":
|
||||
# if self.result_buf == "" and event.data.startswith(" "):
|
||||
# event.data = event.data.lstrip(" ") # 每次智谱为啥都要带个空格开头呢?
|
||||
self.result_buf += event.data
|
||||
yield self.result_buf
|
||||
elif event.event == "error" or event.event == "interrupted":
|
||||
|
||||
@@ -183,11 +183,11 @@ class LocalLLMHandle(Process):
|
||||
def stream_chat(self, **kwargs):
|
||||
# ⭐run in main process
|
||||
if self.get_state() == "`准备就绪`":
|
||||
yield "`正在等待线程锁,排队中请稍后 ...`"
|
||||
yield "`正在等待线程锁,排队中请稍候 ...`"
|
||||
|
||||
with self.threadLock:
|
||||
if self.parent.poll():
|
||||
yield "`排队中请稍后 ...`"
|
||||
yield "`排队中请稍候 ...`"
|
||||
self.clear_pending_messages()
|
||||
self.parent.send(kwargs)
|
||||
std_out = ""
|
||||
@@ -198,7 +198,7 @@ class LocalLLMHandle(Process):
|
||||
if res.startswith(self.std_tag):
|
||||
new_output = res[len(self.std_tag):]
|
||||
std_out = std_out[:std_out_clip_len]
|
||||
# print(new_output, end='')
|
||||
print(new_output, end='')
|
||||
std_out = new_output + std_out
|
||||
yield self.std_tag + '\n```\n' + std_out + '\n```\n'
|
||||
elif res == '[Finish]':
|
||||
|
||||
@@ -2,4 +2,4 @@ protobuf
|
||||
cpm_kernels
|
||||
torch>=1.10
|
||||
mdtex2html
|
||||
sentencepiece
|
||||
sentencepiece
|
||||
|
||||
@@ -6,5 +6,3 @@ sentencepiece
|
||||
numpy
|
||||
onnxruntime
|
||||
sentencepiece
|
||||
streamlit
|
||||
streamlit-chat
|
||||
|
||||
@@ -3,4 +3,4 @@ jtorch >= 0.1.3
|
||||
torch
|
||||
torchvision
|
||||
pandas
|
||||
jieba
|
||||
jieba
|
||||
|
||||
@@ -5,5 +5,3 @@ accelerate
|
||||
matplotlib
|
||||
huggingface_hub
|
||||
triton
|
||||
streamlit
|
||||
|
||||
|
||||
@@ -1,2 +1 @@
|
||||
modelscope
|
||||
transformers_stream_generator
|
||||
dashscope
|
||||
|
||||
5  request_llms/requirements_qwen_local.txt  Normal file
@@ -0,0 +1,5 @@
modelscope
|
||||
transformers_stream_generator
|
||||
auto-gptq
|
||||
optimum
|
||||
urllib3<2
|
||||
@@ -1 +1 @@
|
||||
slack-sdk==3.21.3
|
||||
slack-sdk==3.21.3
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
./docs/gradio-3.32.6-py3-none-any.whl
|
||||
pypdf2==2.12.1
|
||||
zhipuai<2
|
||||
tiktoken>=0.3.3
|
||||
requests[socks]
|
||||
pydantic==1.10.11
|
||||
protobuf==3.18
|
||||
transformers>=4.27.1
|
||||
scipdf_parser>=0.52
|
||||
python-markdown-math
|
||||
|
||||
@@ -3,32 +3,36 @@
|
||||
# """
|
||||
def validate_path():
|
||||
import os, sys
|
||||
dir_name = os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
|
||||
|
||||
os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
|
||||
os.chdir(root_dir_assume)
|
||||
sys.path.append(root_dir_assume)
|
||||
|
||||
validate_path() # validate path so you can run from base directory
|
||||
|
||||
|
||||
validate_path() # validate path so you can run from base directory
|
||||
if __name__ == "__main__":
|
||||
# from request_llms.bridge_newbingfree import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_moss import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_claude import predict_no_ui_long_connection
|
||||
from request_llms.bridge_internlm import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_qwen import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_internlm import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_qwen_7B import predict_no_ui_long_connection
|
||||
from request_llms.bridge_qwen_local import predict_no_ui_long_connection
|
||||
|
||||
# from request_llms.bridge_spark import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_zhipu import predict_no_ui_long_connection
|
||||
# from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
|
||||
|
||||
llm_kwargs = {
|
||||
'max_length': 4096,
|
||||
'top_p': 1,
|
||||
'temperature': 1,
|
||||
"max_length": 4096,
|
||||
"top_p": 1,
|
||||
"temperature": 1,
|
||||
}
|
||||
|
||||
result = predict_no_ui_long_connection( inputs="请问什么是质子?",
|
||||
llm_kwargs=llm_kwargs,
|
||||
history=["你好", "我好!"],
|
||||
sys_prompt="")
|
||||
print('final result:', result)
|
||||
result = predict_no_ui_long_connection(
|
||||
inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt=""
|
||||
)
|
||||
print("final result:", result)
|
||||
|
||||
@@ -29,16 +29,20 @@ md = """
|
||||
请随时告诉我您的需求,我会尽力提供帮助。如果您有任何问题或需要解答的议题,请随时提问。
|
||||
"""
|
||||
|
||||
|
||||
def validate_path():
|
||||
import os, sys
|
||||
dir_name = os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
|
||||
|
||||
os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
|
||||
os.chdir(root_dir_assume)
|
||||
sys.path.append(root_dir_assume)
|
||||
validate_path() # validate path so you can run from base directory
|
||||
|
||||
|
||||
validate_path() # validate path so you can run from base directory
|
||||
from toolbox import markdown_convertion
|
||||
|
||||
html = markdown_convertion(md)
|
||||
print(html)
|
||||
with open('test.html', 'w', encoding='utf-8') as f:
|
||||
f.write(html)
|
||||
with open("test.html", "w", encoding="utf-8") as f:
|
||||
f.write(html)
|
||||
|
||||
@@ -4,16 +4,28 @@
|
||||
|
||||
|
||||
import os, sys
|
||||
def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)
|
||||
validate_path() # 返回项目根路径
|
||||
|
||||
|
||||
def validate_path():
|
||||
dir_name = os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(dir_name + "/..")
|
||||
os.chdir(root_dir_assume)
|
||||
sys.path.append(root_dir_assume)
|
||||
|
||||
|
||||
validate_path() # 返回项目根路径
|
||||
|
||||
if __name__ == "__main__":
|
||||
from tests.test_utils import plugin_test
|
||||
|
||||
# plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
|
||||
|
||||
# plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
|
||||
|
||||
plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix")
|
||||
plugin_test(
|
||||
plugin="crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF",
|
||||
main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
|
||||
)
|
||||
|
||||
# plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
|
||||
|
||||
@@ -34,7 +46,7 @@ if __name__ == "__main__":
|
||||
# plugin_test(plugin='crazy_functions.批量翻译PDF文档_多线程->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
|
||||
|
||||
# plugin_test(plugin='crazy_functions.谷歌检索小助手->谷歌检索小助手', main_input="https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=")
|
||||
|
||||
|
||||
# plugin_test(plugin='crazy_functions.总结word文档->总结word文档', main_input="crazy_functions/test_project/pdf_and_word")
|
||||
|
||||
# plugin_test(plugin='crazy_functions.下载arxiv论文翻译摘要->下载arxiv论文并翻译摘要', main_input="1812.10695")
|
||||
@@ -48,17 +60,16 @@ if __name__ == "__main__":
|
||||
# for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
|
||||
# plugin_test(plugin='crazy_functions.批量Markdown翻译->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
|
||||
|
||||
# plugin_test(plugin='crazy_functions.Langchain知识库->知识库问答', main_input="./")
|
||||
# plugin_test(plugin='crazy_functions.知识库文件注入->知识库文件注入', main_input="./")
|
||||
|
||||
# plugin_test(plugin='crazy_functions.Langchain知识库->读取知识库作答', main_input="What is the installation method?")
|
||||
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="What is the installation method?")
|
||||
|
||||
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
|
||||
|
||||
# plugin_test(plugin='crazy_functions.Langchain知识库->读取知识库作答', main_input="远程云服务器部署?")
|
||||
|
||||
# plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2210.03629")
|
||||
|
||||
|
||||
# advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" }
|
||||
# plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)
|
||||
|
||||
# advanced_arg = {"advanced_arg":"--pre_seq_len=128 --learning_rate=2e-2 --num_gpus=1 --json_dataset='t_code.json' --ptuning_directory='/home/hmp/ChatGLM2-6B/ptuning' " }
|
||||
# plugin_test(plugin='crazy_functions.chatglm微调工具->启动微调', main_input='build/dev.json', advanced_arg=advanced_arg)
|
||||
|
||||
|
||||
@@ -9,45 +9,52 @@ from functools import wraps
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def chat_to_markdown_str(chat):
|
||||
result = ""
|
||||
for i, cc in enumerate(chat):
|
||||
result += f'\n\n{cc[0]}\n\n{cc[1]}'
|
||||
if i != len(chat)-1:
|
||||
result += '\n\n---'
|
||||
result += f"\n\n{cc[0]}\n\n{cc[1]}"
|
||||
if i != len(chat) - 1:
|
||||
result += "\n\n---"
|
||||
return result
|
||||
|
||||
|
||||
def silence_stdout(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
_original_stdout = sys.stdout
|
||||
sys.stdout = open(os.devnull, 'w')
|
||||
sys.stdout.reconfigure(encoding='utf-8')
|
||||
sys.stdout = open(os.devnull, "w")
|
||||
sys.stdout.reconfigure(encoding="utf-8")
|
||||
for q in func(*args, **kwargs):
|
||||
sys.stdout = _original_stdout
|
||||
yield q
|
||||
sys.stdout = open(os.devnull, 'w')
|
||||
sys.stdout.reconfigure(encoding='utf-8')
|
||||
sys.stdout = open(os.devnull, "w")
|
||||
sys.stdout.reconfigure(encoding="utf-8")
|
||||
sys.stdout.close()
|
||||
sys.stdout = _original_stdout
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def silence_stdout_fn(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
_original_stdout = sys.stdout
|
||||
sys.stdout = open(os.devnull, 'w')
|
||||
sys.stdout.reconfigure(encoding='utf-8')
|
||||
sys.stdout = open(os.devnull, "w")
|
||||
sys.stdout.reconfigure(encoding="utf-8")
|
||||
result = func(*args, **kwargs)
|
||||
sys.stdout.close()
|
||||
sys.stdout = _original_stdout
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
class VoidTerminal():
|
||||
|
||||
class VoidTerminal:
|
||||
def __init__(self) -> None:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
vt = VoidTerminal()
|
||||
vt.get_conf = silence_stdout_fn(get_conf)
|
||||
vt.set_conf = silence_stdout_fn(set_conf)
|
||||
@@ -57,10 +64,28 @@ vt.get_plugin_default_kwargs = silence_stdout_fn(get_plugin_default_kwargs)
|
||||
vt.get_chat_handle = silence_stdout_fn(get_chat_handle)
|
||||
vt.get_chat_default_kwargs = silence_stdout_fn(get_chat_default_kwargs)
|
||||
vt.chat_to_markdown_str = chat_to_markdown_str
|
||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
|
||||
vt.get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
|
||||
(
|
||||
proxies,
|
||||
WEB_PORT,
|
||||
LLM_MODEL,
|
||||
CONCURRENT_COUNT,
|
||||
AUTHENTICATION,
|
||||
CHATBOT_HEIGHT,
|
||||
LAYOUT,
|
||||
API_KEY,
|
||||
) = vt.get_conf(
|
||||
"proxies",
|
||||
"WEB_PORT",
|
||||
"LLM_MODEL",
|
||||
"CONCURRENT_COUNT",
|
||||
"AUTHENTICATION",
|
||||
"CHATBOT_HEIGHT",
|
||||
"LAYOUT",
|
||||
"API_KEY",
|
||||
)
|
||||
|
||||
def plugin_test(main_input, plugin, advanced_arg=None):
|
||||
|
||||
def plugin_test(main_input, plugin, advanced_arg=None, debug=True):
|
||||
from rich.live import Live
|
||||
from rich.markdown import Markdown
|
||||
|
||||
@@ -69,13 +94,16 @@ def plugin_test(main_input, plugin, advanced_arg=None):
|
||||
|
||||
plugin = vt.get_plugin_handle(plugin)
|
||||
plugin_kwargs = vt.get_plugin_default_kwargs()
|
||||
plugin_kwargs['main_input'] = main_input
|
||||
plugin_kwargs["main_input"] = main_input
|
||||
if advanced_arg is not None:
|
||||
plugin_kwargs['plugin_kwargs'] = advanced_arg
|
||||
my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
|
||||
plugin_kwargs["plugin_kwargs"] = advanced_arg
|
||||
if debug:
|
||||
my_working_plugin = (plugin)(**plugin_kwargs)
|
||||
else:
|
||||
my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
|
||||
|
||||
with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
|
||||
for cookies, chat, hist, msg in my_working_plugin:
|
||||
md_str = vt.chat_to_markdown_str(chat)
|
||||
md = Markdown(md_str)
|
||||
live.update(md, refresh=True)
|
||||
live.update(md, refresh=True)
|
||||
|
||||
28  tests/test_vector_plugins.py  Normal file
@@ -0,0 +1,28 @@
"""
|
||||
对项目中的各个插件进行测试。运行方法:直接运行 python tests/test_plugins.py
|
||||
"""
|
||||
|
||||
|
||||
import os, sys
|
||||
|
||||
|
||||
def validate_path():
|
||||
dir_name = os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(dir_name + "/..")
|
||||
os.chdir(root_dir_assume)
|
||||
sys.path.append(root_dir_assume)
|
||||
|
||||
|
||||
validate_path() # 返回项目根路径
|
||||
|
||||
if __name__ == "__main__":
|
||||
from tests.test_utils import plugin_test
|
||||
|
||||
plugin_test(plugin="crazy_functions.知识库问答->知识库文件注入", main_input="./README.md")
|
||||
|
||||
plugin_test(
|
||||
plugin="crazy_functions.知识库问答->读取知识库作答",
|
||||
main_input="What is the installation method?",
|
||||
)
|
||||
|
||||
plugin_test(plugin="crazy_functions.知识库问答->读取知识库作答", main_input="远程云服务器部署?")
|
||||
@@ -94,6 +94,10 @@
|
||||
background-color: var(--block-background-fill) !important;
|
||||
}
|
||||
|
||||
#cbsc {
|
||||
background-color: var(--block-background-fill) !important;
|
||||
}
|
||||
|
||||
#interact-panel .form {
|
||||
border: hidden
|
||||
}
|
||||
@@ -111,4 +115,4 @@
|
||||
border: solid;
|
||||
border-width: thin;
|
||||
border-top-width: 0;
|
||||
}
|
||||
}
|
||||
|
||||
593  themes/common.js
@@ -1,9 +1,13 @@
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
// 第 1 部分: 工具函数
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
|
||||
function gradioApp() {
|
||||
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
||||
const elems = document.getElementsByTagName('gradio-app');
|
||||
const elem = elems.length == 0 ? document : elems[0];
|
||||
if (elem !== document) {
|
||||
elem.getElementById = function(id) {
|
||||
elem.getElementById = function (id) {
|
||||
return document.getElementById(id);
|
||||
};
|
||||
}
|
||||
@@ -12,31 +16,168 @@ function gradioApp() {
|
||||
|
||||
function setCookie(name, value, days) {
|
||||
var expires = "";
|
||||
|
||||
|
||||
if (days) {
|
||||
var date = new Date();
|
||||
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
|
||||
expires = "; expires=" + date.toUTCString();
|
||||
var date = new Date();
|
||||
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
|
||||
expires = "; expires=" + date.toUTCString();
|
||||
}
|
||||
|
||||
|
||||
document.cookie = name + "=" + value + expires + "; path=/";
|
||||
}
|
||||
|
||||
function getCookie(name) {
|
||||
var decodedCookie = decodeURIComponent(document.cookie);
|
||||
var cookies = decodedCookie.split(';');
|
||||
|
||||
|
||||
for (var i = 0; i < cookies.length; i++) {
|
||||
var cookie = cookies[i].trim();
|
||||
|
||||
if (cookie.indexOf(name + "=") === 0) {
|
||||
return cookie.substring(name.length + 1, cookie.length);
|
||||
}
|
||||
var cookie = cookies[i].trim();
|
||||
|
||||
if (cookie.indexOf(name + "=") === 0) {
|
||||
return cookie.substring(name.length + 1, cookie.length);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
let toastCount = 0;
|
||||
function toast_push(msg, duration) {
|
||||
duration = isNaN(duration) ? 3000 : duration;
|
||||
const existingToasts = document.querySelectorAll('.toast');
|
||||
existingToasts.forEach(toast => {
|
||||
toast.style.top = `${parseInt(toast.style.top, 10) - 70}px`;
|
||||
});
|
||||
const m = document.createElement('div');
|
||||
m.innerHTML = msg;
|
||||
m.classList.add('toast');
|
||||
m.style.cssText = `font-size: var(--text-md) !important; color: rgb(255, 255, 255); background-color: rgba(0, 0, 0, 0.6); padding: 10px 15px; border-radius: 4px; position: fixed; top: ${50 + toastCount * 70}%; left: 50%; transform: translateX(-50%); width: auto; text-align: center; transition: top 0.3s;`;
|
||||
document.body.appendChild(m);
|
||||
setTimeout(function () {
|
||||
m.style.opacity = '0';
|
||||
setTimeout(function () {
|
||||
document.body.removeChild(m);
|
||||
toastCount--;
|
||||
}, 500);
|
||||
}, duration);
|
||||
toastCount++;
|
||||
}
|
||||
|
||||
function toast_up(msg) {
|
||||
var m = document.getElementById('toast_up');
|
||||
if (m) {
|
||||
document.body.removeChild(m); // remove the loader from the body
|
||||
}
|
||||
m = document.createElement('div');
|
||||
m.id = 'toast_up';
|
||||
m.innerHTML = msg;
|
||||
m.style.cssText = "font-size: var(--text-md) !important; color: rgb(255, 255, 255); background-color: rgba(0, 0, 100, 0.6); padding: 10px 15px; margin: 0 0 0 -60px; border-radius: 4px; position: fixed; top: 50%; left: 50%; width: auto; text-align: center;";
|
||||
document.body.appendChild(m);
|
||||
}
|
||||
|
||||
function toast_down() {
|
||||
var m = document.getElementById('toast_up');
|
||||
if (m) {
|
||||
document.body.removeChild(m); // remove the loader from the body
|
||||
}
|
||||
}
|
||||
|
||||
function begin_loading_status() {
|
||||
// Create the loader div and add styling
|
||||
var loader = document.createElement('div');
|
||||
loader.id = 'Js_File_Loading';
|
||||
var C1 = document.createElement('div');
|
||||
var C2 = document.createElement('div');
|
||||
// var C3 = document.createElement('span');
|
||||
// C3.textContent = '上传中...'
|
||||
// C3.style.position = "fixed";
|
||||
// C3.style.top = "50%";
|
||||
// C3.style.left = "50%";
|
||||
// C3.style.width = "80px";
|
||||
// C3.style.height = "80px";
|
||||
// C3.style.margin = "-40px 0 0 -40px";
|
||||
|
||||
C1.style.position = "fixed";
|
||||
C1.style.top = "50%";
|
||||
C1.style.left = "50%";
|
||||
C1.style.width = "80px";
|
||||
C1.style.height = "80px";
|
||||
C1.style.borderLeft = "12px solid #00f3f300";
|
||||
C1.style.borderRight = "12px solid #00f3f300";
|
||||
C1.style.borderTop = "12px solid #82aaff";
|
||||
C1.style.borderBottom = "12px solid #82aaff"; // Added for effect
|
||||
C1.style.borderRadius = "50%";
|
||||
C1.style.margin = "-40px 0 0 -40px";
|
||||
C1.style.animation = "spinAndPulse 2s linear infinite";
|
||||
|
||||
C2.style.position = "fixed";
|
||||
C2.style.top = "50%";
|
||||
C2.style.left = "50%";
|
||||
C2.style.width = "40px";
|
||||
C2.style.height = "40px";
|
||||
C2.style.borderLeft = "12px solid #00f3f300";
|
||||
C2.style.borderRight = "12px solid #00f3f300";
|
||||
C2.style.borderTop = "12px solid #33c9db";
|
||||
C2.style.borderBottom = "12px solid #33c9db"; // Added for effect
|
||||
C2.style.borderRadius = "50%";
|
||||
C2.style.margin = "-20px 0 0 -20px";
|
||||
C2.style.animation = "spinAndPulse2 2s linear infinite";
|
||||
|
||||
loader.appendChild(C1);
|
||||
loader.appendChild(C2);
|
||||
// loader.appendChild(C3);
|
||||
document.body.appendChild(loader); // Add the loader to the body
|
||||
|
||||
// Set the CSS animation keyframes for spin and pulse to be synchronized
|
||||
var styleSheet = document.createElement('style');
|
||||
styleSheet.id = 'Js_File_Loading_Style';
|
||||
styleSheet.textContent = `
|
||||
@keyframes spinAndPulse {
|
||||
0% { transform: rotate(0deg) scale(1); }
|
||||
25% { transform: rotate(90deg) scale(1.1); }
|
||||
50% { transform: rotate(180deg) scale(1); }
|
||||
75% { transform: rotate(270deg) scale(0.9); }
|
||||
100% { transform: rotate(360deg) scale(1); }
|
||||
}
|
||||
|
||||
@keyframes spinAndPulse2 {
|
||||
0% { transform: rotate(-90deg);}
|
||||
25% { transform: rotate(-180deg);}
|
||||
50% { transform: rotate(-270deg);}
|
||||
75% { transform: rotate(-360deg);}
|
||||
100% { transform: rotate(-450deg);}
|
||||
}
|
||||
`;
|
||||
document.head.appendChild(styleSheet);
|
||||
}
|
||||
|
||||
|
||||
function cancel_loading_status() {
|
||||
// remove the loader from the body
|
||||
var loadingElement = document.getElementById('Js_File_Loading');
|
||||
if (loadingElement) {
|
||||
document.body.removeChild(loadingElement);
|
||||
}
|
||||
var loadingStyle = document.getElementById('Js_File_Loading_Style');
|
||||
if (loadingStyle) {
|
||||
document.head.removeChild(loadingStyle);
|
||||
}
|
||||
// create new listen event
|
||||
let clearButton = document.querySelectorAll('div[id*="elem_upload"] button[aria-label="Clear"]');
|
||||
for (let button of clearButton) {
|
||||
button.addEventListener('click', function () {
|
||||
setTimeout(function () {
|
||||
register_upload_event();
|
||||
}, 50);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
// 第 2 部分: 复制按钮
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
|
||||
function addCopyButton(botElement) {
|
||||
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
||||
// Copy bot button
|
||||
@@ -45,11 +186,10 @@ function addCopyButton(botElement) {
|
||||
|
||||
const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
|
||||
if (messageBtnColumnElement) {
|
||||
// Do something if .message-btn-column exists, for example, remove it
|
||||
// messageBtnColumnElement.remove();
|
||||
// if .message-btn-column exists
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
var copyButton = document.createElement('button');
|
||||
copyButton.classList.add('copy-bot-btn');
|
||||
copyButton.setAttribute('aria-label', 'Copy');
|
||||
@@ -98,47 +238,63 @@ function chatbotContentChanged(attempt = 1, force = false) {
|
||||
}
|
||||
}
|
||||
|
||||
function chatbotAutoHeight(){
|
||||
// 自动调整高度
|
||||
function update_height(){
|
||||
var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
|
||||
if (panel_height_target!=chatbot_height)
|
||||
{
|
||||
var pixelString = panel_height_target.toString() + 'px';
|
||||
chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
|
||||
|
||||
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
// 第 3 部分: chatbot动态高度调整
|
||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
|
||||
function chatbotAutoHeight() {
    // auto-adjust height: immediately
    function update_height() {
        var { height_target, chatbot_height, chatbot } = get_elements(true);
        if (height_target != chatbot_height) {
            var pixelString = height_target.toString() + 'px';
            chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
        }
    }

    function update_height_slow(){
        var { panel_height_target, chatbot_height, chatbot } = get_elements();
        if (panel_height_target!=chatbot_height)
        {
            new_panel_height = (panel_height_target - chatbot_height)*0.5 + chatbot_height;
            if (Math.abs(new_panel_height - panel_height_target) < 10){
                new_panel_height = panel_height_target;
    // auto-adjust height: gradually
    function update_height_slow() {
        var { height_target, chatbot_height, chatbot } = get_elements();
        if (height_target != chatbot_height) {
            // sign = (height_target - chatbot_height)/Math.abs(height_target - chatbot_height);
            // speed = Math.max(Math.abs(height_target - chatbot_height), 1);
            new_panel_height = (height_target - chatbot_height) * 0.5 + chatbot_height;
            if (Math.abs(new_panel_height - height_target) < 10) {
                new_panel_height = height_target;
            }
            // console.log(chatbot_height, panel_height_target, new_panel_height);
            var pixelString = new_panel_height.toString() + 'px';
            chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
            chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
        }
    }

    monitoring_input_box()
    update_height();
    setInterval(function() {
        update_height_slow()
    }, 50); // run every 50 ms
    window.addEventListener('resize', function() { update_height(); });
    window.addEventListener('scroll', function() { update_height_slow(); });
    setInterval(function () { update_height_slow() }, 50); // run every 50 ms
}
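// Illustrative sketch, not part of this diff: update_height_slow() above eases toward
// the target by moving half of the remaining gap per tick and snapping once it is
// within 10 px. The function and variable names below are examples only.
function ease_towards(current_px, target_px) {
    let next = (target_px - current_px) * 0.5 + current_px;   // close half the gap
    if (Math.abs(next - target_px) < 10) { next = target_px; } // snap when close enough
    return next;
}
// e.g. ease_towards(600, 800) -> 700, then 750, 775, ... and finally snaps to 800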

function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (LAYOUT === "LEFT-RIGHT") {chatbotAutoHeight();}
swapped = false;
function swap_input_area() {
    // Get the elements to be swapped
    var element1 = document.querySelector("#input-panel");
    var element2 = document.querySelector("#basic-panel");

    // Get the parent of the elements
    var parent = element1.parentNode;

    // Get the next sibling of element2
    var nextSibling = element2.nextSibling;

    // Swap the elements
    parent.insertBefore(element2, element1);
    parent.insertBefore(element1, nextSibling);
    if (swapped) {swapped = false;}
    else {swapped = true;}
}

function get_elements(consider_state_panel=false) {
function get_elements(consider_state_panel = false) {
    var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
    if (!chatbot) {
        chatbot = document.querySelector('#gpt-chatbot');
@@ -147,17 +303,346 @@ function get_elements(consider_state_panel=false) {
    const panel2 = document.querySelector('#basic-panel').getBoundingClientRect()
    const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect();
    // const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
    const panel5 = document.querySelector('#input-panel2').getBoundingClientRect();
    const panel_active = document.querySelector('#state-panel').getBoundingClientRect();
    if (consider_state_panel || panel_active.height < 25){
    if (consider_state_panel || panel_active.height < 25) {
        document.state_panel_height = panel_active.height;
    }
    // 25 is the chatbot label height, 16 is the gap on the right
    var panel_height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16*2;
    var height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16 * 2;
    // keep the dynamic state-panel height from affecting the target
    panel_height_target = panel_height_target + (document.state_panel_height-panel_active.height)
    var panel_height_target = parseInt(panel_height_target);
    height_target = height_target + (document.state_panel_height - panel_active.height)
    var height_target = parseInt(height_target);
    var chatbot_height = chatbot.style.height;
    // swap the input area position so the input area always stays usable
    if (!swapped){
        if (panel1.top!=0 && (panel1.bottom + panel1.top)/2 < 0){ swap_input_area(); }
    }
    else if (swapped){
        if (panel2.top!=0 && panel2.top > 0){ swap_input_area(); }
    }
    // adjust height
    const err_tor = 5;
    if (Math.abs(panel1.left - chatbot.getBoundingClientRect().left) < err_tor){
        // narrow-screen mode?
        height_target = window.innerHeight * 0.6;
    }else{
        // adjust height
        const chatbot_height_exceed = 15;
        const chatbot_height_exceed_m = 10;
        b_panel = Math.max(panel1.bottom, panel2.bottom, panel3.bottom)
        if (b_panel >= window.innerHeight - chatbot_height_exceed) {
            height_target = window.innerHeight - chatbot.getBoundingClientRect().top - chatbot_height_exceed_m;
        }
        else if (b_panel < window.innerHeight * 0.75) {
            height_target = window.innerHeight * 0.8;
        }
    }
    var chatbot_height = parseInt(chatbot_height);
    return { panel_height_target, chatbot_height, chatbot };
}
    return { height_target, chatbot_height, chatbot };
}
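// Worked example with illustrative numbers (not taken from this diff): with
// panel1 = 120 px, panel2 = 300 px and panel3 = 200 px, the wide-screen target is
// 120 + 300 + 200 - 25 (chatbot label) + 16 * 2 (right-hand gaps) = 627 px,
// before the state-panel correction and the narrow-screen / overflow overrides apply.
var example_height_target = 120 + 300 + 200 - 25 + 16 * 2; // 627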



// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 4: paste and drag-and-drop file upload
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

var elem_upload = null;
var elem_upload_float = null;
var elem_input_main = null;
var elem_input_float = null;
var elem_chatbot = null;
var elem_upload_component_float = null;
var elem_upload_component = null;
var exist_file_msg = '⚠️请先删除上传区(左上方)中的历史文件,再尝试上传。'

function locate_upload_elems(){
    elem_upload = document.getElementById('elem_upload')
    elem_upload_float = document.getElementById('elem_upload_float')
    elem_input_main = document.getElementById('user_input_main')
    elem_input_float = document.getElementById('user_input_float')
    elem_chatbot = document.getElementById('gpt-chatbot')
    elem_upload_component_float = elem_upload_float.querySelector("input[type=file]");
    elem_upload_component = elem_upload.querySelector("input[type=file]");
}

async function upload_files(files) {
    let totalSizeMb = 0
    elem_upload_component_float = elem_upload_float.querySelector("input[type=file]");
    if (files && files.length > 0) {
        // run the actual upload logic
        if (elem_upload_component_float) {
            for (let i = 0; i < files.length; i++) {
                // convert each file size (in bytes) to MB
                totalSizeMb += files[i].size / 1024 / 1024;
            }
            // check whether the total size exceeds 20 MB
            if (totalSizeMb > 20) {
                toast_push('⚠️文件夹大于 20MB 🚀上传文件中', 3000);
            }
            let event = new Event("change");
            Object.defineProperty(event, "target", { value: elem_upload_component_float, enumerable: true });
            Object.defineProperty(event, "currentTarget", { value: elem_upload_component_float, enumerable: true });
            Object.defineProperty(elem_upload_component_float, "files", { value: files, enumerable: true });
            elem_upload_component_float.dispatchEvent(event);
        } else {
            console.log(exist_file_msg);
            toast_push(exist_file_msg, 3000);
        }
    }
}
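// Sketch of an alternative approach, not what this diff does: modern browsers allow
// building a FileList with DataTransfer and assigning it to the input directly,
// instead of overriding the "files" property with Object.defineProperty.
// "assign_files_via_datatransfer" is an illustrative name, not from the repo.
function assign_files_via_datatransfer(file_input, files) {
    const dt = new DataTransfer();
    for (const f of files) { dt.items.add(f); }   // add each File to the list
    file_input.files = dt.files;                  // a FileList is assignable here
    file_input.dispatchEvent(new Event("change", { bubbles: true }));
}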

function register_func_paste(input) {
    let paste_files = [];
    if (input) {
        input.addEventListener("paste", async function (e) {
            const clipboardData = e.clipboardData || window.clipboardData;
            const items = clipboardData.items;
            if (items) {
                for (i = 0; i < items.length; i++) {
                    if (items[i].kind === "file") { // make sure the item is a file
                        const file = items[i].getAsFile();
                        // add each pasted file to the files array
                        paste_files.push(file);
                        e.preventDefault(); // avoid pasting the file name into the input box
                    }
                }
                if (paste_files.length > 0) {
                    // batch-upload the collected file list
                    await upload_files(paste_files);
                    paste_files = []

                }
            }
        });
    }
}

function register_func_drag(elem) {
    if (elem) {
        const dragEvents = ["dragover"];
        const leaveEvents = ["dragleave", "dragend", "drop"];

        const onDrag = function (e) {
            e.preventDefault();
            e.stopPropagation();
            if (elem_upload_float.querySelector("input[type=file]")) {
                toast_up('⚠️释放以上传文件')
            } else {
                toast_up(exist_file_msg)
            }
        };

        const onLeave = function (e) {
            toast_down();
            e.preventDefault();
            e.stopPropagation();
        };

        dragEvents.forEach(event => {
            elem.addEventListener(event, onDrag);
        });

        leaveEvents.forEach(event => {
            elem.addEventListener(event, onLeave);
        });

        elem.addEventListener("drop", async function (e) {
            const files = e.dataTransfer.files;
            await upload_files(files);
        });
    }
}
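// Note, as an illustrative sketch rather than part of the diff: a drop target only
// receives the "drop" event if "dragover" calls preventDefault(), which is why
// register_func_drag wires up the dragover handler first. The element id below is
// hypothetical.
const example_zone = document.getElementById('example-drop-zone'); // hypothetical id
if (example_zone) {
    example_zone.addEventListener('dragover', e => e.preventDefault());
    example_zone.addEventListener('drop', e => {
        e.preventDefault();
        console.log(e.dataTransfer.files); // FileList of everything dropped
    });
}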

function elem_upload_component_pop_message(elem) {
    if (elem) {
        const dragEvents = ["dragover"];
        const leaveEvents = ["dragleave", "dragend", "drop"];
        dragEvents.forEach(event => {
            elem.addEventListener(event, function (e) {
                e.preventDefault();
                e.stopPropagation();
                if (elem_upload_float.querySelector("input[type=file]")) {
                    toast_up('⚠️释放以上传文件')
                } else {
                    toast_up(exist_file_msg)
                }
            });
        });
        leaveEvents.forEach(event => {
            elem.addEventListener(event, function (e) {
                toast_down();
                e.preventDefault();
                e.stopPropagation();
            });
        });
        elem.addEventListener("drop", async function (e) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    }
}

function register_upload_event() {
    locate_upload_elems();
    if (elem_upload_float) {
        _upload = document.querySelector("#elem_upload_float div.center.boundedheight.flex")
        elem_upload_component_pop_message(_upload);
    }
    if (elem_upload_component_float) {
        elem_upload_component_float.addEventListener('change', function (event) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    }
    if (elem_upload_component) {
        elem_upload_component.addEventListener('change', function (event) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    }else{
        toast_push("oppps", 3000);
    }
}

function monitoring_input_box() {
    register_upload_event();

    if (elem_input_main) {
        if (elem_input_main.querySelector("textarea")) {
            register_func_paste(elem_input_main.querySelector("textarea"))
        }
    }
    if (elem_input_float) {
        if (elem_input_float.querySelector("textarea")) {
            register_func_paste(elem_input_float.querySelector("textarea"))
        }
    }
    if (elem_chatbot) {
        register_func_drag(elem_chatbot)
    }

}


// watch for page changes
window.addEventListener("DOMContentLoaded", function () {
    // const ga = document.getElementsByTagName("gradio-app");
    gradioApp().addEventListener("render", monitoring_input_box);
});



// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 5: audio button style changes
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function audio_fn_init() {
    let audio_component = document.getElementById('elem_audio');
    if (audio_component) {
        let buttonElement = audio_component.querySelector('button');
        let specificElement = audio_component.querySelector('.hide.sr-only');
        specificElement.remove();

        buttonElement.childNodes[1].nodeValue = '启动麦克风';
        buttonElement.addEventListener('click', function (event) {
            event.stopPropagation();
            toast_push('您启动了麦克风!下一步请点击“实时语音对话”启动语音对话。');
        });

        // find the voice plugin button
        let buttons = document.querySelectorAll('button');
        let audio_button = null;
        for (let button of buttons) {
            if (button.textContent.includes('语音')) {
                audio_button = button;
                break;
            }
        }
        if (audio_button) {
            audio_button.addEventListener('click', function () {
                toast_push('您点击了“实时语音对话”启动语音对话。');
            });
            let parent_element = audio_component.parentElement; // move buttonElement into audio_button
            audio_button.appendChild(audio_component);
            buttonElement.style.cssText = 'border-color: #00ffe0;border-width: 2px; height: 25px;'
            parent_element.remove();
            audio_component.style.cssText = 'width: 250px;right: 0px;display: inline-flex;flex-flow: row-reverse wrap;place-content: stretch space-between;align-items: center;background-color: #ffffff00;';
        }

    }
}

function minor_ui_adjustment() {
    let cbsc_area = document.getElementById('cbsc');
    cbsc_area.style.paddingTop = '15px';
    var bar_btn_width = [];
    // auto-hide toolbar buttons that no longer fit
    function auto_hide_toolbar() {
        var qq = document.getElementById('tooltip');
        var tab_nav = qq.getElementsByClassName('tab-nav');
        if (tab_nav.length == 0){ return; }
        var btn_list = tab_nav[0].getElementsByTagName('button')
        if (btn_list.length == 0){ return; }
        // get the page width
        var page_width = document.documentElement.clientWidth;
        // number of buttons that are always kept visible
        const always_preserve = 2;
        // right edge of the last always-kept button
        var cur_right = btn_list[always_preserve-1].getBoundingClientRect().right;
        if (bar_btn_width.length == 0){
            // first run: record each button's width
            for (var i = 0; i < btn_list.length; i++) {
                bar_btn_width.push(btn_list[i].getBoundingClientRect().width);
            }
        }
        // process each button
        for (var i = always_preserve; i < btn_list.length; i++) {
            var element = btn_list[i];
            var element_right = element.getBoundingClientRect().right;
            if (element_right!=0){ cur_right = element_right; }
            if (element.style.display === 'none') {
                if ((cur_right + bar_btn_width[i]) < (page_width * 0.37)) {
                    // show the current button again
                    element.style.display = 'block';
                    // console.log('show');
                    return;
                }else{
                    return;
                }
            } else {
                if (cur_right > (page_width * 0.38)) {
                    // hide the current button and every button to its right
                    for (var j = i; j < btn_list.length; j++) {
                        if (btn_list[j].style.display !== 'none') {
                            btn_list[j].style.display = 'none';
                        }
                    }
                    // console.log('show');
                    return;
                }
            }
        }
    }

    setInterval(function () {
        auto_hide_toolbar()
    }, 200); // run every 200 ms
}
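// Illustrative sketch (names are not from the diff): auto_hide_toolbar uses two
// thresholds, hiding a button once its right edge passes 38 % of the page width and
// only restoring it when it would fit back under 37 %, so buttons sitting near the
// cut-off do not flicker between the two states on every tick.
function should_hide_button(right_px, page_width, currently_hidden) {
    if (currently_hidden) {
        return !(right_px < page_width * 0.37); // only restore well inside the limit
    }
    return right_px > page_width * 0.38;        // only hide once clearly past the limit
}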

// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 6: JS init function
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
    audio_fn_init();
    minor_ui_adjustment();
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (LAYOUT === "LEFT-RIGHT") { chatbotAutoHeight(); }
}

@@ -17,7 +17,7 @@
    --button-primary-text-color-hover: #FFFFFF;
    --button-secondary-text-color: #FFFFFF;
    --button-secondary-text-color-hover: #FFFFFF;

    --border-bottom-right-radius: 0px;
    --border-bottom-left-radius: 0px;
@@ -51,8 +51,8 @@
    --button-primary-border-color-hover: #3cff00;
    --button-secondary-border-color: #3cff00;
    --button-secondary-border-color-hover: #3cff00;

    --body-background-fill: #000000;
    --background-fill-primary: #000000;
    --background-fill-secondary: #000000;
@@ -103,7 +103,7 @@
    --button-primary-text-color-hover: #FFFFFF;
    --button-secondary-text-color: #FFFFFF;
    --button-secondary-text-color-hover: #FFFFFF;

    --border-bottom-right-radius: 0px;
@@ -138,8 +138,8 @@
    --button-primary-border-color-hover: #3cff00;
    --button-secondary-border-color: #3cff00;
    --button-secondary-border-color-hover: #3cff00;

    --body-background-fill: #000000;
    --background-fill-primary: #000000;
    --background-fill-secondary: #000000;
@@ -479,4 +479,3 @@
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */

@@ -1,16 +1,26 @@
import os
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')

CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
theme_dir = os.path.dirname(__file__)


def adjust_theme():

    try:
        color_er = gr.themes.utils.colors.fuchsia
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"],
            font_mono=["ui-monospace", "Consolas", "monospace"])
            font=[
                "Helvetica",
                "Microsoft YaHei",
                "ui-sans-serif",
                "sans-serif",
                "system-ui",
            ],
            font_mono=["ui-monospace", "Consolas", "monospace"],
        )
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
@@ -57,9 +67,9 @@ def adjust_theme():
            button_cancel_text_color_dark="white",
        )

        with open('themes/common.js', 'r', encoding='utf8') as f:
        with open(os.path.join(theme_dir, "common.js"), "r", encoding="utf8") as f:
            js = f"<script>{f.read()}</script>"

        # add a cute waifu mascot
        if ADD_WAIFU:
            js += """
@@ -67,19 +77,26 @@ def adjust_theme():
                <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
                <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
        gradio_original_template_fn = gr.routes.templates.TemplateResponse
        if not hasattr(gr, "RawTemplateResponse"):
            gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
        gradio_original_template_fn = gr.RawTemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template

        gr.routes.templates.TemplateResponse = (
            gradio_new_template_fn  # override gradio template
        )
    except:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')
        print("gradio版本较旧, 不能自定义字体和颜色")
    return set_theme

with open("themes/contrast.css", "r", encoding="utf-8") as f:

with open(os.path.join(theme_dir, "contrast.css"), "r", encoding="utf-8") as f:
    advanced_css = f.read()
with open("themes/common.css", "r", encoding="utf-8") as f:
with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
    advanced_css += f.read()
0
themes/cookies.py
Normal file
@@ -303,4 +303,3 @@
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */

Some files were not shown because too many files have changed in this diff.