Compare commits
2 commits: 2706263a4b...batch-file

| Author | SHA1 | Date |
|---|---|---|
|  | 36e50d490d |  |
|  | 9172337695 |  |
.github/workflows/conda-pack-windows.yml (vendored, 56 changes)

@@ -1,56 +0,0 @@
-name: Create Conda Environment Package
-
-on:
-  workflow_dispatch:
-
-jobs:
-  build:
-    runs-on: windows-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Setup Miniconda
-        uses: conda-incubator/setup-miniconda@v3
-        with:
-          auto-activate-base: true
-          activate-environment: ""
-
-      - name: Create new Conda environment
-        shell: bash -l {0}
-        run: |
-          conda create -n gpt python=3.11 -y
-          conda activate gpt
-
-      - name: Install requirements
-        shell: bash -l {0}
-        run: |
-          conda activate gpt
-          pip install -r requirements.txt
-
-      - name: Install conda-pack
-        shell: bash -l {0}
-        run: |
-          conda activate gpt
-          conda install conda-pack -y
-
-      - name: Pack conda environment
-        shell: bash -l {0}
-        run: |
-          conda activate gpt
-          conda pack -n gpt -o gpt.tar.gz
-
-      - name: Create workspace zip
-        shell: pwsh
-        run: |
-          mkdir workspace
-          Get-ChildItem -Exclude "workspace" | Copy-Item -Destination workspace -Recurse
-          Remove-Item -Path workspace/.git* -Recurse -Force -ErrorAction SilentlyContinue
-          Copy-Item gpt.tar.gz workspace/ -Force
-
-      - name: Upload packed files
-        uses: actions/upload-artifact@v4
-        with:
-          name: gpt-academic-package
-          path: workspace
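The deleted workflow built a relocatable environment with conda-pack. A minimal sketch of how such a `gpt.tar.gz` artifact is restored on a target machine, following conda-pack's documented unpack flow (the target directory name is illustrative, and the exact location of the `conda-unpack` script inside the archive depends on the platform):

```python
# Sketch only: restore an environment packed by `conda pack -n gpt -o gpt.tar.gz`.
import subprocess
import tarfile
from pathlib import Path

env_dir = Path("gpt_env")          # illustrative target directory
env_dir.mkdir(exist_ok=True)

# Extract the packed environment produced by the deleted workflow.
with tarfile.open("gpt.tar.gz", "r:gz") as tar:
    tar.extractall(env_dir)

# conda-pack ships a conda-unpack script that rewrites hard-coded prefix paths;
# it lives under Scripts/ on Windows and bin/ on Linux/macOS.
unpack = env_dir / "Scripts" / "conda-unpack.exe"
if not unpack.exists():
    unpack = env_dir / "bin" / "conda-unpack"
subprocess.run([str(unpack)], check=True)
```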
.github/workflows/stale.yml (vendored, 7 changes)

@@ -7,7 +7,7 @@
 name: 'Close stale issues and PRs'
 on:
   schedule:
-    - cron: '*/30 * * * *'
+    - cron: '*/5 * * * *'
 
 jobs:
   stale:
@@ -19,6 +19,7 @@ jobs:
     steps:
       - uses: actions/stale@v8
         with:
-          stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
+          stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 1 days.'
           days-before-stale: 100
-          days-before-close: 7
+          days-before-close: 1
+          debug-only: true
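The head branch tightens the schedule from every 30 minutes to every 5 and closes stale issues after 1 day instead of 7, with `debug-only: true` so no issue is actually closed. Purely for illustration, the two cron expressions can be compared with the third-party croniter package (a demo-only assumption; the workflow itself does not use it):

```python
from datetime import datetime
from croniter import croniter  # pip install croniter (demo-only dependency)

base = datetime(2024, 1, 1, 0, 0)
for expr in ("*/30 * * * *", "*/5 * * * *"):
    itr = croniter(expr, base)
    runs = [itr.get_next(datetime).strftime("%H:%M") for _ in range(3)]
    print(expr, "->", runs)
# */30 * * * * -> ['00:30', '01:00', '01:30']   (twice per hour)
# */5 * * * *  -> ['00:05', '00:10', '00:15']   (twelve times per hour)
```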
.gitignore (vendored, 4 changes)

@@ -160,6 +160,4 @@ test.*
 temp.*
 objdump*
 *.min.*.js
 TODO
-experimental_mods
-search_results
Dockerfile (27 changes)

@@ -3,36 +3,37 @@
 # - 如何构建: 先修改 `config.py`, 然后 `docker build -t gpt-academic . `
 # - 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic `
 # - 如何运行(其他操作系统,选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
+FROM python:3.11
 
-FROM ghcr.io/astral-sh/uv:python3.12-bookworm
 
 # 非必要步骤,更换pip源 (以下三行,可以删除)
 RUN echo '[global]' > /etc/pip.conf && \
     echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
     echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
 
-# 语音输出功能(以下1,2行更换阿里源,第3,4行安装ffmpeg,都可以删除)
-RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
-    sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
-    apt-get update
+# 语音输出功能(以下两行,第一行更换阿里源,第二行安装ffmpeg,都可以删除)
+RUN UBUNTU_VERSION=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release); echo "deb https://mirrors.aliyun.com/debian/ $UBUNTU_VERSION main non-free contrib" > /etc/apt/sources.list; apt-get update
 RUN apt-get install ffmpeg -y
-RUN apt-get clean
 
 # 进入工作路径(必要)
 WORKDIR /gpt
 
 
 # 安装大部分依赖,利用Docker缓存加速以后的构建 (以下两行,可以删除)
 COPY requirements.txt ./
-RUN uv venv --python=3.12 && uv pip install --verbose -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-ENV PATH="/gpt/.venv/bin:$PATH"
-RUN python -c 'import loguru'
+RUN pip3 install -r requirements.txt
 
 # 装载项目文件,安装剩余依赖(必要)
 COPY . .
-RUN uv venv --python=3.12 && uv pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
+RUN pip3 install -r requirements.txt
 
 
+# 非必要步骤,用于预热模块(可以删除)
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
 
-# # 非必要步骤,用于预热模块(可以删除)
-RUN python -c 'from check_proxy import warm_up_modules; warm_up_modules()'
 
 # 启动(必要)
-CMD ["bash", "-c", "python main.py"]
+CMD ["python3", "-u", "main.py"]
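The base side builds with uv inside a `ghcr.io/astral-sh/uv` image; the head reverts to plain `python:3.11` with pip. Either way, the build and run commands are the ones in the Dockerfile's own header comments; as a sketch, the same commands driven from Python via the third-party docker SDK (an assumption for illustration; the comments use the docker CLI directly):

```python
import docker  # pip install docker (demo-only dependency)

client = docker.from_env()

# Equivalent to the comment's `docker build -t gpt-academic .`
image, _ = client.images.build(path=".", tag="gpt-academic")

# Equivalent to `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic`
container = client.containers.run(
    "gpt-academic",
    detach=True,
    auto_remove=True,                     # --rm
    environment={"WEB_PORT": "50923"},    # -e WEB_PORT=50923
    ports={"50923/tcp": 50923},           # -p 50923:50923
)
```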
README.md (53 changes)

@@ -1,15 +1,9 @@
 > [!IMPORTANT]
-> `master主分支`最新动态(2025.7.31): 新GUI前端,Coming Soon
-> `master主分支`最新动态(2025.3.2): 修复大量代码typo / 联网组件支持Jina的api / 增加deepseek-r1支持
-> `frontier开发分支`最新动态(2024.12.9): 更新对话时间线功能,优化xelatex论文翻译
-> `wiki文档`最新动态(2024.12.5): 更新ollama接入指南
->
-> 2025.2.2: 三分钟快速接入最强qwen2.5-max[视频](https://www.bilibili.com/video/BV1LeFuerEG4)
-> 2025.2.1: 支持自定义字体
-> 2024.10.10: 突发停电,紧急恢复了提供[whl包](https://drive.google.com/drive/folders/14kR-3V-lIbvGxri4AHc8TpiA1fqsw7SK?usp=sharing)的文件服务器
+> 2024.10.10: 突发停电,紧急恢复了提供[whl包](https://drive.google.com/file/d/19U_hsLoMrjOlQSzYS3pzWX9fTzyusArP/view?usp=sharing)的文件服务器
+> 2024.10.8: 版本3.90加入对llama-index的初步支持,版本3.80加入插件二级菜单功能(详见wiki)
 > 2024.5.1: 加入Doc2x翻译PDF论文的功能,[查看详情](https://github.com/binary-husky/gpt_academic/wiki/Doc2x)
 > 2024.3.11: 全力支持Qwen、GLM、DeepseekCoder等中文大语言模型! SoVits语音克隆模块,[查看详情](https://www.bilibili.com/video/BV1Rp421S7tF/)
-> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。
+> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
 
 <br>
 
@@ -130,20 +124,20 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
 
 ```mermaid
 flowchart TD
-    A{"安装方法"} --> W1("I 🔑直接运行 (Windows, Linux or MacOS)")
-    W1 --> W11["1 Python pip包管理依赖"]
-    W1 --> W12["2 Anaconda包管理依赖(推荐⭐)"]
+    A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)")
+    W1 --> W11["1. Python pip包管理依赖"]
+    W1 --> W12["2. Anaconda包管理依赖(推荐⭐)"]
 
-    A --> W2["II 🐳使用Docker (Windows, Linux or MacOS)"]
+    A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"]
 
-    W2 --> k1["1 部署项目全部能力的大镜像(推荐⭐)"]
-    W2 --> k2["2 仅在线模型(GPT, GLM4等)镜像"]
-    W2 --> k3["3 在线模型 + Latex的大镜像"]
+    W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"]
+    W2 --> k2["2. 仅在线模型(GPT, GLM4等)镜像"]
+    W2 --> k3["3. 在线模型 + Latex的大镜像"]
 
-    A --> W4["IV 🚀其他部署方法"]
-    W4 --> C1["1 Windows/MacOS 一键安装运行脚本(推荐⭐)"]
-    W4 --> C2["2 Huggingface, Sealos远程部署"]
-    W4 --> C4["3 其他 ..."]
+    A --> W4["IV. 🚀其他部署方法"]
+    W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"]
+    W4 --> C2["2. Huggingface, Sealos远程部署"]
+    W4 --> C4["3. ... 其他 ..."]
 ```
 
 ### 安装方法I:直接运行 (Windows, Linux or MacOS)
@@ -176,32 +170,26 @@ flowchart TD
 ```
 
 
-<details><summary>如果需要支持清华ChatGLM系列/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
+<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
 <p>
 
-【可选步骤】如果需要支持清华ChatGLM系列/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
+【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
 
 ```sh
 # 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
 python -m pip install -r request_llms/requirements_chatglm.txt
 
-# 【可选步骤II】支持清华ChatGLM4 注意:此模型至少需要24G显存
-python -m pip install -r request_llms/requirements_chatglm4.txt
-# 可使用modelscope下载ChatGLM4模型
-# pip install modelscope
-# modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat
-
-# 【可选步骤III】支持复旦MOSS
+# 【可选步骤II】支持复旦MOSS
 python -m pip install -r request_llms/requirements_moss.txt
 git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 注意执行此行代码时,必须处于项目根路径
 
-# 【可选步骤IV】支持RWKV Runner
+# 【可选步骤III】支持RWKV Runner
 参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
 
-# 【可选步骤V】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
+# 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
-# 【可选步骤VI】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择)
+# 【可选步骤V】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择)
 pip install bitsandbyte
 # windows用户安装bitsandbytes需要使用下面bitsandbytes-windows-webui
 python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
@@ -429,6 +417,7 @@ timeline LR
 1. `master` 分支: 主分支,稳定版
 2. `frontier` 分支: 开发分支,测试版
 3. 如何[接入其他大模型](request_llms/README.md)
+4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)
 
 ### V:参考与学习
 
config.py (89 changes)

@@ -7,16 +7,11 @@
 Configuration reading priority: environment variable > config_private.py > config.py
 """
 
-# [step 1-1]>> ( 接入OpenAI模型家族 ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
-API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
-
-# [step 1-2]>> ( 强烈推荐!接入通义家族 & 大模型服务平台百炼 ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/
-DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY(用于接入qwen-max,dashscope-qwen3-14b,dashscope-deepseek-r1等)
-
-# [step 1-3]>> ( 接入 deepseek-reasoner, 即 deepseek-r1 ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
-DEEPSEEK_API_KEY = ""
-
-# [step 2]>> 改为True应用代理。如果使用本地或无地域限制的大模型时,此处不修改;如果直接在海外服务器部署,此处不修改
+# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
+API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
+
+# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改
 USE_PROXY = False
 if USE_PROXY:
     """
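The proxies dictionary configured under USE_PROXY follows the proxy-mapping format the requests library accepts; a minimal sketch (the endpoint is hypothetical, and socks5h URLs require the requests[socks] extra):

```python
import requests  # socks5h URLs need: pip install "requests[socks]"

proxies = {
    "http":  "socks5h://127.0.0.1:10880",   # hypothetical local proxy
    "https": "socks5h://127.0.0.1:10880",
}
resp = requests.get("https://api.openai.com/v1/models", proxies=proxies, timeout=10)
print(resp.status_code)
```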
@@ -37,16 +32,11 @@ else:
 
 # [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024-12-17", "o1-preview", "o1-preview-2024-09-12",
-                    "gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
+AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
                     "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
                     "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
-                    "gemini-1.5-pro", "chatglm3", "chatglm4",
-                    "deepseek-chat", "deepseek-coder", "deepseek-reasoner",
-                    "volcengine-deepseek-r1-250120", "volcengine-deepseek-v3-241226",
-                    "dashscope-deepseek-r1", "dashscope-deepseek-v3",
-                    "dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
+                    "gemini-1.5-pro", "chatglm3"
                     ]
 
 EMBEDDING_MODEL = "text-embedding-3-small"
@@ -57,7 +47,7 @@ EMBEDDING_MODEL = "text-embedding-3-small"
 # "glm-4-0520", "glm-4-air", "glm-4-airx", "glm-4-flash",
 # "qianfan", "deepseekcoder",
 # "spark", "sparkv2", "sparkv3", "sparkv3.5", "sparkv4",
-# "qwen-turbo", "qwen-plus", "qwen-local",
+# "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
 # "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
 # "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"
 # "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
@@ -65,7 +55,6 @@ EMBEDDING_MODEL = "text-embedding-3-small"
 # "deepseek-chat" ,"deepseek-coder",
 # "gemini-1.5-flash",
 # "yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview",
-# "grok-beta",
 # ]
 # --- --- --- ---
 # 此外,您还可以在接入one-api/vllm/ollama/Openroute时,
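The comment block above describes the `one-api-*(max_token=...)` naming convention for models wired in through one-api/vllm/ollama/OpenRouter. Purely as an illustration, one way such a tag could be parsed; the regex is an assumption inferred from the example names, not code from the repository:

```python
import re

# Hypothetical parser for names like "one-api-claude-3-sonnet-20240229(max_token=100000)".
name = "one-api-claude-3-sonnet-20240229(max_token=100000)"
m = re.match(r"one-api-(?P<model>.+?)\(max_token=(?P<max_token>\d+)\)$", name)
assert m is not None
assert m.group("model") == "claude-3-sonnet-20240229"
assert int(m.group("max_token")) == 100000
```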
@@ -84,7 +73,7 @@ API_URL_REDIRECT = {}
 
 # 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
 # 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
-DEFAULT_WORKER_NUM = 8
+DEFAULT_WORKER_NUM = 3
 
 
 # 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
@@ -92,31 +81,6 @@ DEFAULT_WORKER_NUM = 8
 THEME = "Default"
 AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
 
-FONT = "Theme-Default-Font"
-AVAIL_FONTS = [
-    "默认值(Theme-Default-Font)",
-    "宋体(SimSun)",
-    "黑体(SimHei)",
-    "楷体(KaiTi)",
-    "仿宋(FangSong)",
-    "华文细黑(STHeiti Light)",
-    "华文楷体(STKaiti)",
-    "华文仿宋(STFangsong)",
-    "华文宋体(STSong)",
-    "华文中宋(STZhongsong)",
-    "华文新魏(STXinwei)",
-    "华文隶书(STLiti)",
-    # 备注:以下字体需要网络支持,您可以自定义任意您喜欢的字体,如下所示,需要满足的格式为 "字体昵称(字体英文真名@字体css下载链接)"
-    "思源宋体(Source Han Serif CN VF@https://chinese-fonts-cdn.deno.dev/packages/syst/dist/SourceHanSerifCN/result.css)",
-    "月星楷(Moon Stars Kai HW@https://chinese-fonts-cdn.deno.dev/packages/moon-stars-kai/dist/MoonStarsKaiHW-Regular/result.css)",
-    "珠圆体(MaokenZhuyuanTi@https://chinese-fonts-cdn.deno.dev/packages/mkzyt/dist/猫啃珠圆体/result.css)",
-    "平方萌萌哒(PING FANG MENG MNEG DA@https://chinese-fonts-cdn.deno.dev/packages/pfmmd/dist/平方萌萌哒/result.css)",
-    "Helvetica",
-    "ui-sans-serif",
-    "sans-serif",
-    "system-ui"
-]
-
 
 # 默认的系统提示词(system prompt)
 INIT_SYS_PROMPT = "Serve me as a writing and programming assistant."
@@ -168,15 +132,16 @@ MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
 QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
 
 
+# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
+DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
+
 
 # 百度千帆(LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
 BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat", "ERNIE-Speed-128K", "ERNIE-Speed-8K", "ERNIE-Lite-8K"
 
 
-# 如果使用ChatGLM3或ChatGLM4本地模型,请把 LLM_MODEL="chatglm3" 或LLM_MODEL="chatglm4",并在此处指定模型路径
-CHATGLM_LOCAL_MODEL_PATH = "THUDM/glm-4-9b-chat" # 例如"/home/hmp/ChatGLM3-6B/"
-
 # 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径
 CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
 
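The hunk header above shows MULTI_QUERY_LLM_MODELS, which config.py documents as `&`-separated model names. A trivial sketch of how such a value splits into the individual models:

```python
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
models = MULTI_QUERY_LLM_MODELS.split("&")
assert models == ["gpt-3.5-turbo", "chatglm3"]
```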
@@ -270,15 +235,13 @@ MOONSHOT_API_KEY = ""
 YIMODEL_API_KEY = ""
 
 
-# 接入火山引擎的在线大模型),api-key获取地址 https://console.volcengine.com/ark/region:ark+cn-beijing/endpoint
-ARK_API_KEY = "00000000-0000-0000-0000-000000000000" # 火山引擎 API KEY
+# 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
+DEEPSEEK_API_KEY = ""
 
 # 紫东太初大模型 https://ai-maas.wair.ac.cn
 TAICHU_API_KEY = ""
 
-# Grok API KEY
-GROK_API_KEY = ""
-
 # Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
 MATHPIX_APPID = ""
@@ -310,8 +273,8 @@ GROBID_URLS = [
 ]
 
 
-# Searxng互联网检索服务(这是一个huggingface空间,请前往huggingface复制该空间,然后把自己新的空间地址填在这里)
-SEARXNG_URLS = [ f"https://kaletianlre-beardvs{i}dd.hf.space/" for i in range(1,5) ]
+# Searxng互联网检索服务
+SEARXNG_URL = "https://cloud-1.agent-matrix.com/"
 
 
 # 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
@@ -335,7 +298,7 @@ ARXIV_CACHE_DIR = "gpt_log/arxiv_cache"
 
 
 # 除了连接OpenAI之外,还有哪些场合允许使用代理,请尽量不要修改
-WHEN_TO_USE_PROXY = ["Connect_OpenAI", "Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
                      "Warmup_Modules", "Nougat_Download", "AutoGen", "Connect_OpenAI_Embedding"]
 
 
@@ -347,23 +310,6 @@ PLUGIN_HOT_RELOAD = False
 NUM_CUSTOM_BASIC_BTN = 4
 
 
-# 媒体智能体的服务地址(这是一个huggingface空间,请前往huggingface复制该空间,然后把自己新的空间地址填在这里)
-DAAS_SERVER_URLS = [ f"https://niuziniu-biligpt{i}.hf.space/stream" for i in range(1,5) ]
-
-
-# 在互联网搜索组件中,负责将搜索结果整理成干净的Markdown
-JINA_API_KEY = ""
-
-
-# 是否自动裁剪上下文长度(是否启动,默认不启动)
-AUTO_CONTEXT_CLIP_ENABLE = False
-# 目标裁剪上下文的token长度(如果超过这个长度,则会自动裁剪)
-AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN = 30*1000
-# 无条件丢弃x以上的轮数
-AUTO_CONTEXT_MAX_ROUND = 64
-# 在裁剪上下文时,倒数第x次对话能“最多”保留的上下文token的比例占 AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN 的多少
-AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0.12, 0.10, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
-
 
 """
 --------------- 配置关联关系说明 ---------------
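The removed AUTO_CONTEXT_* settings describe a token-budget schedule: clipping triggers at 30k tokens, at most 64 rounds are kept unconditionally, and the i-th most recent turn may keep at most ratio[i] of the trigger length. A hypothetical sketch of how such a schedule could be applied; this interprets the comments above and is not the project's actual clipping code:

```python
# Hypothetical illustration of the removed clip-ratio schedule.
AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN = 30 * 1000
AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20]  # truncated for brevity

def per_turn_token_budget(turn_token_counts):
    """turn_token_counts: token counts per turn, most recent first."""
    budget = []
    for i, n_tokens in enumerate(turn_token_counts):
        ratio = AUTO_CONTEXT_MAX_CLIP_RATIO[min(i, len(AUTO_CONTEXT_MAX_CLIP_RATIO) - 1)]
        # each older turn is allowed a smaller share of the trigger length
        budget.append(min(n_tokens, int(ratio * AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN)))
    return budget

print(per_turn_token_budget([40_000, 40_000, 40_000]))  # [24000, 18000, 13500]
```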
@@ -423,7 +369,6 @@ AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0
 
 本地大模型示意图
 │
-├── "chatglm4"
 ├── "chatglm3"
 ├── "chatglm"
 ├── "chatglm_onnx"
@@ -454,7 +399,7 @@ AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0
 插件在线服务配置依赖关系示意图
 │
 ├── 互联网检索
-│   └── SEARXNG_URLS
+│   └── SEARXNG_URL
 │
 ├── 语音功能
 │   ├── ENABLE_AUDIO
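Both sides of config.py keep the docstring rule that settings resolve as environment variable > config_private.py > config.py. A rough sketch of that lookup order (an illustration only; the project's real loader lives elsewhere in the repo):

```python
import os

def read_single_conf(name, default_cfg, private_cfg=None):
    """Resolve one setting: env var > config_private.py > config.py (sketch)."""
    if name in os.environ:                    # highest priority
        return os.environ[name]
    if private_cfg is not None and hasattr(private_cfg, name):
        return getattr(private_cfg, name)     # then config_private.py
    return getattr(default_cfg, name)         # finally config.py
```

The next hunk deletes an entire 444-line configuration file laid out like config.py, with real keys and passwords filled in.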
@@ -1,444 +0,0 @@
-"""
-以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。
-读取优先级:环境变量 > config_private.py > config.py
---- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
-All the following configurations also support using environment variables to override,
-and the environment variable configuration format can be seen in docker-compose.yml.
-Configuration reading priority: environment variable > config_private.py > config.py
-"""
-
-# [step 1-1]>> ( 接入GPT等模型 ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
-API_KEY = "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" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
-
-# [step 1-2]>> ( 接入通义 qwen-max ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/
-DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
-
-# [step 1-3]>> ( 接入 deepseek-reasoner, 即 deepseek-r1 ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
-DEEPSEEK_API_KEY = "sk-d99b8cc6b7414cc88a5d950a3ff7585e"
-
-# [step 2]>> 改为True应用代理。如果使用本地或无地域限制的大模型时,此处不修改;如果直接在海外服务器部署,此处不修改
-USE_PROXY = True
-if USE_PROXY:
-    proxies = {
-        "http":"socks5h://192.168.8.9:1070", # 再例如 "http": "http://127.0.0.1:7890",
-        "https":"socks5h://192.168.8.9:1070", # 再例如 "https": "http://127.0.0.1:7890",
-    }
-else:
-    proxies = None
-DEFAULT_WORKER_NUM = 256
-
-# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
-LLM_MODEL = "gpt-4-32k" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner",
-                    "gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
-                    "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
-                    "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
-                    "gemini-1.5-pro", "chatglm3", "chatglm4",
-                    ]
-
-EMBEDDING_MODEL = "text-embedding-3-small"
-
-# --- --- --- ---
-# P.S. 其他可用的模型还包括
-# AVAIL_LLM_MODELS = [
-# "glm-4-0520", "glm-4-air", "glm-4-airx", "glm-4-flash",
-# "qianfan", "deepseekcoder",
-# "spark", "sparkv2", "sparkv3", "sparkv3.5", "sparkv4",
-# "qwen-turbo", "qwen-plus", "qwen-local",
-# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
-# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"
-# "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
-# "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama",
-# "deepseek-chat" ,"deepseek-coder",
-# "gemini-1.5-flash",
-# "yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview",
-# "grok-beta",
-# ]
-# --- --- --- ---
-# 此外,您还可以在接入one-api/vllm/ollama/Openroute时,
-# 使用"one-api-*","vllm-*","ollama-*","openrouter-*"前缀直接使用非标准方式接入的模型,例如
-# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)", "ollama-phi3(max_token=4096)","openrouter-openai/gpt-4o-mini","openrouter-openai/chatgpt-4o-latest"]
-# --- --- --- ---
-
-
-# --------------- 以下配置可以优化体验 ---------------
-
-# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
-# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
-# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions", "http://localhost:11434/api/chat": "在这里填写您ollama的URL"}
-API_URL_REDIRECT = {}
-
-
-# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
-# 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
-DEFAULT_WORKER_NUM = 64
-
-
-# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
-# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
-THEME = "Default"
-AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
-
-FONT = "Theme-Default-Font"
-AVAIL_FONTS = [
-    "默认值(Theme-Default-Font)",
-    "宋体(SimSun)",
-    "黑体(SimHei)",
-    "楷体(KaiTi)",
-    "仿宋(FangSong)",
-    "华文细黑(STHeiti Light)",
-    "华文楷体(STKaiti)",
-    "华文仿宋(STFangsong)",
-    "华文宋体(STSong)",
-    "华文中宋(STZhongsong)",
-    "华文新魏(STXinwei)",
-    "华文隶书(STLiti)",
-    "思源宋体(Source Han Serif CN VF@https://chinese-fonts-cdn.deno.dev/packages/syst/dist/SourceHanSerifCN/result.css)",
-    "月星楷(Moon Stars Kai HW@https://chinese-fonts-cdn.deno.dev/packages/moon-stars-kai/dist/MoonStarsKaiHW-Regular/result.css)",
-    "珠圆体(MaokenZhuyuanTi@https://chinese-fonts-cdn.deno.dev/packages/mkzyt/dist/猫啃珠圆体/result.css)",
-    "平方萌萌哒(PING FANG MENG MNEG DA@https://chinese-fonts-cdn.deno.dev/packages/pfmmd/dist/平方萌萌哒/result.css)",
-    "Helvetica",
-    "ui-sans-serif",
-    "sans-serif",
-    "system-ui"
-]
-
-
-# 默认的系统提示词(system prompt)
-INIT_SYS_PROMPT = " "
-
-
-# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效)
-CHATBOT_HEIGHT = 1115
-
-
-# 代码高亮
-CODE_HIGHLIGHT = True
-
-
-# 窗口布局
-LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
-
-
-# 暗色模式 / 亮色模式
-DARK_MODE = True
-
-
-# 发送请求到OpenAI后,等待多久判定为超时
-TIMEOUT_SECONDS = 60
-
-
-# 网页的端口, -1代表随机端口
-WEB_PORT = 19998
-
-# 是否自动打开浏览器页面
-AUTO_OPEN_BROWSER = True
-
-
-# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
-MAX_RETRY = 5
-
-
-# 插件分类默认选项
-DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
-
-
-# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
-MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
-
-
-# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用)
-# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
-# 也可以是具体的模型路径
-QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
-
-
-# 百度千帆(LLM_MODEL="qianfan")
-BAIDU_CLOUD_API_KEY = ''
-BAIDU_CLOUD_SECRET_KEY = ''
-BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat", "ERNIE-Speed-128K", "ERNIE-Speed-8K", "ERNIE-Lite-8K"
-
-
-# 如果使用ChatGLM3或ChatGLM4本地模型,请把 LLM_MODEL="chatglm3" 或LLM_MODEL="chatglm4",并在此处指定模型路径
-CHATGLM_LOCAL_MODEL_PATH = "THUDM/glm-4-9b-chat" # 例如"/home/hmp/ChatGLM3-6B/"
-
-# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径
-CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
-
-
-# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
-LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
-LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
-
-
-# 设置gradio的并行线程数(不需要修改)
-CONCURRENT_COUNT = 100
-
-
-# 是否在提交时自动清空输入框
-AUTO_CLEAR_TXT = False
-
-
-# 加一个live2d装饰
-ADD_WAIFU = False
-
-
-# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
-# [("username", "password"), ("username2", "password2"), ...]
-AUTHENTICATION = [("van", "L807878712"),("林", "L807878712"),("源", "L807878712"),("欣", "L807878712"),("z", "czh123456789")]
-
-
-# 如果需要在二级路径下运行(常规情况下,不要修改!!)
-# (举例 CUSTOM_PATH = "/gpt_academic",可以让软件运行在 http://ip:port/gpt_academic/ 下。)
-CUSTOM_PATH = "/"
-
-
-# HTTPS 秘钥和证书(不需要修改)
-SSL_KEYFILE = ""
-SSL_CERTFILE = ""
-
-
-# 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用
-API_ORG = ""
-
-
-# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md
-SLACK_CLAUDE_BOT_ID = ''
-SLACK_CLAUDE_USER_TOKEN = ''
-
-
-# 如果需要使用AZURE(方法一:单个azure模型部署)详情请见额外文档 docs\use_azure.md
-AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
-AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写,该选项即将被弃用
-AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
-
-
-# 如果需要使用AZURE(方法二:多个azure模型部署+动态切换)详情请见额外文档 docs\use_azure.md
-AZURE_CFG_ARRAY = {}
-
-
-# 阿里云实时语音识别 配置难度较高
-# 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
-ENABLE_AUDIO = False
-ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
-ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
-ALIYUN_ACCESSKEY="" # (无需填写)
-ALIYUN_SECRET="" # (无需填写)
-
-
-# GPT-SOVITS 文本转语音服务的运行地址(将语言模型的生成文本朗读出来)
-TTS_TYPE = "DISABLE" # EDGE_TTS / LOCAL_SOVITS_API / DISABLE
-GPT_SOVITS_URL = ""
-EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
-
-
-# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat
-XFYUN_APPID = "00000000"
-XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
-XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
-
-# 接入智谱大模型
-ZHIPUAI_API_KEY = ""
-ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写
-
-
-# Claude API KEY
-ANTHROPIC_API_KEY = ""
-
-
-# 月之暗面 API KEY
-MOONSHOT_API_KEY = ""
-
-
-# 零一万物(Yi Model) API KEY
-YIMODEL_API_KEY = ""
-
-
-# 紫东太初大模型 https://ai-maas.wair.ac.cn
-TAICHU_API_KEY = ""
-
-# Grok API KEY
-GROK_API_KEY = ""
-
-# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
-MATHPIX_APPID = ""
-MATHPIX_APPKEY = ""
-
-
-# DOC2X的PDF解析服务,注册账号并获取API KEY: https://doc2x.noedgeai.com/login
-DOC2X_API_KEY = ""
-
-
-# 自定义API KEY格式
-CUSTOM_API_KEY_PATTERN = ""
-
-
-# Google Gemini API-Key
-GEMINI_API_KEY = ''
-
-
-# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
-HUGGINGFACE_ACCESS_TOKEN = "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"
-
-
-# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档
-# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
-GROBID_URLS = [
-    "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
-    "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
-    "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
-]
-
-
-# Searxng互联网检索服务(这是一个huggingface空间,请前往huggingface复制该空间,然后把自己新的空间地址填在这里)
-SEARXNG_URLS = [ f"https://kaletianlre-beardvs{i}dd.hf.space/" for i in range(1,5) ]
-
-
-# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
-ALLOW_RESET_CONFIG = False
-
-
-# 在使用AutoGen插件时,是否使用Docker容器运行代码
-AUTOGEN_USE_DOCKER = False
-
-
-# 临时的上传文件夹位置,请尽量不要修改
-PATH_PRIVATE_UPLOAD = "private_upload"
-
-
-# 日志文件夹的位置,请尽量不要修改
-PATH_LOGGING = "gpt_log"
-
-
-# 存储翻译好的arxiv论文的路径,请尽量不要修改
-ARXIV_CACHE_DIR = "gpt_log/arxiv_cache"
-
-
-# 除了连接OpenAI之外,还有哪些场合允许使用代理,请尽量不要修改
-WHEN_TO_USE_PROXY = ["Connect_OpenAI", "Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
-                     "Warmup_Modules", "Nougat_Download", "AutoGen", "Connect_OpenAI_Embedding"]
-
-
-# 启用插件热加载
-PLUGIN_HOT_RELOAD = False
-
-
-# 自定义按钮的最大数量限制
-NUM_CUSTOM_BASIC_BTN = 4
-
-
-# 媒体智能体的服务地址(这是一个huggingface空间,请前往huggingface复制该空间,然后把自己新的空间地址填在这里)
-DAAS_SERVER_URLS = [ f"https://niuziniu-biligpt{i}.hf.space/stream" for i in range(1,5) ]
-
-
-
-"""
---------------- 配置关联关系说明 ---------------
-
-在线大模型配置关联关系示意图
-│
-├── "gpt-3.5-turbo" 等openai模型
-│   ├── API_KEY
-│   ├── CUSTOM_API_KEY_PATTERN(不常用)
-│   ├── API_ORG(不常用)
-│   └── API_URL_REDIRECT(不常用)
-│
-├── "azure-gpt-3.5" 等azure模型(单个azure模型,不需要动态切换)
-│   ├── API_KEY
-│   ├── AZURE_ENDPOINT
-│   ├── AZURE_API_KEY
-│   ├── AZURE_ENGINE
-│   └── API_URL_REDIRECT
-│
-├── "azure-gpt-3.5" 等azure模型(多个azure模型,需要动态切换,高优先级)
-│   └── AZURE_CFG_ARRAY
-│
-├── "spark" 星火认知大模型 spark & sparkv2
-│   ├── XFYUN_APPID
-│   ├── XFYUN_API_SECRET
-│   └── XFYUN_API_KEY
-│
-├── "claude-3-opus-20240229" 等claude模型
-│   └── ANTHROPIC_API_KEY
-│
-├── "stack-claude"
-│   ├── SLACK_CLAUDE_BOT_ID
-│   └── SLACK_CLAUDE_USER_TOKEN
-│
-├── "qianfan" 百度千帆大模型库
-│   ├── BAIDU_CLOUD_QIANFAN_MODEL
-│   ├── BAIDU_CLOUD_API_KEY
-│   └── BAIDU_CLOUD_SECRET_KEY
-│
-├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
-│   └── ZHIPUAI_API_KEY
-│
-├── "yi-34b-chat-0205", "yi-34b-chat-200k" 等零一万物(Yi Model)大模型
-│   └── YIMODEL_API_KEY
-│
-├── "qwen-turbo" 等通义千问大模型
-│   └── DASHSCOPE_API_KEY
-│
-├── "Gemini"
-│   └── GEMINI_API_KEY
-│
-└── "one-api-...(max_token=...)" 用一种更方便的方式接入one-api多模型管理界面
-    ├── AVAIL_LLM_MODELS
-    ├── API_KEY
-    └── API_URL_REDIRECT
-
-
-本地大模型示意图
-│
-├── "chatglm4"
-├── "chatglm3"
-├── "chatglm"
-├── "chatglm_onnx"
-├── "chatglmft"
-├── "internlm"
-├── "moss"
-├── "jittorllms_pangualpha"
-├── "jittorllms_llama"
-├── "deepseekcoder"
-├── "qwen-local"
-├── RWKV的支持见Wiki
-└── "llama2"
-
-
-用户图形界面布局依赖关系示意图
-│
-├── CHATBOT_HEIGHT 对话窗的高度
-├── CODE_HIGHLIGHT 代码高亮
-├── LAYOUT 窗口布局
-├── DARK_MODE 暗色模式 / 亮色模式
-├── DEFAULT_FN_GROUPS 插件分类默认选项
-├── THEME 色彩主题
-├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框
-├── ADD_WAIFU 加一个live2d装饰
-└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性
-
-
-插件在线服务配置依赖关系示意图
-│
-├── 互联网检索
-│   └── SEARXNG_URLS
-│
-├── 语音功能
-│   ├── ENABLE_AUDIO
-│   ├── ALIYUN_TOKEN
-│   ├── ALIYUN_APPKEY
-│   ├── ALIYUN_ACCESSKEY
-│   └── ALIYUN_SECRET
-│
-└── PDF文档精准解析
-    ├── GROBID_URLS
-    ├── MATHPIX_APPID
-    └── MATHPIX_APPKEY
-
-
-"""
-
-
@@ -2,6 +2,7 @@ from toolbox import HotReload # HotReload 的意思是热更新,修改函数
 from toolbox import trimmed_format_exc
 from loguru import logger
 
+
 def get_crazy_functions():
     from crazy_functions.读文章写摘要 import 读文章写摘要
     from crazy_functions.生成函数注释 import 批量生成函数注释
@@ -16,7 +17,7 @@ def get_crazy_functions():
     from crazy_functions.SourceCode_Analyse import 解析一个前端项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
     from crazy_functions.高级功能函数模板 import Demo_Wrap
-    from crazy_functions.Latex_Project_Polish import Latex英文润色
+    from crazy_functions.Latex全文润色 import Latex英文润色
     from crazy_functions.询问多个大语言模型 import 同时问询
     from crazy_functions.SourceCode_Analyse import 解析一个Lua项目
     from crazy_functions.SourceCode_Analyse import 解析一个CSharp项目
@@ -27,13 +28,14 @@ def get_crazy_functions():
     from crazy_functions.Conversation_To_File import Conversation_To_File_Wrap
     from crazy_functions.Conversation_To_File import 删除所有本地对话历史记录
     from crazy_functions.辅助功能 import 清除缓存
+    from crazy_functions.批量文件询问 import 批量文件询问
     from crazy_functions.Markdown_Translate import Markdown英译中
     from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
     from crazy_functions.PDF_Translate import 批量翻译PDF文档
     from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
     from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
-    from crazy_functions.Latex_Project_Polish import Latex中文润色
-    from crazy_functions.Latex_Project_Polish import Latex英文纠错
+    from crazy_functions.Latex全文润色 import Latex中文润色
+    from crazy_functions.Latex全文润色 import Latex英文纠错
     from crazy_functions.Markdown_Translate import Markdown中译英
     from crazy_functions.虚空终端 import 虚空终端
     from crazy_functions.生成多种Mermaid图表 import Mermaid_Gen
@@ -49,16 +51,8 @@ def get_crazy_functions():
     from crazy_functions.Image_Generate_Wrap import ImageGen_Wrap
     from crazy_functions.SourceCode_Comment import 注释Python项目
     from crazy_functions.SourceCode_Comment_Wrap import SourceCodeComment_Wrap
-    from crazy_functions.VideoResource_GPT import 多媒体任务
 
     function_plugins = {
-        "多媒体智能体": {
-            "Group": "智能体",
-            "Color": "stop",
-            "AsButton": False,
-            "Info": "【仅测试】多媒体任务",
-            "Function": HotReload(多媒体任务),
-        },
         "虚空终端": {
             "Group": "对话|编程|学术|智能体",
             "Color": "stop",
@@ -113,16 +107,17 @@ def get_crazy_functions():
             "Group": "学术",
             "Color": "stop",
            "AsButton": True,
-            "Info": "ArXiv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
+            "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
             "Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
             "Class": Arxiv_Localize, # 新一代插件需要注册Class
         },
-        "批量总结Word文档": {
+        "批量文件询问": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,
-            "Info": "批量总结word文档 | 输入参数为路径",
-            "Function": HotReload(总结word文档),
+            "AdvancedArgs": True,
+            "Info": "通过在高级参数区写入prompt,可自定义询问逻辑,默认情况下为总结逻辑 | 输入参数为路径",
+            "Function": HotReload(批量文件询问),
         },
         "解析整个Matlab项目": {
             "Group": "编程",
@@ -245,7 +240,7 @@ def get_crazy_functions():
             "AsButton": True, # 加入下拉菜单中
             # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
             "Function": HotReload(连接网络回答问题),
-            "Class": NetworkGPT_Wrap # 新一代插件需要注册Class
+            # "Class": NetworkGPT_Wrap # 新一代插件需要注册Class
         },
         "历史上的今天": {
             "Group": "对话",
@@ -352,7 +347,7 @@ def get_crazy_functions():
             "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
                             r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
                             r'If the term "agent" is used in this section, it should be translated to "智能体". ',
-            "Info": "ArXiv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
+            "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
             "Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
             "Class": Arxiv_Localize, # 新一代插件需要注册Class
         },
@@ -434,6 +429,36 @@ def get_crazy_functions():
         logger.error(trimmed_format_exc())
         logger.error("Load function plugin failed")
 
+    # try:
+    #     from crazy_functions.联网的ChatGPT import 连接网络回答问题
+
+    #     function_plugins.update(
+    #         {
+    #             "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
+    #                 "Group": "对话",
+    #                 "Color": "stop",
+    #                 "AsButton": False,  # 加入下拉菜单中
+    #                 # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
+    #                 "Function": HotReload(连接网络回答问题),
+    #             }
+    #         }
+    #     )
+    #     from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
+
+    #     function_plugins.update(
+    #         {
+    #             "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
+    #                 "Group": "对话",
+    #                 "Color": "stop",
+    #                 "AsButton": False,  # 加入下拉菜单中
+    #                 "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
+    #                 "Function": HotReload(连接bing搜索回答问题),
+    #             }
+    #         }
+    #     )
+    # except:
+    #     logger.error(trimmed_format_exc())
+    #     logger.error("Load function plugin failed")
+
     try:
         from crazy_functions.SourceCode_Analyse import 解析任意code项目
@@ -697,6 +722,12 @@ def get_crazy_functions():
         logger.error("Load function plugin failed")
 
 
+
+
+
+
+
+
     # try:
     #     from crazy_functions.高级功能函数模板 import 测试图表渲染
     #     function_plugins.update({
@@ -711,6 +742,19 @@ def get_crazy_functions():
    #     logger.error(trimmed_format_exc())
    #     print('Load function plugin failed')
 
+    # try:
+    #     from crazy_functions.chatglm微调工具 import 微调数据集生成
+    #     function_plugins.update({
+    #         "黑盒模型学习: 微调数据集生成 (先上传数据集)": {
+    #             "Color": "stop",
+    #             "AsButton": False,
+    #             "AdvancedArgs": True,
+    #             "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求:100字以内,用第二人称。' --system_prompt=''",
+    #             "Function": HotReload(微调数据集生成)
+    #         }
+    #     })
+    # except:
+    #     print('Load function plugin failed')
+
     """
     设置默认值:
@@ -730,26 +774,3 @@ def get_crazy_functions():
         function_plugins[name]["Color"] = "secondary"
 
     return function_plugins
-
-
-
-def get_multiplex_button_functions():
-    """多路复用主提交按钮的功能映射
-    """
-    return {
-        "常规对话":
-            "",
-
-        "查互联网后回答":
-            "查互联网后回答",
-
-        "多模型对话":
-            "询问多个GPT模型",  # 映射到上面的 `询问多个GPT模型` 插件
-
-        "智能召回 RAG":
-            "Rag智能召回",  # 映射到上面的 `Rag智能召回` 插件
-
-        "多媒体查询":
-            "多媒体智能体",  # 映射到上面的 `多媒体智能体` 插件
-    }
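Across the hunks above, every plugin is registered as a dict entry with the same handful of fields. A sketch of that shape, using only field names visible in this diff; the handler below is a placeholder, not a real plugin:

```python
def my_plugin(*args, **kwargs):
    """Placeholder handler standing in for a real plugin function."""
    ...

function_plugins = {
    "示例插件": {
        "Group": "学术",         # which plugin category menu it appears under
        "Color": "stop",         # button color
        "AsButton": False,       # False: dropdown menu only, no dedicated button
        "AdvancedArgs": True,    # expose the advanced-parameter textbox
        "Info": "一句话说明 | 输入参数为路径",
        "Function": my_plugin,   # legacy entry point (HotReload-wrapped in the repo)
    },
}
```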
@@ -1,11 +1,10 @@
-import re
-from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user, update_ui_latest_msg
+from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user
 from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
-from loguru import logger
+import re
 
 f_prefix = 'GPT-Academic对话存档'
 
-def write_chat_to_file_legacy(chatbot, history=None, file_name=None):
+def write_chat_to_file(chatbot, history=None, file_name=None):
     """
     将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
     """
@@ -13,9 +12,6 @@ def write_chat_to_file_legacy(chatbot, history=None, file_name=None):
     import time
     from themes.theme import advanced_css
 
-    if (file_name is not None) and (file_name != "") and (not file_name.endswith('.html')): file_name += '.html'
-    else: file_name = None
-
     if file_name is None:
         file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
     fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name)
@@ -72,147 +68,6 @@ def write_chat_to_file_legacy(chatbot, history=None, file_name=None):
     promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot)
     return '对话历史写入:' + fp
 
-def write_chat_to_file(chatbot, history=None, file_name=None):
-    """
-    将对话记录history以多种格式(HTML、Word、Markdown)写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
-
-    Args:
-        chatbot: 聊天机器人对象,包含对话内容
-        history: 对话历史记录
-        file_name: 指定的文件名,如果为None则使用时间戳
-
-    Returns:
-        str: 提示信息,包含文件保存路径
-    """
-    import os
-    import time
-    import asyncio
-    import aiofiles
-    from toolbox import promote_file_to_downloadzone
-    from crazy_functions.doc_fns.conversation_doc.excel_doc import save_chat_tables
-    from crazy_functions.doc_fns.conversation_doc.html_doc import HtmlFormatter
-    from crazy_functions.doc_fns.conversation_doc.markdown_doc import MarkdownFormatter
-    from crazy_functions.doc_fns.conversation_doc.word_doc import WordFormatter
-    from crazy_functions.doc_fns.conversation_doc.txt_doc import TxtFormatter
-    from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter
-
-    async def save_html():
-        try:
-            html_formatter = HtmlFormatter(chatbot, history)
-            html_content = html_formatter.create_document()
-            html_file = os.path.join(save_dir, base_name + '.html')
-            async with aiofiles.open(html_file, 'w', encoding='utf8') as f:
-                await f.write(html_content)
-            return html_file
-        except Exception as e:
-            print(f"保存HTML格式失败: {str(e)}")
-            return None
-
-    async def save_word():
-        try:
-            word_formatter = WordFormatter()
-            doc = word_formatter.create_document(history)
-            docx_file = os.path.join(save_dir, base_name + '.docx')
-            # 由于python-docx不支持异步,使用线程池执行
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(None, doc.save, docx_file)
-            return docx_file
-        except Exception as e:
-            print(f"保存Word格式失败: {str(e)}")
-            return None
-
-    async def save_pdf(docx_file):
-        try:
-            if docx_file:
-                # 获取文件名和保存路径
-                pdf_file = os.path.join(save_dir, base_name + '.pdf')
-
-                # 在线程池中执行转换
-                loop = asyncio.get_event_loop()
-                pdf_file = await loop.run_in_executor(
-                    None,
-                    WordToPdfConverter.convert_to_pdf,
-                    docx_file
-                    # save_dir
-                )
-
-                return pdf_file
-
-        except Exception as e:
-            print(f"保存PDF格式失败: {str(e)}")
-            return None
-
-    async def save_markdown():
-        try:
-            md_formatter = MarkdownFormatter()
-            md_content = md_formatter.create_document(history)
-            md_file = os.path.join(save_dir, base_name + '.md')
-            async with aiofiles.open(md_file, 'w', encoding='utf8') as f:
-                await f.write(md_content)
-            return md_file
-        except Exception as e:
-            print(f"保存Markdown格式失败: {str(e)}")
-            return None
-
-    async def save_txt():
-        try:
-            txt_formatter = TxtFormatter()
-            txt_content = txt_formatter.create_document(history)
-            txt_file = os.path.join(save_dir, base_name + '.txt')
-            async with aiofiles.open(txt_file, 'w', encoding='utf8') as f:
-                await f.write(txt_content)
-            return txt_file
-        except Exception as e:
-            print(f"保存TXT格式失败: {str(e)}")
-            return None
-
-    async def main():
-        # 并发执行所有保存任务
-        html_task = asyncio.create_task(save_html())
-        word_task = asyncio.create_task(save_word())
-        md_task = asyncio.create_task(save_markdown())
-        txt_task = asyncio.create_task(save_txt())
-
-        # 等待所有任务完成
-        html_file = await html_task
-        docx_file = await word_task
-        md_file = await md_task
-        txt_file = await txt_task
-
-        # PDF转换需要等待word文件生成完成
-        pdf_file = await save_pdf(docx_file)
-        # 收集所有成功生成的文件
-        result_files = [f for f in [html_file, docx_file, md_file, txt_file, pdf_file] if f]
-
-        # 保存Excel表格
-        excel_files = save_chat_tables(history, save_dir, base_name)
-        result_files.extend(excel_files)
-
-        return result_files
-
-    # 生成时间戳
-    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-
-    # 获取保存目录
-    save_dir = get_log_folder(get_user(chatbot), plugin_name='chat_history')
-
-    # 处理文件名
-    base_name = file_name if file_name else f"聊天记录_{timestamp}"
-
-    # 运行异步任务
-    result_files = asyncio.run(main())
-
-    # 将生成的文件添加到下载区
-    for file in result_files:
-        promote_file_to_downloadzone(file, rename_file=os.path.basename(file), chatbot=chatbot)
-
-    # 如果没有成功保存任何文件,返回错误信息
-    if not result_files:
-        return "保存对话记录失败,请检查错误日志"
-
-    ext_list = [os.path.splitext(f)[1] for f in result_files]
-    # 返回成功信息和文件路径
-    return f"对话历史已保存至以下格式文件:" + "、".join(ext_list)
-
 def gen_file_preview(file_name):
     try:
         with open(file_name, 'r', encoding='utf8') as f:
@@ -264,21 +119,12 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     user_request 当前用户的请求信息(IP地址等)
     """
     file_name = plugin_kwargs.get("file_name", None)
+    if (file_name is not None) and (file_name != "") and (not file_name.endswith('.html')): file_name += '.html'
+    else: file_name = None
 
+    chatbot.append((None, f"[Local Message] {write_chat_to_file(chatbot, history, file_name)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
-    chatbot.append((None, f"[Local Message] {write_chat_to_file_legacy(chatbot, history, file_name)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
-    try:
-        chatbot.append((None, f"[Local Message] 正在尝试生成pdf以及word格式的对话存档,请稍等..."))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求需要一段时间,我们先及时地做一次界面更新
|
|
||||||
lastmsg = f"[Local Message] {write_chat_to_file(chatbot, history, file_name)}。" \
|
|
||||||
f"您可以调用下拉菜单中的“载入对话历史会话”还原当下的对话,请注意,目前只支持html格式载入历史。" \
|
|
||||||
f"当模型回答中存在表格,将提取表格内容存储为Excel的xlsx格式,如果你提供一些数据,然后输入指令要求模型帮你整理为表格" \
|
|
||||||
f"(如“请帮我将下面的数据整理为表格:”),再利用此插件就可以获取到Excel表格。"
|
|
||||||
yield from update_ui_latest_msg(lastmsg, chatbot, history) # 刷新界面 # 由于请求需要一段时间,我们先及时地做一次界面更新
|
|
||||||
except Exception as e:
|
|
||||||
logger.exception(f"已完成对话存档(pdf和word格式的对话存档生成未成功)。{str(e)}")
|
|
||||||
lastmsg = "已完成对话存档(pdf和word格式的对话存档生成未成功)。"
|
|
||||||
yield from update_ui_latest_msg(lastmsg, chatbot, history) # 刷新界面 # 由于请求需要一段时间,我们先及时地做一次界面更新
|
|
||||||
return
|
|
||||||
|
|
||||||
class Conversation_To_File_Wrap(GptAcademicPluginTemplate):
|
class Conversation_To_File_Wrap(GptAcademicPluginTemplate):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
|
|||||||
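The function removed above fans the per-format writers out as asyncio tasks and pushes blocking library calls (python-docx's `doc.save`, the Word-to-PDF converter) onto a thread pool via `run_in_executor`. A minimal, self-contained sketch of that pattern, assuming nothing from the project: the `_write` helper and the file extensions below are illustrative stand-ins for the real formatter classes.

```python
import asyncio
import os
from typing import List, Optional

def _write(path: str, content: str) -> None:
    # plain blocking write; stand-in for python-docx / formatter calls
    with open(path, 'w', encoding='utf8') as f:
        f.write(content)

async def save_one(save_dir: str, base_name: str, ext: str, content: str) -> Optional[str]:
    try:
        path = os.path.join(save_dir, base_name + ext)
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, _write, path, content)  # keep blocking I/O off the event loop
        return path
    except Exception as e:
        print(f"saving {ext} failed: {e}")
        return None

async def export_all(save_dir: str, base_name: str, content: str) -> List[str]:
    # one task per output format, all running concurrently
    tasks = [asyncio.create_task(save_one(save_dir, base_name, ext, content))
             for ext in ('.html', '.md', '.txt')]
    results = await asyncio.gather(*tasks)
    return [p for p in results if p]  # keep only the formats that succeeded

if __name__ == '__main__':
    print(asyncio.run(export_all('.', 'chat_demo', 'hello')))
```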
@@ -7,7 +7,7 @@ from bs4 import BeautifulSoup
from functools import lru_cache
from itertools import zip_longest
from check_proxy import check_proxy
-from toolbox import CatchException, update_ui, get_conf, update_ui_latest_msg
+from toolbox import CatchException, update_ui, get_conf
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
from request_llms.bridge_all import model_info
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -49,7 +49,7 @@ def search_optimizer(
    mutable = ["", time.time(), ""]
    llm_kwargs["temperature"] = 0.8
    try:
-        query_json = predict_no_ui_long_connection(
+        querys_json = predict_no_ui_long_connection(
            inputs=query,
            llm_kwargs=llm_kwargs,
            history=[],
@@ -57,31 +57,31 @@ def search_optimizer(
            observe_window=mutable,
        )
    except Exception:
-        query_json = "null"
+        querys_json = "1234"
    #* 尝试解码优化后的搜索结果
-    query_json = re.sub(r"```json|```", "", query_json)
+    querys_json = re.sub(r"```json|```", "", querys_json)
    try:
-        queries = json.loads(query_json)
+        querys = json.loads(querys_json)
    except Exception:
        #* 如果解码失败,降低温度再试一次
        try:
            llm_kwargs["temperature"] = 0.4
-            query_json = predict_no_ui_long_connection(
+            querys_json = predict_no_ui_long_connection(
                inputs=query,
                llm_kwargs=llm_kwargs,
                history=[],
                sys_prompt=sys_prompt,
                observe_window=mutable,
            )
-            query_json = re.sub(r"```json|```", "", query_json)
-            queries = json.loads(query_json)
+            querys_json = re.sub(r"```json|```", "", querys_json)
+            querys = json.loads(querys_json)
        except Exception:
            #* 如果再次失败,直接返回原始问题
-            queries = [query]
+            querys = [query]
    links = []
    success = 0
    Exceptions = ""
-    for q in queries:
+    for q in querys:
        try:
            link = searxng_request(q, proxies, categories, searxng_url, engines=engines)
            if len(link) > 0:
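Both sides of this hunk recover from malformed model output the same way: strip markdown fences, attempt `json.loads`, retry once at a lower temperature, and finally fall back to searching the original question verbatim. A compact sketch of that strategy under stated assumptions: `ask_llm` is a stand-in for `predict_no_ui_long_connection`, and any callable returning text will do.

````python
import json
import re

def parse_queries(raw: str):
    # strip the markdown code fences the model may wrap around its JSON
    return json.loads(re.sub(r"```json|```", "", raw))

def optimize_query(query: str, ask_llm, temperatures=(0.8, 0.4)):
    for temp in temperatures:
        try:
            queries = parse_queries(ask_llm(query, temperature=temp))
            if isinstance(queries, list) and queries:
                return queries
        except Exception:
            continue  # malformed JSON: retry once at the lower temperature
    return [query]  # final fallback: search with the original question verbatim

# stubbed usage:
print(optimize_query("capital of France",
                     lambda q, temperature: '["capital of France", "France capital city"]'))
````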
@@ -115,8 +115,7 @@ def get_auth_ip():

def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
    if searxng_url is None:
-        urls = get_conf("SEARXNG_URLS")
-        url = random.choice(urls)
+        url = get_conf("SEARXNG_URL")
    else:
        url = searxng_url

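The left side draws each request's endpoint from a `SEARXNG_URLS` pool with `random.choice`, while the right side pins a single `SEARXNG_URL`. A small sketch of the pool idea with failover added; the endpoint list here is hypothetical, not a value from the project.

```python
# Sketch: pick endpoints in random order and fall back to the rest of the pool on failure.
# ENDPOINTS is an illustrative stand-in for the SEARXNG_URLS config entry.
import random
import requests

ENDPOINTS = ["https://searx.example-a.org/", "https://searx.example-b.org/"]  # hypothetical

def request_with_pool(params: dict, timeout: int = 8) -> requests.Response:
    candidates = random.sample(ENDPOINTS, k=len(ENDPOINTS))  # shuffled, no repeats
    last_error = None
    for url in candidates:
        try:
            resp = requests.get(url, params=params, timeout=timeout)
            resp.raise_for_status()
            return resp
        except requests.RequestException as e:
            last_error = e  # try the next instance in the shuffled pool
    raise RuntimeError(f"all {len(ENDPOINTS)} endpoints failed") from last_error
```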
@@ -175,17 +174,10 @@ def scrape_text(url, proxies) -> str:
    Returns:
        str: The scraped text
    """
-    from loguru import logger
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
        'Content-Type': 'text/plain',
    }

-    # 首先采用Jina进行文本提取
-    if get_conf("JINA_API_KEY"):
-        try: return jina_scrape_text(url)
-        except: logger.debug("Jina API 请求失败,回到旧方法")
-
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
        if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
@@ -201,56 +193,6 @@ def scrape_text(url, proxies) -> str:
    return text


-def jina_scrape_text(url) -> str:
-    "jina_39727421c8fa4e4fa9bd698e5211feaaDyGeVFESNrRaepWiLT0wmHYJSh-d"
-    headers = {
-        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
-        'Content-Type': 'text/plain',
-        "X-Retain-Images": "none",
-        "Authorization": f'Bearer {get_conf("JINA_API_KEY")}'
-    }
-    response = requests.get("https://r.jina.ai/" + url, headers=headers, proxies=None, timeout=8)
-    if response.status_code != 200:
-        raise ValueError("Jina API 请求失败,开始尝试旧方法!" + response.text)
-    if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
-    result = response.text
-    result = result.replace("\\[", "[").replace("\\]", "]").replace("\\(", "(").replace("\\)", ")")
-    return response.text
-
-
-def internet_search_with_analysis_prompt(prompt, analysis_prompt, llm_kwargs, chatbot):
-    from toolbox import get_conf
-    proxies = get_conf('proxies')
-    categories = 'general'
-    searxng_url = None # 使用默认的searxng_url
-    engines = None # 使用默认的搜索引擎
-    yield from update_ui_latest_msg(lastmsg=f"检索中: {prompt} ...", chatbot=chatbot, history=[], delay=1)
-    urls = searxng_request(prompt, proxies, categories, searxng_url, engines=engines)
-    yield from update_ui_latest_msg(lastmsg=f"依次访问搜索到的网站 ...", chatbot=chatbot, history=[], delay=1)
-    if len(urls) == 0:
-        return None
-    max_search_result = 5 # 最多收纳多少个网页的结果
-    history = []
-    for index, url in enumerate(urls[:max_search_result]):
-        yield from update_ui_latest_msg(lastmsg=f"依次访问搜索到的网站: {url['link']} ...", chatbot=chatbot, history=[], delay=1)
-        res = scrape_text(url['link'], proxies)
-        prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
-        history.extend([prefix, res])
-    i_say = f"从以上搜索结果中抽取信息,然后回答问题:{prompt} {analysis_prompt}"
-    i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
-        inputs=i_say,
-        history=history,
-        max_token_limit=8192
-    )
-    gpt_say = predict_no_ui_long_connection(
-        inputs=i_say,
-        llm_kwargs=llm_kwargs,
-        history=history,
-        sys_prompt="请从搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。",
-        console_silence=False,
-    )
-    return gpt_say

@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    optimizer_history = history[:-8]
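The helper removed above wraps the Jina Reader endpoint (it prefixes the target with `https://r.jina.ai/` and authenticates with a Bearer key), and `scrape_text` falls back to a plain fetch when the API fails. Note the removed code builds an unescaped `result` and then returns the raw `response.text`, so its replacements never take effect. A sketch of the same try-API-then-fallback shape, assuming the key comes from an environment variable rather than `get_conf`, and returning the processed string:

```python
import os
import requests

def fetch_page_text(url: str, timeout: int = 8) -> str:
    api_key = os.environ.get("JINA_API_KEY", "")  # illustrative; the plugin reads get_conf("JINA_API_KEY")
    if api_key:
        try:
            resp = requests.get(
                "https://r.jina.ai/" + url,
                headers={"Authorization": f"Bearer {api_key}", "X-Retain-Images": "none"},
                timeout=timeout,
            )
            if resp.status_code == 200:
                text = resp.text
                # unlike the removed helper (which computed this and then returned the raw
                # body anyway), actually return the unescaped result
                return text.replace("\\[", "[").replace("\\]", "]").replace("\\(", "(").replace("\\)", ")")
        except requests.RequestException:
            pass  # fall through to the plain fetch below
    resp = requests.get(url, timeout=timeout)
    if resp.encoding == "ISO-8859-1":
        resp.encoding = resp.apparent_encoding
    return resp.text
```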
@@ -271,52 +213,23 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
    urls = search_optimizer(txt, proxies, optimizer_history, llm_kwargs, optimizer, categories, searxng_url, engines)
    history = []
    if len(urls) == 0:
-        chatbot.append((f"结论:{txt}", "[Local Message] 受到限制,无法从searxng获取信息!请尝试更换搜索引擎。"))
+        chatbot.append((f"结论:{txt}",
+                        "[Local Message] 受到限制,无法从searxng获取信息!请尝试更换搜索引擎。"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # ------------- < 第2步:依次访问网页 > -------------
-    from concurrent.futures import ThreadPoolExecutor
-    from textwrap import dedent
    max_search_result = 5  # 最多收纳多少个网页的结果
    if optimizer == "开启(增强)":
        max_search_result = 8
-    template = dedent("""
-        <details>
-        <summary>{TITLE}</summary>
-        <div class="search_result">{URL}</div>
-        <div class="search_result">{CONTENT}</div>
-        </details>
-        """)
+    chatbot.append(["联网检索中 ...", None])
+    for index, url in enumerate(urls[:max_search_result]):
+        res = scrape_text(url['link'], proxies)
+        prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
+        history.extend([prefix, res])
+        res_squeeze = res.replace('\n', '...')
+        chatbot[-1] = [prefix + "\n\n" + res_squeeze[:500] + "......", None]
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    buffer = ""
-
-    # 创建线程池
-    with ThreadPoolExecutor(max_workers=5) as executor:
-        # 提交任务到线程池
-        futures = []
-        for index, url in enumerate(urls[:max_search_result]):
-            future = executor.submit(scrape_text, url['link'], proxies)
-            futures.append((index, future, url))
-
-        # 处理完成的任务
-        for index, future, url in futures:
-            # 开始
-            prefix = f"正在加载 第{index+1}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
-            string_structure = template.format(TITLE=prefix, URL=url['link'], CONTENT="正在加载,请稍后 ......")
-            yield from update_ui_latest_msg(lastmsg=(buffer + string_structure), chatbot=chatbot, history=history, delay=0.1) # 刷新界面
-
-            # 获取结果
-            res = future.result()
-
-            # 显示结果
-            prefix = f"第{index+1}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
-            string_structure = template.format(TITLE=prefix, URL=url['link'], CONTENT=res[:1000] + "......")
-            buffer += string_structure
-
-            # 更新历史
-            history.extend([prefix, res])
-            yield from update_ui_latest_msg(lastmsg=buffer, chatbot=chatbot, history=history, delay=0.1) # 刷新界面

    # ------------- < 第3步:ChatGPT综合 > -------------
    if (optimizer != "开启(增强)"):
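The left-hand scraping loop submits every fetch to a `ThreadPoolExecutor` up front, then drains the futures in submission order, so downloads overlap while results still stream to the UI deterministically. A stripped-down sketch of that submit-then-drain pattern; `fetch` is a stand-in for `scrape_text`.

```python
from concurrent.futures import ThreadPoolExecutor
import time

def fetch(url: str) -> str:
    time.sleep(0.1)  # pretend network latency
    return f"content of {url}"

urls = [f"https://example.org/page{i}" for i in range(5)]

with ThreadPoolExecutor(max_workers=5) as executor:
    # submit everything first so all downloads run concurrently
    futures = [(i, executor.submit(fetch, u), u) for i, u in enumerate(urls)]
    # then consume in submission order: output stays deterministic
    for i, future, u in futures:
        print(f"loading result {i+1}: {u} ...")
        res = future.result()  # blocks only until *this* future is done
        print(f"result {i+1}: {res[:60]}")
```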
@@ -1,4 +1,4 @@
-import random
from toolbox import get_conf
from crazy_functions.Internet_GPT import 连接网络回答问题
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
@@ -20,9 +20,6 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
        第三个参数,名称`allow_cache`,参数`type`声明这是一个下拉菜单,下拉菜单上方显示`title`+`description`,下拉菜单的选项为`options`,`default_value`为下拉菜单默认值;

        """
-        urls = get_conf("SEARXNG_URLS")
-        url = random.choice(urls)
-
        gui_definition = {
            "main_input":
                ArgProperty(title="输入问题", description="待通过互联网检索的问题,会自动读取输入框内容", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
@@ -33,17 +30,16 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
            "optimizer":
                ArgProperty(title="搜索优化", options=["关闭", "开启", "开启(增强)"], default_value="关闭", description="是否使用搜索增强。注意这可能会消耗较多token", type="dropdown").model_dump_json(),
            "searxng_url":
-                ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=url, type="string").model_dump_json(), # 主输入,自动从输入框同步
+                ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=get_conf("SEARXNG_URL"), type="string").model_dump_json(), # 主输入,自动从输入框同步

        }
        return gui_definition

-    def execute(txt, llm_kwargs, plugin_kwargs:dict, chatbot, history, system_prompt, user_request):
+    def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        """
        执行插件
        """
-        if plugin_kwargs.get("categories", None) == "网页": plugin_kwargs["categories"] = "general"
-        elif plugin_kwargs.get("categories", None) == "学术论文": plugin_kwargs["categories"] = "science"
-        else: plugin_kwargs["categories"] = "general"
+        if plugin_kwargs["categories"] == "网页": plugin_kwargs["categories"] = "general"
+        if plugin_kwargs["categories"] == "学术论文": plugin_kwargs["categories"] = "science"
        yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
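The two `execute` bodies differ in robustness: the left reads the dropdown with `dict.get` and ends with an `else` that pins unknown labels to `general`, so a missing `categories` key can never raise `KeyError`. A tiny sketch of that normalization idiom, with a hypothetical label table:

```python
# dict.get avoids KeyError, and the fallback pins every unknown label to a safe default.
CATEGORY_MAP = {"网页": "general", "学术论文": "science"}  # UI label -> searxng category

def normalize_categories(plugin_kwargs: dict) -> dict:
    label = plugin_kwargs.get("categories", None)
    plugin_kwargs["categories"] = CATEGORY_MAP.get(label, "general")
    return plugin_kwargs

print(normalize_categories({}))                        # {'categories': 'general'}
print(normalize_categories({"categories": "学术论文"}))   # {'categories': 'science'}
```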
@@ -1,5 +1,5 @@
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256
-from toolbox import CatchException, report_exception, update_ui_latest_msg, zip_result, gen_time_str
+from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
from functools import partial
from loguru import logger

@@ -41,7 +41,7 @@ def switch_prompt(pfg, mode, more_requirement):
    return inputs_array, sys_prompt_array


-def descend_to_extracted_folder_if_exist(project_folder):
+def desend_to_extracted_folder_if_exist(project_folder):
    """
    Descend into the extracted folder if it exists, otherwise return the original folder.

@@ -130,7 +130,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):

    if not txt.startswith('https://arxiv.org/abs/'):
        msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
-        yield from update_ui_latest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
+        yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
        return msg, None
    # <-------------- set format ------------->
    arxiv_id = url_.split('/abs/')[-1]
@@ -156,16 +156,16 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
        return False

    if os.path.exists(dst) and allow_cache:
-        yield from update_ui_latest_msg(f"调用缓存 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+        yield from update_ui_lastest_msg(f"调用缓存 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
        success = True
    else:
-        yield from update_ui_latest_msg(f"开始下载 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+        yield from update_ui_lastest_msg(f"开始下载 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
        success = fix_url_and_download()
-        yield from update_ui_latest_msg(f"下载完成 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+        yield from update_ui_lastest_msg(f"下载完成 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面


    if not success:
-        yield from update_ui_latest_msg(f"下载失败 {arxiv_id}", chatbot=chatbot, history=history)
+        yield from update_ui_lastest_msg(f"下载失败 {arxiv_id}", chatbot=chatbot, history=history)
        raise tarfile.ReadError(f"论文下载失败 {arxiv_id}")

    # <-------------- extract file ------------->
@@ -288,7 +288,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
        return

    # <-------------- if is a zip/tar file ------------->
-    project_folder = descend_to_extracted_folder_if_exist(project_folder)
+    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    from shared_utils.fastapi_server import validate_path_safety
@@ -365,7 +365,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
    try:
        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    except tarfile.ReadError as e:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
            chatbot=chatbot, history=history)
        return
@@ -404,7 +404,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
        return

    # <-------------- if is a zip/tar file ------------->
-    project_folder = descend_to_extracted_folder_if_exist(project_folder)
+    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    from shared_utils.fastapi_server import validate_path_safety
@@ -518,7 +518,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
    # repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)

    # if repeat:
-    #     yield from update_ui_latest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
+    #     yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
    #     try:
    #         translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
    #         promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
@@ -531,7 +531,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
    #         report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现重复上传,但是无法找到相关文件")
    #         yield from update_ui(chatbot=chatbot, history=history)
    # else:
-    #     yield from update_ui_latest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
+    #     yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)

    # <-------------- convert pdf into tex ------------->
    chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
@@ -543,7 +543,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
        return False

    # <-------------- translate latex file into Chinese ------------->
-    yield from update_ui_latest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
+    yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
@@ -551,7 +551,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
        return

    # <-------------- if is a zip/tar file ------------->
-    project_folder = descend_to_extracted_folder_if_exist(project_folder)
+    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    from shared_utils.fastapi_server import validate_path_safety
@@ -559,7 +559,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
    project_folder = move_project(project_folder)

    # <-------------- set a hash tag for repeat-checking ------------->
-    with open(pj(project_folder, hash_tag + '.tag'), 'w', encoding='utf8') as f:
+    with open(pj(project_folder, hash_tag + '.tag'), 'w') as f:
        f.write(hash_tag)
        f.close()

@@ -571,7 +571,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
        switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
-    yield from update_ui_latest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
+    yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                   main_file_modified='merge_translate_zh', mode='translate_zh',
                                   work_folder_original=project_folder, work_folder_modified=project_folder,
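The `arxiv_download` hunks above sit inside a cache-or-download gate: reuse the extracted source when `allow_cache` holds, otherwise fetch, and raise `tarfile.ReadError` so the caller can show the manual-download hint. A sketch of that gate, assuming a placeholder `fetch` callable in place of the real tarball download:

```python
import os
import tarfile

def get_paper_source(arxiv_id: str, dst: str, fetch, allow_cache: bool = True) -> str:
    if os.path.exists(dst) and allow_cache:
        return dst  # cache hit: skip the network entirely
    success = fetch(arxiv_id, dst)
    if not success:
        # mirrors the plugin: a ReadError tells the caller to suggest a manual download
        raise tarfile.ReadError(f"论文下载失败 {arxiv_id}")
    return dst

# stubbed usage ('.' always exists, so this is a cache hit):
print(get_paper_source("1707.06690", ".", fetch=lambda aid, d: True))
```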
@@ -1,5 +1,5 @@
from toolbox import CatchException, check_packages, get_conf
-from toolbox import update_ui, update_ui_latest_msg, disable_auto_promotion
+from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion
from toolbox import trimmed_format_exc_markdown
from crazy_functions.crazy_utils import get_files_from_everything
from crazy_functions.pdf_fns.parse_pdf import get_avail_grobid_url
@@ -47,7 +47,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
            yield from 解析PDF_基于DOC2X(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request)
            return
        except:
-            chatbot.append([None, f"DOC2X服务不可用,请检查报错详细。{trimmed_format_exc_markdown()}"])
+            chatbot.append([None, f"DOC2X服务不可用,现在将执行效果稍差的旧版代码。{trimmed_format_exc_markdown()}"])
            yield from update_ui(chatbot=chatbot, history=history)

    if method == "GROBID":
@@ -57,9 +57,9 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
        yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
        return

-    if method == "Classic":
+    if method == "ClASSIC":
        # ------- 第三种方法,早期代码,效果不理想 -------
-        yield from update_ui_latest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
+        yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
        yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
        return

@@ -77,7 +77,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
    if grobid_url is not None:
        yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
        return
-    yield from update_ui_latest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
+    yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
    yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
    return

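`批量翻译PDF文档` tries parsers in descending order of quality (DOC2X, then GROBID, then the legacy splitter), reporting each failure and degrading to the next method. A compact sketch of that fallback chain; the parser callables are placeholders for the real `解析PDF_基于DOC2X` / `解析PDF_基于GROBID` / `解析PDF_简单拆解` generators:

```python
from typing import Callable, List, Tuple

def parse_with_fallback(pdf_path: str, parsers: List[Tuple[str, Callable[[str], str]]]) -> str:
    errors = []
    for name, parser in parsers:
        try:
            return parser(pdf_path)
        except Exception as e:
            errors.append(f"{name}: {e}")  # degrade to the next, lower-quality method
    raise RuntimeError("all parsers failed: " + "; ".join(errors))

# stubbed usage: DOC2X "fails", GROBID succeeds
def doc2x_stub(path: str) -> str:
    raise ConnectionError("service unavailable")

def grobid_stub(path: str) -> str:
    return f"structured text of {path}"

print(parse_with_fallback("paper.pdf", [("DOC2X", doc2x_stub), ("GROBID", grobid_stub)]))
```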
@@ -19,7 +19,7 @@ class PDF_Tran(GptAcademicPluginTemplate):
            "additional_prompt":
                ArgProperty(title="额外提示词", description="例如:对专有名词、翻译语气等方面的要求", default_value="", type="string").model_dump_json(), # 高级参数输入区,自动同步
            "pdf_parse_method":
-                ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "Classic"], description="无", default_value="GROBID", type="dropdown").model_dump_json(),
+                ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "ClASSIC"], description="无", default_value="GROBID", type="dropdown").model_dump_json(),
        }
        return gui_definition

@@ -1,11 +1,4 @@
-import os,glob
-from typing import List
-
-from shared_utils.fastapi_server import validate_path_safety
-
-from toolbox import report_exception
-from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_latest_msg
-from shared_utils.fastapi_server import validate_path_safety
+from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@@ -14,37 +7,6 @@ MAX_HISTORY_ROUND = 5
MAX_CONTEXT_TOKEN_LIMIT = 4096
REMEMBER_PREVIEW = 1000

-@CatchException
-def handle_document_upload(files: List[str], llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, rag_worker):
-    """
-    Handles document uploads by extracting text and adding it to the vector store.
-    """
-    from llama_index.core import Document
-    from crazy_functions.rag_fns.rag_file_support import extract_text, supports_format
-    user_name = chatbot.get_user()
-    checkpoint_dir = get_log_folder(user_name, plugin_name='experimental_rag')
-
-    for file_path in files:
-        try:
-            validate_path_safety(file_path, user_name)
-            text = extract_text(file_path)
-            if text is None:
-                chatbot.append(
-                    [f"上传文件: {os.path.basename(file_path)}", f"文件解析失败,无法提取文本内容,请更换文件。失败原因可能为:1.文档格式过于复杂;2. 不支持的文件格式,支持的文件格式后缀有:" + ", ".join(supports_format)])
-            else:
-                chatbot.append(
-                    [f"上传文件: {os.path.basename(file_path)}", f"上传文件前50个字符为:{text[:50]}。"])
-                document = Document(text=text, metadata={"source": file_path})
-                rag_worker.add_documents_to_vector_store([document])
-                chatbot.append([f"上传文件: {os.path.basename(file_path)}", "文件已成功添加到知识库。"])
-        except Exception as e:
-            report_exception(chatbot, history, a=f"处理文件: {file_path}", b=str(e))
-
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-
-# Main Q&A function with document upload support
@CatchException
def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):

@@ -61,45 +23,28 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
    # 1. we retrieve rag worker from global context
    user_name = chatbot.get_user()
    checkpoint_dir = get_log_folder(user_name, plugin_name='experimental_rag')

    if user_name in RAG_WORKER_REGISTER:
        rag_worker = RAG_WORKER_REGISTER[user_name]
    else:
        rag_worker = RAG_WORKER_REGISTER[user_name] = LlamaIndexRagWorker(
            user_name,
            llm_kwargs,
            checkpoint_dir=checkpoint_dir,
-            auto_load_checkpoint=True
-        )
+            auto_load_checkpoint=True)

    current_context = f"{VECTOR_STORE_TYPE} @ {checkpoint_dir}"
    tip = "提示:输入“清空向量数据库”可以清空RAG向量数据库"
+    if txt == "清空向量数据库":
-    # 2. Handle special commands
-    if os.path.exists(txt) and os.path.isdir(txt):
-        project_folder = txt
-        validate_path_safety(project_folder, chatbot.get_user())
-        # Extract file paths from the user input
-        # Assuming the user inputs file paths separated by commas after the command
-        file_paths = [f for f in glob.glob(f'{project_folder}/**/*', recursive=True)]
-        chatbot.append([txt, f'正在处理上传的文档 ({current_context}) ...'])
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-        yield from handle_document_upload(file_paths, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, rag_worker)
-        return
-
-    elif txt == "清空向量数据库":
        chatbot.append([txt, f'正在清空 ({current_context}) ...'])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        rag_worker.purge_vector_store()
+        rag_worker.purge()
-        yield from update_ui_latest_msg('已清空', chatbot, history, delay=0) # 刷新界面
+        yield from update_ui_lastest_msg('已清空', chatbot, history, delay=0) # 刷新界面
        return

-    # 3. Normal Q&A processing
    chatbot.append([txt, f'正在召回知识 ({current_context}) ...'])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-    # 4. Clip history to reduce token consumption
+    # 2. clip history to reduce token consumption
+    # 2-1. reduce chat round
    txt_origin = txt

    if len(history) > MAX_HISTORY_ROUND * 2:
@@ -107,47 +52,41 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
    txt_clip, history, flags = input_clipping(txt, history, max_token_limit=MAX_CONTEXT_TOKEN_LIMIT, return_clip_flags=True)
    input_is_clipped_flag = (flags["original_input_len"] != flags["clipped_input_len"])

-    # 5. If input is clipped, add input to vector store before retrieve
+    # 2-2. if input is clipped, add input to vector store before retrieve
    if input_is_clipped_flag:
-        yield from update_ui_latest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # 刷新界面
+        yield from update_ui_lastest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # 刷新界面
-        # Save input to vector store
+        # save input to vector store
        rag_worker.add_text_to_vector_store(txt_origin)
-        yield from update_ui_latest_msg('向量化完成 ...', chatbot, history, delay=0) # 刷新界面
+        yield from update_ui_lastest_msg('向量化完成 ...', chatbot, history, delay=0) # 刷新界面

        if len(txt_origin) > REMEMBER_PREVIEW:
-            HALF = REMEMBER_PREVIEW // 2
+            HALF = REMEMBER_PREVIEW//2
            i_say_to_remember = txt[:HALF] + f" ...\n...(省略{len(txt_origin)-REMEMBER_PREVIEW}字)...\n... " + txt[-HALF:]
            if (flags["original_input_len"] - flags["clipped_input_len"]) > HALF:
                txt_clip = txt_clip + f" ...\n...(省略{len(txt_origin)-len(txt_clip)-HALF}字)...\n... " + txt[-HALF:]
+            else:
+                pass
+            i_say = txt_clip
        else:
            i_say_to_remember = i_say = txt_clip
    else:
        i_say_to_remember = i_say = txt_clip

-    # 6. Search vector store and build prompts
+    # 3. we search vector store and build prompts
    nodes = rag_worker.retrieve_from_store_with_query(i_say)
    prompt = rag_worker.build_prompt(query=i_say, nodes=nodes)
-    # 7. Query language model
-    if len(chatbot) != 0:
-        chatbot.pop(-1) # Pop temp chat, because we are going to add them again inside `request_gpt_model_in_new_thread_with_ui_alive`

+    # 4. it is time to query llms
+    if len(chatbot) != 0: chatbot.pop(-1) # pop temp chat, because we are going to add them again inside `request_gpt_model_in_new_thread_with_ui_alive`
    model_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-        inputs=prompt,
-        inputs_show_user=i_say,
-        llm_kwargs=llm_kwargs,
-        chatbot=chatbot,
-        history=history,
+        inputs=prompt, inputs_show_user=i_say,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt=system_prompt,
        retry_times_at_unknown_error=0
    )

-    # 8. Remember Q&A
-    yield from update_ui_latest_msg(
-        model_say + '</br></br>' + f'对话记忆中, 请稍等 ({current_context}) ...',
-        chatbot, history, delay=0.5
-    )
+    # 5. remember what has been asked / answered
+    yield from update_ui_lastest_msg(model_say + '</br></br>' + f'对话记忆中, 请稍等 ({current_context}) ...', chatbot, history, delay=0.5) # 刷新界面
    rag_worker.remember_qa(i_say_to_remember, model_say)
    history.extend([i_say, model_say])

-    # 9. Final UI Update
-    yield from update_ui_latest_msg(model_say, chatbot, history, delay=0, msg=tip)
+    yield from update_ui_lastest_msg(model_say, chatbot, history, delay=0, msg=tip) # 刷新界面
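Both versions of `Rag问答` implement the same loop: clip the input, vectorize over-long input before retrieval, retrieve nodes, build a prompt, query the model, then write the Q&A pair back into the store. A dependency-free sketch of that loop under stated assumptions: `ToyStore` is a toy stand-in for `LlamaIndexRagWorker`, and retrieval is naive keyword scoring rather than embeddings.

```python
class ToyStore:
    def __init__(self):
        self.chunks = []

    def add(self, text: str):
        self.chunks.append(text)

    def retrieve(self, query: str, k: int = 3):
        # naive substring scoring; the real worker uses vector similarity
        scored = sorted(self.chunks, key=lambda c: -sum(w in c for w in query.split()))
        return scored[:k]

def rag_answer(store: ToyStore, query: str, ask_llm) -> str:
    nodes = store.retrieve(query)
    prompt = "Context:\n" + "\n---\n".join(nodes) + f"\n\nQuestion: {query}"
    answer = ask_llm(prompt)
    store.add(f"Q: {query}\nA: {answer}")  # remember the exchange for later retrieval
    return answer

store = ToyStore()
store.add("The vector store lives under the user's log folder.")
print(rag_answer(store, "where does the vector store live", lambda p: "Under the log folder."))
```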
@@ -1,5 +1,5 @@
import pickle, os, random
-from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_latest_msg
+from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -9,7 +9,7 @@ from loguru import logger
from typing import List


-SOCIAL_NETWORK_WORKER_REGISTER = {}
+SOCIAL_NETWOK_WORKER_REGISTER = {}

class SocialNetwork():
    def __init__(self):
@@ -78,7 +78,7 @@ class SocialNetworkWorker(SaveAndLoad):
            for f in friend.friends_list:
                self.add_friend(f)
            msg = f"成功添加{len(friend.friends_list)}个联系人: {str(friend.friends_list)}"
-            yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=0)
+            yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=0)


    def run(self, txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
@@ -104,12 +104,12 @@ class SocialNetworkWorker(SaveAndLoad):
        }

        try:
-            Explanation = '\n'.join([f'{k}: {v["explain_to_llm"]}' for k, v in self.tools_to_select.items()])
+            Explaination = '\n'.join([f'{k}: {v["explain_to_llm"]}' for k, v in self.tools_to_select.items()])
            class UserSociaIntention(BaseModel):
                intention_type: str = Field(
                    description=
                        f"The type of user intention. You must choose from {self.tools_to_select.keys()}.\n\n"
-                        f"Explanation:\n{Explanation}",
+                        f"Explaination:\n{Explaination}",
                    default="SocialAdvice"
                )
            pydantic_cls_instance, err_msg = select_tool(
@@ -118,7 +118,7 @@ class SocialNetworkWorker(SaveAndLoad):
                pydantic_cls=UserSociaIntention
            )
        except Exception as e:
-            yield from update_ui_latest_msg(
+            yield from update_ui_lastest_msg(
                lastmsg=f"无法理解用户意图 {err_msg}",
                chatbot=chatbot,
                history=history,
@@ -150,10 +150,10 @@ def I人助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
    # 1. we retrieve worker from global context
    user_name = chatbot.get_user()
    checkpoint_dir=get_log_folder(user_name, plugin_name='experimental_rag')
-    if user_name in SOCIAL_NETWORK_WORKER_REGISTER:
-        social_network_worker = SOCIAL_NETWORK_WORKER_REGISTER[user_name]
+    if user_name in SOCIAL_NETWOK_WORKER_REGISTER:
+        social_network_worker = SOCIAL_NETWOK_WORKER_REGISTER[user_name]
    else:
-        social_network_worker = SOCIAL_NETWORK_WORKER_REGISTER[user_name] = SocialNetworkWorker(
+        social_network_worker = SOCIAL_NETWOK_WORKER_REGISTER[user_name] = SocialNetworkWorker(
            user_name,
            llm_kwargs,
            checkpoint_dir=checkpoint_dir,
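`SocialNetworkWorker.run` routes user intent by folding the tool table and its explanations into a pydantic field description and validating the model's choice against the schema. A sketch of that idea, assuming pydantic v2; the tool table below is invented, and the LLM round trip (done by `select_tool` in the real code) is replaced by a canned reply:

```python
from pydantic import BaseModel, Field

tools_to_select = {
    "SocialAdvice": {"explain_to_llm": "give advice on a social situation"},
    "AddContact": {"explain_to_llm": "remember a new contact"},
}  # hypothetical stand-in for the real tool table

explanation = "\n".join(f"{k}: {v['explain_to_llm']}" for k, v in tools_to_select.items())

class UserSocialIntention(BaseModel):
    intention_type: str = Field(
        description=f"The type of user intention. You must choose from {list(tools_to_select)}.\n\n"
                    f"Explanation:\n{explanation}",
        default="SocialAdvice",
    )

# a canned "model reply" validated into the schema:
print(UserSocialIntention.model_validate({"intention_type": "AddContact"}))
```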
@@ -1,5 +1,5 @@
import os, copy, time
-from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_latest_msg, get_conf, generate_file_link
+from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_lastest_msg, get_conf, generate_file_link
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
@@ -117,7 +117,7 @@ def 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
            logger.error(f"文件: {fp} 的注释结果未能成功")
    file_links = generate_file_link(preview_html_list)

-    yield from update_ui_latest_msg(
+    yield from update_ui_lastest_msg(
        f"当前任务: <br/>{'<br/>'.join(tasks)}.<br/>" +
        f"剩余源文件数量: {remain}.<br/>" +
        f"已完成的文件: {sum(worker_done)}.<br/>" +
@@ -1,204 +0,0 @@
|
|||||||
import requests
|
|
||||||
import random
|
|
||||||
import time
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
from functools import lru_cache
|
|
||||||
from itertools import zip_longest
|
|
||||||
from check_proxy import check_proxy
|
|
||||||
from toolbox import CatchException, update_ui, get_conf, promote_file_to_downloadzone, update_ui_latest_msg, generate_file_link
|
|
||||||
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
|
|
||||||
from request_llms.bridge_all import model_info
|
|
||||||
from request_llms.bridge_all import predict_no_ui_long_connection
|
|
||||||
from crazy_functions.prompts.internet import SearchOptimizerPrompt, SearchAcademicOptimizerPrompt
|
|
||||||
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
|
|
||||||
from textwrap import dedent
|
|
||||||
from loguru import logger
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
|
|
||||||
class Query(BaseModel):
|
|
||||||
search_keyword: str = Field(description="search query for video resource")
|
|
||||||
|
|
||||||
|
|
||||||
class VideoResource(BaseModel):
|
|
||||||
thought: str = Field(description="analysis of the search results based on the user's query")
|
|
||||||
title: str = Field(description="title of the video")
|
|
||||||
author: str = Field(description="author/uploader of the video")
|
|
||||||
bvid: str = Field(description="unique ID of the video")
|
|
||||||
another_failsafe_bvid: str = Field(description="provide another bvid, the other one is not working")
|
|
||||||
|
|
||||||
|
|
||||||
def get_video_resource(search_keyword):
|
|
||||||
from crazy_functions.media_fns.get_media import search_videos
|
|
||||||
|
|
||||||
# Search for videos and return the first result
|
|
||||||
videos = search_videos(
|
|
||||||
search_keyword
|
|
||||||
)
|
|
||||||
|
|
||||||
# Return the first video if results exist, otherwise return None
|
|
||||||
return videos
|
|
||||||
|
|
||||||
def download_video(bvid, user_name, chatbot, history):
|
|
||||||
# from experimental_mods.get_bilibili_resource import download_bilibili
|
|
||||||
from crazy_functions.media_fns.get_media import download_video
|
|
||||||
# pause a while
|
|
||||||
tic_time = 8
|
|
||||||
for i in range(tic_time):
|
|
||||||
yield from update_ui_latest_msg(
|
|
||||||
lastmsg=f"即将下载音频。等待{tic_time-i}秒后自动继续, 点击“停止”键取消此操作。",
|
|
||||||
chatbot=chatbot, history=[], delay=1)
|
|
||||||
|
|
||||||
# download audio
|
|
||||||
chatbot.append((None, "下载音频, 请稍等...")); yield from update_ui(chatbot=chatbot, history=history)
|
|
||||||
downloaded_files = yield from download_video(bvid, only_audio=True, user_name=user_name, chatbot=chatbot, history=history)
|
|
||||||
|
|
||||||
if len(downloaded_files) == 0:
|
|
||||||
# failed to download audio
|
|
||||||
return []
|
|
||||||
|
|
||||||
# preview
|
|
||||||
preview_list = [promote_file_to_downloadzone(fp) for fp in downloaded_files]
|
|
||||||
file_links = generate_file_link(preview_list)
|
|
||||||
yield from update_ui_latest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
|
|
||||||
chatbot.append((None, f"即将下载视频。"))
|
|
||||||
|
|
||||||
# pause a while
|
|
||||||
tic_time = 16
|
|
||||||
for i in range(tic_time):
|
|
||||||
yield from update_ui_latest_msg(
|
|
||||||
lastmsg=f"即将下载视频。等待{tic_time-i}秒后自动继续, 点击“停止”键取消此操作。",
|
|
||||||
chatbot=chatbot, history=[], delay=1)
|
|
||||||
|
|
||||||
# download video
|
|
||||||
chatbot.append((None, "下载视频, 请稍等...")); yield from update_ui(chatbot=chatbot, history=history)
|
|
||||||
downloaded_files_part2 = yield from download_video(bvid, only_audio=False, user_name=user_name, chatbot=chatbot, history=history)
|
|
||||||
|
|
||||||
# preview
|
|
||||||
preview_list = [promote_file_to_downloadzone(fp) for fp in downloaded_files_part2]
|
|
||||||
file_links = generate_file_link(preview_list)
|
|
||||||
yield from update_ui_latest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
|
|
||||||
|
|
||||||
# return
|
|
||||||
return downloaded_files + downloaded_files_part2
|
|
||||||
|
|
||||||
|
|
||||||
class Strategy(BaseModel):
|
|
||||||
thought: str = Field(description="analysis of the user's wish, for example, can you recall the name of the resource?")
|
|
||||||
which_methods: str = Field(description="Which method to use to find the necessary information? choose from 'method_1' and 'method_2'.")
|
|
||||||
method_1_search_keywords: str = Field(description="Generate keywords to search the internet if you choose method 1, otherwise empty.")
|
|
||||||
method_2_generate_keywords: str = Field(description="Generate keywords for video download engine if you choose method 2, otherwise empty.")
|
|
||||||
@CatchException
def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    user_wish: str = txt
    # query demos:
    # - "我想找一首歌,里面有句歌词是“turn your face towards the sun”"
    # - "一首歌,第一句是红豆生南国"
    # - "一首音乐,中国航天任务专用的那首"
    # - "戴森球计划在熔岩星球的音乐"
    # - "hanser的百变什么精"
    # - "打大圣残躯时的bgm"
    # - "渊下宫战斗音乐"

    # search
    chatbot.append((txt, "检索中, 请稍等..."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    if "跳过联网搜索" not in user_wish:
        # structured generation
        internet_search_keyword = user_wish

        yield from update_ui_latest_msg(lastmsg=f"发起互联网检索: {internet_search_keyword} ...", chatbot=chatbot, history=[], delay=1)
        from crazy_functions.Internet_GPT import internet_search_with_analysis_prompt
        result = yield from internet_search_with_analysis_prompt(
            prompt=internet_search_keyword,
            analysis_prompt="请根据搜索结果分析,获取用户需要找的资源的名称、作者、出处等信息。",
            llm_kwargs=llm_kwargs,
            chatbot=chatbot
        )

        yield from update_ui_latest_msg(lastmsg=f"互联网检索结论: {result} \n\n 正在生成进一步检索方案 ...", chatbot=chatbot, history=[], delay=1)
        rf_req = dedent(f"""
            The user wishes to get the following resource:
            {user_wish}
            Meanwhile, you can access another expert's opinion on the user's wish:
            {result}
            Generate search keywords (less than 5 keywords) for the video download engine accordingly.
        """)
    else:
        user_wish = user_wish.replace("跳过联网搜索", "").strip()
        rf_req = dedent(f"""
            The user wishes to get the following resource:
            {user_wish}
            Generate search keywords (less than 5 keywords) accordingly.
        """)
    gpt_json_io = GptJsonIO(Query)
    inputs = rf_req + gpt_json_io.format_instructions
    run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
    analyze_res = run_gpt_fn(inputs, "")
    logger.info(analyze_res)
    query: Query = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
    video_engine_keywords = query.search_keyword
    # display the confirmed keywords
    chatbot.append((None, f"检索关键词已确认: {video_engine_keywords}。筛选中, 请稍等..."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # fetch candidate resources
    candidate_dictionary: dict = get_video_resource(video_engine_keywords)
    candidate_dictionary_as_str = json.dumps(candidate_dictionary, ensure_ascii=False, indent=4)

    # display candidate resources
    candidate_display = "\n".join([f"{i+1}. {it['title']}" for i, it in enumerate(candidate_dictionary)])
    chatbot.append((None, f"候选:\n\n{candidate_display}"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # structured generation
    rf_req_2 = dedent(f"""
        The user wishes to get the following resource:
        {user_wish}

        Select the most relevant and suitable video resource from the following search results:
        {candidate_dictionary_as_str}

        Note:
        1. The first several video search results are more likely to satisfy the user's wish.
        2. The duration of the video should be less than 10 minutes.
        3. You should analyze the search results first, before giving your answer.
        4. Use Chinese if possible.
        5. Besides the primary video selection, give a backup video resource `bvid`.
    """)
    gpt_json_io = GptJsonIO(VideoResource)
    inputs = rf_req_2 + gpt_json_io.format_instructions
    run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
    analyze_res = run_gpt_fn(inputs, "")
    logger.info(analyze_res)
    video_resource: VideoResource = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)

    # Display
    chatbot.append(
        (None,
            f"分析:{video_resource.thought}" "<br/>"
            f"选择: `{video_resource.title}`。" "<br/>"
            f"作者:{video_resource.author}"
        )
    )
    chatbot.append((None, f"下载中, 请稍等..."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if video_resource and video_resource.bvid:
        logger.info(video_resource)
        downloaded = yield from download_video(video_resource.bvid, chatbot.get_user(), chatbot, history)
        if not downloaded:
            chatbot.append((None, f"下载失败, 尝试备选 ..."))
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
            downloaded = yield from download_video(video_resource.another_failsafe_bvid, chatbot.get_user(), chatbot, history)
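Usage note: the plugin treats the raw input-box text as the wish, and a literal `跳过联网搜索` ("skip internet search") prefix switches it to the keyword-only branch. A hypothetical direct invocation, with the fixture objects (`llm_kwargs`, `chatbot`) assumed to exist as in the surrounding code:

# Drive the plugin generator manually; the fixtures here are assumptions, not real defaults.
for _ in 多媒体任务("跳过联网搜索 渊下宫战斗音乐", llm_kwargs, {}, chatbot, [], "", None):
    pass  # each yield pushes a UI refresh via update_ui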
@CatchException
def debug(bvid, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    yield from download_video(bvid, chatbot.get_user(), chatbot, history)

@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import report_exception, get_log_folder, update_ui_latest_msg, Singleton
+from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
 from crazy_functions.agent_fns.general import AutoGenGeneral
@@ -8,7 +8,7 @@ class EchoDemo(PluginMultiprocessManager):
         while True:
             msg = self.child_conn.recv() # PipeCom
             if msg.cmd == "user_input":
-                # wait father user input
+                # wait futher user input
                 self.child_conn.send(PipeCom("show", msg.content))
                 wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
                 if not wait_success:
@@ -27,7 +27,7 @@ def gpt_academic_generate_oai_reply(
         llm_kwargs=llm_config,
         history=history,
         sys_prompt=self._oai_system_message[0]['content'],
-        console_silence=True
+        console_slience=True
     )
     assumed_done = reply.endswith('\nTERMINATE')
     return True, reply
@@ -10,7 +10,7 @@ from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_
 # TODO: 解决缩进问题

 find_function_end_prompt = '''
-Below is a page of code that you need to read. This page may not yet complete, you job is to split this page to separate functions, class functions etc.
+Below is a page of code that you need to read. This page may not yet complete, you job is to split this page to sperate functions, class functions etc.
 - Provide the line number where the first visible function ends.
 - Provide the line number where the next visible function begins.
 - If there are no other functions in this page, you should simply return the line number of the last line.
@@ -59,7 +59,7 @@ OUTPUT:


-revise_function_prompt = '''
+revise_funtion_prompt = '''
 You need to read the following code, and revise the source code ({FILE_BASENAME}) according to following instructions:
 1. You should analyze the purpose of the functions (if there are any).
 2. You need to add docstring for the provided functions (if there are any).
@@ -117,7 +117,7 @@ def zip_result(folder):
 '''


-revise_function_prompt_chinese = '''
+revise_funtion_prompt_chinese = '''
 您需要阅读以下代码,并根据以下说明修订源代码({FILE_BASENAME}):
 1. 如果源代码中包含函数的话, 你应该分析给定函数实现了什么功能
 2. 如果源代码中包含函数的话, 你需要为函数添加docstring, docstring必须使用中文
@@ -188,9 +188,9 @@ class PythonCodeComment():
         self.language = language
         self.observe_window_update = observe_window_update
         if self.language == "chinese":
-            self.core_prompt = revise_function_prompt_chinese
+            self.core_prompt = revise_funtion_prompt_chinese
         else:
-            self.core_prompt = revise_function_prompt
+            self.core_prompt = revise_funtion_prompt
         self.path = None
         self.file_basename = None
         self.file_brief = ""
@@ -222,7 +222,7 @@ class PythonCodeComment():
             history=[],
             sys_prompt="",
             observe_window=[],
-            console_silence=True
+            console_slience=True
         )

    def extract_number(text):
@@ -316,7 +316,7 @@ class PythonCodeComment():
     def tag_code(self, fn, hint):
         code = fn
         _, n_indent = self.dedent(code)
-        indent_reminder = "" if n_indent == 0 else "(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preserve them in the OUTPUT.)"
+        indent_reminder = "" if n_indent == 0 else "(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preseve them in the OUTPUT.)"
         brief_reminder = "" if self.file_brief == "" else f"({self.file_basename} abstract: {self.file_brief})"
         hint_reminder = "" if hint is None else f"(Reminder: do not ignore or modify code such as `{hint}`, provide complete code in the OUTPUT.)"
         self.llm_kwargs['temperature'] = 0
@@ -333,7 +333,7 @@ class PythonCodeComment():
             history=[],
             sys_prompt="",
             observe_window=[],
-            console_silence=True
+            console_slience=True
         )

    def get_code_block(reply):
@@ -400,7 +400,7 @@ class PythonCodeComment():
         return revised

     def begin_comment_source_code(self, chatbot=None, history=None):
-        # from toolbox import update_ui_latest_msg
+        # from toolbox import update_ui_lastest_msg
         assert self.path is not None
         assert '.py' in self.path # must be python source code
         # write_target = self.path + '.revised.py'
@@ -409,10 +409,10 @@
         # with open(self.path + '.revised.py', 'w+', encoding='utf8') as f:
         while True:
             try:
-                # yield from update_ui_latest_msg(f"({self.file_basename}) 正在读取下一段代码片段:\n", chatbot=chatbot, history=history, delay=0)
+                # yield from update_ui_lastest_msg(f"({self.file_basename}) 正在读取下一段代码片段:\n", chatbot=chatbot, history=history, delay=0)
                 next_batch, line_no_start, line_no_end = self.get_next_batch()
                 self.observe_window_update(f"正在处理{self.file_basename} - {line_no_start}/{len(self.full_context)}\n")
-                # yield from update_ui_latest_msg(f"({self.file_basename}) 处理代码片段:\n\n{next_batch}", chatbot=chatbot, history=history, delay=0)
+                # yield from update_ui_lastest_msg(f"({self.file_basename}) 处理代码片段:\n\n{next_batch}", chatbot=chatbot, history=history, delay=0)

                 hint = None
                 MAX_ATTEMPT = 2

141 crazy_functions/chatglm微调工具.py Normal file
@@ -0,0 +1,141 @@
from toolbox import CatchException, update_ui, promote_file_to_downloadzone
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
import datetime, json


def fetch_items(list_of_items, batch_size):
    for i in range(0, len(list_of_items), batch_size):
        yield list_of_items[i:i + batch_size]

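`fetch_items` simply slices a list into consecutive batches; a quick sketch with made-up data:

# Hypothetical data: five items in batches of two.
assert list(fetch_items([1, 2, 3, 4, 5], batch_size=2)) == [[1, 2], [3, 4], [5]]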
def string_to_options(arguments):
    import argparse
    import shlex

    # Create an argparse.ArgumentParser instance
    parser = argparse.ArgumentParser()

    # Add command-line arguments
    parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
    parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
    parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
    parser.add_argument("--batch", type=int, help="Batch size", default=50)
    parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", default=50)
    parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2)
    parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1)
    parser.add_argument("--json_dataset", type=str, help="json_dataset", default="")
    parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="")

    # Parse the arguments
    args = parser.parse_args(shlex.split(arguments))

    return args

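Because the argument string is split with `shlex`, quoted values survive intact; a usage sketch (the argument values are hypothetical):

opts = string_to_options('--llm_to_learn gpt-3.5-turbo --batch 20 --system_prompt "You are a tutor"')
assert opts.batch == 20 and opts.system_prompt == "You are a tutor"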
@CatchException
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    txt             text entered by the user in the input box, e.g. a paragraph to translate, or a path containing files to process
    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through unchanged
    plugin_kwargs   parameters of the plugin
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history (the preceding context)
    system_prompt   silent system prompt for GPT
    user_request    information about the current user request (IP address, etc.)
    """
    history = []    # clear the history to avoid input overflow
    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    args = plugin_kwargs.get("advanced_arg", None)
    if args is None:
        chatbot.append(("没给定指令", "退出"))
        yield from update_ui(chatbot=chatbot, history=history); return
    else:
        arguments = string_to_options(arguments=args)

    dat = []
    with open(txt, 'r', encoding='utf8') as f:
        for line in f.readlines():
            json_dat = json.loads(line)
            dat.append(json_dat["content"])

    llm_kwargs['llm_model'] = arguments.llm_to_learn
    for batch in fetch_items(dat, arguments.batch):
        res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)],
            inputs_show_user_array=[f"Show Nothing" for _ in (batch)],
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history_array=[[] for _ in (batch)],
            sys_prompt_array=[arguments.system_prompt for _ in (batch)],
            max_workers=10  # the maximum parallel load allowed by OpenAI
        )

        with open(txt+'.generated.json', 'a+', encoding='utf8') as f:
            for b, r in zip(batch, res[1::2]):
                f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n')

    promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot)
    return

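Each batch appends one JSON line per sample to `<txt>.generated.json`, pairing the original corpus text (`content`) with the model reply (`summary`), which are the same column names the P-Tuning launch command below expects. An illustrative line (the values are made up):

# json.dumps({"content": "一段原始语料", "summary": "LLM生成的对应回复"}, ensure_ascii=False)
# -> {"content": "一段原始语料", "summary": "LLM生成的对应回复"}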
@CatchException
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    txt             text entered by the user in the input box, e.g. a paragraph to translate, or a path containing files to process
    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through unchanged
    plugin_kwargs   parameters of the plugin
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history (the preceding context)
    system_prompt   silent system prompt for GPT
    user_request    information about the current user request (IP address, etc.)
    """
    import subprocess
    history = []    # clear the history to avoid input overflow
    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    args = plugin_kwargs.get("advanced_arg", None)
    if args is None:
        chatbot.append(("没给定指令", "退出"))
        yield from update_ui(chatbot=chatbot, history=history); return
    else:
        arguments = string_to_options(arguments=args)

    pre_seq_len = arguments.pre_seq_len                # 128
    learning_rate = arguments.learning_rate            # 2e-2
    num_gpus = arguments.num_gpus                      # 1
    json_dataset = arguments.json_dataset              # 't_code.json'
    ptuning_directory = arguments.ptuning_directory    # '/home/hmp/ChatGLM2-6B/ptuning'

    command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \
        --do_train \
        --train_file AdvertiseGen/{json_dataset} \
        --validation_file AdvertiseGen/{json_dataset} \
        --preprocessing_num_workers 20 \
        --prompt_column content \
        --response_column summary \
        --overwrite_cache \
        --model_name_or_path THUDM/chatglm2-6b \
        --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \
        --overwrite_output_dir \
        --max_source_length 256 \
        --max_target_length 256 \
        --per_device_train_batch_size 1 \
        --per_device_eval_batch_size 1 \
        --gradient_accumulation_steps 16 \
        --predict_with_generate \
        --max_steps 100 \
        --logging_steps 10 \
        --save_steps 20 \
        --learning_rate {learning_rate} \
        --pre_seq_len {pre_seq_len} \
        --quantization_bit 4"

    process = subprocess.Popen(command, shell=True, cwd=ptuning_directory)
    try:
        process.communicate(timeout=3600*24)
    except subprocess.TimeoutExpired:
        process.kill()
    return
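For orientation, the advanced argument that drives 启动微调 might look like the following; the values are taken from the inline comments above, and the path is an example rather than a default:

# Hypothetical plugin_kwargs["advanced_arg"] for 启动微调:
advanced_arg = ("--pre_seq_len 128 --learning_rate 2e-2 --num_gpus 1 "
                "--json_dataset t_code.json --ptuning_directory /home/hmp/ChatGLM2-6B/ptuning")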
@@ -1,7 +1,7 @@
 import os
 import threading
 from loguru import logger
-from shared_utils.char_visual_effect import scrolling_visual_effect
+from shared_utils.char_visual_effect import scolling_visual_effect
 from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton

 def input_clipping(inputs, history, max_token_limit, return_clip_flags=False):
@@ -169,7 +169,6 @@ def can_multi_process(llm) -> bool:
     def default_condition(llm) -> bool:
         # legacy condition
         if llm.startswith('gpt-'): return True
-        if llm.startswith('chatgpt-'): return True
         if llm.startswith('api2d-'): return True
         if llm.startswith('azure-'): return True
         if llm.startswith('spark'): return True
@@ -256,7 +255,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             # 【第一种情况】:顺利完成
             gpt_say = predict_no_ui_long_connection(
                 inputs=inputs, llm_kwargs=llm_kwargs, history=history,
-                sys_prompt=sys_prompt, observe_window=mutable[index], console_silence=True
+                sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
             )
             mutable[index][2] = "已成功"
             return gpt_say
@@ -326,7 +325,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 mutable[thread_index][1] = time.time()
             # 在前端打印些好玩的东西
             for thread_index, _ in enumerate(worker_done):
-                print_something_really_funny = f"[ ...`{scrolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
+                print_something_really_funny = f"[ ...`{scolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
                 observe_win.append(print_something_really_funny)
             # 在前端打印些好玩的东西
             stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -389,11 +388,11 @@ def read_and_clean_pdf_text(fp):
         """
         提取文本块主字体
         """
-        fsize_statistics = {}
+        fsize_statiscs = {}
         for wtf in l['spans']:
-            if wtf['size'] not in fsize_statistics: fsize_statistics[wtf['size']] = 0
-            fsize_statistics[wtf['size']] += len(wtf['text'])
-        return max(fsize_statistics, key=fsize_statistics.get)
+            if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
+            fsize_statiscs[wtf['size']] += len(wtf['text'])
+        return max(fsize_statiscs, key=fsize_statiscs.get)

     def ffsize_same(a,b):
         """
@@ -433,11 +432,11 @@ def read_and_clean_pdf_text(fp):

     ############################## <第 2 步,获取正文主字体> ##################################
     try:
-        fsize_statistics = {}
+        fsize_statiscs = {}
         for span in meta_span:
-            if span[1] not in fsize_statistics: fsize_statistics[span[1]] = 0
-            fsize_statistics[span[1]] += span[2]
-        main_fsize = max(fsize_statistics, key=fsize_statistics.get)
+            if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
+            fsize_statiscs[span[1]] += span[2]
+        main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
         if REMOVE_FOOT_NOTE:
             give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
     except:
@@ -610,9 +609,9 @@ class nougat_interface():


     def NOUGAT_parse_pdf(self, fp, chatbot, history):
-        from toolbox import update_ui_latest_msg
+        from toolbox import update_ui_lastest_msg

-        yield from update_ui_latest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
                                         chatbot=chatbot, history=history, delay=0)
         self.threadLock.acquire()
         import glob, threading, os
@@ -620,7 +619,7 @@ class nougat_interface():
         dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
         os.makedirs(dst)

-        yield from update_ui_latest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
                                          chatbot=chatbot, history=history, delay=0)
         command = ['nougat', '--out', os.path.abspath(dst), os.path.abspath(fp)]
         self.nougat_with_timeout(command, cwd=os.getcwd(), timeout=3600)
@@ -1,812 +0,0 @@
import os
import time
from abc import ABC, abstractmethod
from datetime import datetime
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
from docx.oxml.ns import qn
from docx.shared import Inches, Cm
from docx.shared import Pt, RGBColor, Inches
from typing import Dict, List, Tuple
import markdown
from crazy_functions.doc_fns.conversation_doc.word_doc import convert_markdown_to_word


class DocumentFormatter(ABC):
    """Base class for document formatting; defines the basic formatting interface"""

    def __init__(self, final_summary: str, file_summaries_map: Dict, failed_files: List[Tuple]):
        self.final_summary = final_summary
        self.file_summaries_map = file_summaries_map
        self.failed_files = failed_files

    @abstractmethod
    def format_failed_files(self) -> str:
        """Format the list of files that failed to process"""
        pass

    @abstractmethod
    def format_file_summaries(self) -> str:
        """Format the per-file summary content"""
        pass

    @abstractmethod
    def create_document(self) -> str:
        """Create the complete document"""
        pass


class WordFormatter(DocumentFormatter):
    """Word document generator - follows the Chinese government document standard (GB/T 9704-2012), with some optimizations"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.doc = Document()
        self._setup_document()
        self._create_styles()
        # initialize the three-level heading numbering system
        self.numbers = {
            1: 0,  # level-1 heading counter
            2: 0,  # level-2 heading counter
            3: 0   # level-3 heading counter
        }

    def _setup_document(self):
        """Set up the basic document layout, including page geometry and the header"""
        sections = self.doc.sections
        for section in sections:
            # page size: A4
            section.page_width = Cm(21)
            section.page_height = Cm(29.7)
            # page margins
            section.top_margin = Cm(3.7)      # top margin 37mm
            section.bottom_margin = Cm(3.5)   # bottom margin 35mm
            section.left_margin = Cm(2.8)     # left margin 28mm
            section.right_margin = Cm(2.6)    # right margin 26mm
            # header/footer distances
            section.header_distance = Cm(2.0)
            section.footer_distance = Cm(2.0)

            # add the page header
            header = section.header
            header_para = header.paragraphs[0]
            header_para.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
            header_run = header_para.add_run("该文档由GPT-academic生成")
            header_run.font.name = '仿宋'
            header_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
            header_run.font.size = Pt(9)

    def _create_styles(self):
        """Create the document styles"""
        # body text style
        style = self.doc.styles.add_style('Normal_Custom', WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = '仿宋'
        style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        style.font.size = Pt(14)
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        style.paragraph_format.space_after = Pt(0)
        style.paragraph_format.first_line_indent = Pt(28)

        # heading styles for every level
        self._create_heading_style('Title_Custom', '方正小标宋简体', 32, WD_PARAGRAPH_ALIGNMENT.CENTER)
        self._create_heading_style('Heading1_Custom', '黑体', 22, WD_PARAGRAPH_ALIGNMENT.LEFT)
        self._create_heading_style('Heading2_Custom', '黑体', 18, WD_PARAGRAPH_ALIGNMENT.LEFT)
        self._create_heading_style('Heading3_Custom', '黑体', 16, WD_PARAGRAPH_ALIGNMENT.LEFT)

    def _create_heading_style(self, style_name: str, font_name: str, font_size: int, alignment):
        """Create a heading style"""
        style = self.doc.styles.add_style(style_name, WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = font_name
        style._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
        style.font.size = Pt(font_size)
        style.font.bold = True
        style.paragraph_format.alignment = alignment
        style.paragraph_format.space_before = Pt(12)
        style.paragraph_format.space_after = Pt(12)
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        return style

    def _get_heading_number(self, level: int) -> str:
        """
        Generate a heading number.

        Args:
            level: heading level (0-3)

        Returns:
            str: the formatted heading number
        """
        if level == 0:  # the main title carries no number
            return ""

        self.numbers[level] += 1  # bump the counter of the current level

        # reset the counters of all lower levels
        for i in range(level + 1, 4):
            self.numbers[i] = 0

        # return a differently formatted number for each level
        if level == 1:
            return f"{self.numbers[1]}. "
        elif level == 2:
            return f"{self.numbers[1]}.{self.numbers[2]} "
        elif level == 3:
            return f"{self.numbers[1]}.{self.numbers[2]}.{self.numbers[3]} "
        return ""
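The counter logic above yields hierarchical numbers like the following; a minimal sketch (the constructor arguments are placeholder values, and python-docx must be installed for WordFormatter to instantiate):

fmt = WordFormatter(final_summary="", file_summaries_map={}, failed_files=[])
assert fmt._get_heading_number(1) == "1. "
assert fmt._get_heading_number(2) == "1.1 "
assert fmt._get_heading_number(2) == "1.2 "
assert fmt._get_heading_number(3) == "1.2.1 "
assert fmt._get_heading_number(1) == "2. "   # re-entering level 1 resets levels 2 and 3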
    def _add_heading(self, text: str, level: int):
        """
        Add a numbered heading.

        Args:
            text: heading text
            level: heading level (0-3)
        """
        style_map = {
            0: 'Title_Custom',
            1: 'Heading1_Custom',
            2: 'Heading2_Custom',
            3: 'Heading3_Custom'
        }

        number = self._get_heading_number(level)
        paragraph = self.doc.add_paragraph(style=style_map[level])

        if number:
            number_run = paragraph.add_run(number)
            font_size = 22 if level == 1 else (18 if level == 2 else 16)
            self._get_run_style(number_run, '黑体', font_size, True)

        text_run = paragraph.add_run(text)
        font_size = 32 if level == 0 else (22 if level == 1 else (18 if level == 2 else 16))
        self._get_run_style(text_run, '黑体', font_size, True)

        # the main title gets a date line
        if level == 0:
            date_paragraph = self.doc.add_paragraph()
            date_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            date_run = date_paragraph.add_run(datetime.now().strftime('%Y年%m月%d日'))
            self._get_run_style(date_run, '仿宋', 16, False)

        return paragraph

    def _get_run_style(self, run, font_name: str, font_size: int, bold: bool = False):
        """Set the style of a text run object"""
        run.font.name = font_name
        run._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
        run.font.size = Pt(font_size)
        run.font.bold = bold

    def format_failed_files(self) -> str:
        """Format the list of files that failed to process"""
        result = []
        if not self.failed_files:
            return "\n".join(result)

        result.append("处理失败文件:")
        for fp, reason in self.failed_files:
            result.append(f"• {os.path.basename(fp)}: {reason}")

        self._add_heading("处理失败文件", 1)
        for fp, reason in self.failed_files:
            self._add_content(f"• {os.path.basename(fp)}: {reason}", indent=False)
        self.doc.add_paragraph()

        return "\n".join(result)

    def _add_content(self, text: str, indent: bool = True):
        """Add body text, preprocessing it with convert_markdown_to_word"""
        # process markdown text via convert_markdown_to_word
        processed_text = convert_markdown_to_word(text)
        paragraph = self.doc.add_paragraph(processed_text, style='Normal_Custom')
        if not indent:
            paragraph.paragraph_format.first_line_indent = Pt(0)
        return paragraph

    def format_file_summaries(self) -> str:
        """
        Format the per-file summaries, ensuring correct heading levels and markdown handling
        """
        result = []
        # first group the file paths by directory
        file_groups = {}
        for path in sorted(self.file_summaries_map.keys()):
            dir_path = os.path.dirname(path)
            if dir_path not in file_groups:
                file_groups[dir_path] = []
            file_groups[dir_path].append(path)

        # handle files without a directory
        root_files = file_groups.get("", [])
        if root_files:
            for path in sorted(root_files):
                file_name = os.path.basename(path)
                result.append(f"\n📄 {file_name}")
                result.append(self.file_summaries_map[path])
                # files without a directory become level-2 headings
                self._add_heading(f"📄 {file_name}", 2)
                # process the file content via convert_markdown_to_word
                self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
                self.doc.add_paragraph()

        # handle files with a directory
        for dir_path in sorted(file_groups.keys()):
            if dir_path == "":  # skip root-level files handled above
                continue

            # the directory becomes a level-2 heading
            result.append(f"\n📁 {dir_path}")
            self._add_heading(f"📁 {dir_path}", 2)

            # every file in the directory becomes a level-3 heading
            for path in sorted(file_groups[dir_path]):
                file_name = os.path.basename(path)
                result.append(f"\n📄 {file_name}")
                result.append(self.file_summaries_map[path])

                # add the file name as a level-3 heading
                self._add_heading(f"📄 {file_name}", 3)
                # process the file content via convert_markdown_to_word
                self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
                self.doc.add_paragraph()

        return "\n".join(result)

    def create_document(self):
        """Create the complete Word document and return the document object"""
        # reset all heading counters
        for level in self.numbers:
            self.numbers[level] = 0

        # add the main title
        self._add_heading("文档总结报告", 0)
        self.doc.add_paragraph()

        # add the overall summary, preprocessed with convert_markdown_to_word
        self._add_heading("总体摘要", 1)
        self._add_content(convert_markdown_to_word(self.final_summary))
        self.doc.add_paragraph()

        # add the failed-file list (if any)
        if self.failed_files:
            self.format_failed_files()

        # add the detailed per-file summaries
        self._add_heading("各文件详细总结", 1)
        self.format_file_summaries()

        return self.doc

    def save_as_pdf(self, word_path, pdf_path=None):
        """Convert the generated Word document to PDF.

        Args:
            word_path: path of the Word document
            pdf_path: optional output path for the PDF; if omitted, the same name and location as the Word document are used

        Returns:
            the path of the generated PDF file, or None if conversion failed
        """
        from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter
        try:
            pdf_path = WordToPdfConverter.convert_to_pdf(word_path, pdf_path)
            return pdf_path
        except Exception as e:
            print(f"PDF转换失败: {str(e)}")
            return None


class MarkdownFormatter(DocumentFormatter):
    """Markdown document generator"""

    def format_failed_files(self) -> str:
        if not self.failed_files:
            return ""

        formatted_text = ["\n## ⚠️ 处理失败的文件"]
        for fp, reason in self.failed_files:
            formatted_text.append(f"- {os.path.basename(fp)}: {reason}")
        formatted_text.append("\n---")
        return "\n".join(formatted_text)

    def format_file_summaries(self) -> str:
        formatted_text = []
        sorted_paths = sorted(self.file_summaries_map.keys())
        current_dir = ""

        for path in sorted_paths:
            dir_path = os.path.dirname(path)
            if dir_path != current_dir:
                if dir_path:
                    formatted_text.append(f"\n## 📁 {dir_path}")
                current_dir = dir_path

            file_name = os.path.basename(path)
            formatted_text.append(f"\n### 📄 {file_name}")
            formatted_text.append(self.file_summaries_map[path])
            formatted_text.append("\n---")

        return "\n".join(formatted_text)

    def create_document(self) -> str:
        document = [
            "# 📑 文档总结报告",
            "\n## 总体摘要",
            self.final_summary
        ]

        if self.failed_files:
            document.append(self.format_failed_files())

        document.extend([
            "\n# 📚 各文件详细总结",
            self.format_file_summaries()
        ])

        return "\n".join(document)


class HtmlFormatter(DocumentFormatter):
    """HTML document generator - optimized version"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.md = markdown.Markdown(extensions=['extra','codehilite', 'tables','nl2br'])
        self.css_styles = """
            @keyframes fadeIn {
                from { opacity: 0; transform: translateY(20px); }
                to { opacity: 1; transform: translateY(0); }
            }

            @keyframes slideIn {
                from { transform: translateX(-20px); opacity: 0; }
                to { transform: translateX(0); opacity: 1; }
            }

            @keyframes pulse {
                0% { transform: scale(1); }
                50% { transform: scale(1.05); }
                100% { transform: scale(1); }
            }

            :root {
                /* Enhanced color palette */
                --primary-color: #2563eb;
                --primary-light: #eff6ff;
                --secondary-color: #1e293b;
                --background-color: #f8fafc;
                --text-color: #334155;
                --text-light: #64748b;
                --border-color: #e2e8f0;
                --error-color: #ef4444;
                --error-light: #fef2f2;
                --success-color: #22c55e;
                --warning-color: #f59e0b;
                --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
                --hover-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1);

                /* Typography */
                --heading-font: "Plus Jakarta Sans", system-ui, sans-serif;
                --body-font: "Inter", system-ui, sans-serif;
            }

            body {
                font-family: var(--body-font);
                line-height: 1.8;
                max-width: 1200px;
                margin: 0 auto;
                padding: 2rem;
                color: var(--text-color);
                background-color: var(--background-color);
                font-size: 16px;
                -webkit-font-smoothing: antialiased;
            }

            .container {
                background: white;
                padding: 3rem;
                border-radius: 24px;
                box-shadow: var(--card-shadow);
                transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
                animation: fadeIn 0.6s ease-out;
                border: 1px solid var(--border-color);
            }

            .container:hover {
                box-shadow: var(--hover-shadow);
                transform: translateY(-2px);
            }

            h1, h2, h3 {
                font-family: var(--heading-font);
                font-weight: 600;
            }

            h1 {
                color: var(--primary-color);
                font-size: 2.8em;
                text-align: center;
                margin: 2rem 0 3rem;
                padding-bottom: 1.5rem;
                border-bottom: 3px solid var(--primary-color);
                letter-spacing: -0.03em;
                position: relative;
                display: flex;
                align-items: center;
                justify-content: center;
                gap: 1rem;
            }

            h1::after {
                content: '';
                position: absolute;
                bottom: -3px;
                left: 50%;
                transform: translateX(-50%);
                width: 120px;
                height: 3px;
                background: linear-gradient(90deg, var(--primary-color), var(--primary-light));
                border-radius: 3px;
                transition: width 0.3s ease;
            }

            h1:hover::after {
                width: 180px;
            }

            h2 {
                color: var(--secondary-color);
                font-size: 1.9em;
                margin: 2.5rem 0 1.5rem;
                padding-left: 1.2rem;
                border-left: 4px solid var(--primary-color);
                letter-spacing: -0.02em;
                display: flex;
                align-items: center;
                gap: 1rem;
                transition: all 0.3s ease;
            }

            h2:hover {
                color: var(--primary-color);
                transform: translateX(5px);
            }

            h3 {
                color: var(--text-color);
                font-size: 1.5em;
                margin: 2rem 0 1rem;
                padding-bottom: 0.8rem;
                border-bottom: 2px solid var(--border-color);
                transition: all 0.3s ease;
                display: flex;
                align-items: center;
                gap: 0.8rem;
            }

            h3:hover {
                color: var(--primary-color);
                border-bottom-color: var(--primary-color);
            }

            .summary {
                background: var(--primary-light);
                padding: 2.5rem;
                border-radius: 16px;
                margin: 2.5rem 0;
                box-shadow: 0 4px 6px -1px rgba(37, 99, 235, 0.1);
                position: relative;
                overflow: hidden;
                transition: transform 0.3s ease, box-shadow 0.3s ease;
                animation: slideIn 0.5s ease-out;
            }

            .summary:hover {
                transform: translateY(-3px);
                box-shadow: 0 8px 12px -2px rgba(37, 99, 235, 0.15);
            }

            .summary::before {
                content: '';
                position: absolute;
                top: 0;
                left: 0;
                width: 4px;
                height: 100%;
                background: linear-gradient(to bottom, var(--primary-color), rgba(37, 99, 235, 0.6));
            }

            .summary p {
                margin: 1.2rem 0;
                line-height: 1.9;
                color: var(--text-color);
                transition: color 0.3s ease;
            }

            .summary:hover p {
                color: var(--secondary-color);
            }

            .details {
                margin-top: 3.5rem;
                padding-top: 2.5rem;
                border-top: 2px dashed var(--border-color);
                animation: fadeIn 0.8s ease-out;
            }

            .failed-files {
                background: var(--error-light);
                padding: 2rem;
                border-radius: 16px;
                margin: 3rem 0;
                border-left: 4px solid var(--error-color);
                position: relative;
                transition: all 0.3s ease;
                animation: slideIn 0.5s ease-out;
            }

            .failed-files:hover {
                transform: translateX(5px);
                box-shadow: 0 8px 15px -3px rgba(239, 68, 68, 0.1);
            }

            .failed-files h2 {
                color: var(--error-color);
                border-left: none;
                padding-left: 0;
            }

            .failed-files ul {
                margin: 1.8rem 0;
                padding-left: 1.2rem;
                list-style-type: none;
            }

            .failed-files li {
                margin: 1.2rem 0;
                padding: 1.2rem 1.8rem;
                background: rgba(239, 68, 68, 0.08);
                border-radius: 12px;
                transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
            }

            .failed-files li:hover {
                transform: translateX(8px);
                background: rgba(239, 68, 68, 0.12);
            }

            .directory-section {
                margin: 3.5rem 0;
                padding: 2rem;
                background: var(--background-color);
                border-radius: 16px;
                position: relative;
                transition: all 0.3s ease;
                animation: fadeIn 0.6s ease-out;
            }

            .directory-section:hover {
                background: white;
                box-shadow: var(--card-shadow);
            }

            .file-summary {
                background: white;
                padding: 2rem;
                margin: 1.8rem 0;
                border-radius: 16px;
                box-shadow: var(--card-shadow);
                border-left: 4px solid var(--border-color);
                transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
                position: relative;
                overflow: hidden;
            }

            .file-summary:hover {
                border-left-color: var(--primary-color);
                transform: translateX(8px) translateY(-2px);
                box-shadow: var(--hover-shadow);
            }

            .file-summary {
                background: white;
                padding: 2rem;
                margin: 1.8rem 0;
                border-radius: 16px;
                box-shadow: var(--card-shadow);
                border-left: 4px solid var(--border-color);
                transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
                position: relative;
            }

            .file-summary:hover {
                border-left-color: var(--primary-color);
                transform: translateX(8px) translateY(-2px);
                box-shadow: var(--hover-shadow);
            }

            .icon {
                display: inline-flex;
                align-items: center;
                justify-content: center;
                width: 32px;
                height: 32px;
                border-radius: 8px;
                background: var(--primary-light);
                color: var(--primary-color);
                font-size: 1.2em;
                transition: all 0.3s ease;
            }

            .file-summary:hover .icon,
            .directory-section:hover .icon {
                transform: scale(1.1);
                background: var(--primary-color);
                color: white;
            }

            /* Smooth scrolling */
            html {
                scroll-behavior: smooth;
            }

            /* Selection style */
            ::selection {
                background: var(--primary-light);
                color: var(--primary-color);
            }

            /* Print styles */
            @media print {
                body {
                    background: white;
                }
                .container {
                    box-shadow: none;
                    padding: 0;
                }
                .file-summary, .failed-files {
                    break-inside: avoid;
                    box-shadow: none;
                }
                .icon {
                    display: none;
                }
            }

            /* Responsive design */
            @media (max-width: 768px) {
                body {
                    padding: 1rem;
                    font-size: 15px;
                }

                .container {
                    padding: 1.5rem;
                }

                h1 {
                    font-size: 2.2em;
                    margin: 1.5rem 0 2rem;
                }

                h2 {
                    font-size: 1.7em;
                }

                h3 {
                    font-size: 1.4em;
                }

                .summary, .failed-files, .directory-section {
                    padding: 1.5rem;
                }

                .file-summary {
                    padding: 1.2rem;
                }

                .icon {
                    width: 28px;
                    height: 28px;
                }
            }

            /* Dark mode support */
            @media (prefers-color-scheme: dark) {
                :root {
                    --primary-light: rgba(37, 99, 235, 0.15);
                    --background-color: #0f172a;
                    --text-color: #e2e8f0;
                    --text-light: #94a3b8;
                    --border-color: #1e293b;
                    --error-light: rgba(239, 68, 68, 0.15);
                }

                .container, .file-summary {
                    background: #1e293b;
                }

                .directory-section {
                    background: #0f172a;
                }

                .directory-section:hover {
                    background: #1e293b;
                }
            }
        """

    def format_failed_files(self) -> str:
        if not self.failed_files:
            return ""

        failed_files_html = ['<div class="failed-files">']
        failed_files_html.append('<h2><span class="icon">⚠️</span> 处理失败的文件</h2>')
        failed_files_html.append("<ul>")
        for fp, reason in self.failed_files:
            failed_files_html.append(
                f'<li><strong>📄 {os.path.basename(fp)}</strong><br><span style="color: var(--text-light)">{reason}</span></li>'
            )
        failed_files_html.append("</ul></div>")
        return "\n".join(failed_files_html)

    def format_file_summaries(self) -> str:
        formatted_html = []
        sorted_paths = sorted(self.file_summaries_map.keys())
        current_dir = ""

        for path in sorted_paths:
            dir_path = os.path.dirname(path)
            if dir_path != current_dir:
                if dir_path:
                    formatted_html.append('<div class="directory-section">')
                    formatted_html.append(f'<h2><span class="icon">📁</span> {dir_path}</h2>')
                    formatted_html.append('</div>')
                current_dir = dir_path

            file_name = os.path.basename(path)
            formatted_html.append('<div class="file-summary">')
            formatted_html.append(f'<h3><span class="icon">📄</span> {file_name}</h3>')
            formatted_html.append(self.md.convert(self.file_summaries_map[path]))
            formatted_html.append('</div>')

        return "\n".join(formatted_html)

    def create_document(self) -> str:
        """Generate the HTML document

        Returns:
            str: the complete HTML document as a string
        """
        return f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1">
            <title>文档总结报告</title>
            <link href="https://cdnjs.cloudflare.com/ajax/libs/inter/3.19.3/inter.css" rel="stylesheet">
            <link href="https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;600&display=swap" rel="stylesheet">
            <style>{self.css_styles}</style>
        </head>
        <body>
            <div class="container">
                <h1><span class="icon">📑</span> 文档总结报告</h1>
                <div class="summary">
                    <h2><span class="icon">📋</span> 总体摘要</h2>
                    <p>{self.md.convert(self.final_summary)}</p>
                </div>
                {self.format_failed_files()}
                <div class="details">
                    <h2><span class="icon">📚</span> 各文件详细总结</h2>
                    {self.format_file_summaries()}
                </div>
            </div>
        </body>
        </html>
        """
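Taken together, the three formatters above share one constructor signature, so switching output formats is a one-line change. A usage sketch with made-up inputs (python-docx is required for the Word path, the markdown package for the HTML path):

summaries = {"docs/a.md": "要点一", "b.txt": "要点二"}   # hypothetical per-file summaries
failed = [("c.pdf", "解析超时")]                         # hypothetical failure list
md_text = MarkdownFormatter("总体结论", summaries, failed).create_document()
html_text = HtmlFormatter("总体结论", summaries, failed).create_document()
word_doc = WordFormatter("总体结论", summaries, failed).create_document()
word_doc.save("summary.docx")  # create_document returns a python-docx Document here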
@@ -9,9 +9,6 @@ from docx.oxml.ns import qn
|
|||||||
from docx.shared import Inches, Cm
|
from docx.shared import Inches, Cm
|
||||||
from docx.shared import Pt, RGBColor, Inches
|
from docx.shared import Pt, RGBColor, Inches
|
||||||
from typing import Dict, List, Tuple
|
from typing import Dict, List, Tuple
|
||||||
import markdown
|
|
||||||
from crazy_functions.doc_fns.conversation_doc.word_doc import convert_markdown_to_word
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class DocumentFormatter(ABC):
|
class DocumentFormatter(ABC):
|
||||||
@@ -197,17 +194,26 @@ class WordFormatter(DocumentFormatter):
|
|||||||
return "\n".join(result)
|
return "\n".join(result)
|
||||||
|
|
||||||
def _add_content(self, text: str, indent: bool = True):
|
def _add_content(self, text: str, indent: bool = True):
|
||||||
"""添加正文内容,使用convert_markdown_to_word处理文本"""
|
"""添加正文内容"""
|
||||||
# 使用convert_markdown_to_word处理markdown文本
|
paragraph = self.doc.add_paragraph(text, style='Normal_Custom')
|
||||||
processed_text = convert_markdown_to_word(text)
|
|
||||||
paragraph = self.doc.add_paragraph(processed_text, style='Normal_Custom')
|
|
||||||
if not indent:
|
if not indent:
|
||||||
paragraph.paragraph_format.first_line_indent = Pt(0)
|
paragraph.paragraph_format.first_line_indent = Pt(0)
|
||||||
return paragraph
|
return paragraph
|
||||||
|
|
||||||
def format_file_summaries(self) -> str:
|
def format_file_summaries(self) -> str:
|
||||||
"""
|
"""
|
||||||
格式化文件总结内容,确保正确的标题层级并处理markdown文本
|
格式化文件总结内容,确保正确的标题层级
|
||||||
|
|
||||||
|
返回:
|
||||||
|
str: 格式化后的文件总结字符串
|
||||||
|
|
||||||
|
标题层级规则:
|
||||||
|
1. 一级标题为"各文件详细总结"
|
||||||
|
2. 如果文件有目录路径:
|
||||||
|
- 目录路径作为二级标题 (2.1, 2.2 等)
|
||||||
|
- 该目录下所有文件作为三级标题 (2.1.1, 2.1.2 等)
|
||||||
|
3. 如果文件没有目录路径:
|
||||||
|
- 文件直接作为二级标题 (2.1, 2.2 等)
|
||||||
"""
|
"""
|
||||||
result = []
|
result = []
|
||||||
# 首先对文件路径进行分组整理
|
# 首先对文件路径进行分组整理
|
||||||
@@ -227,8 +233,7 @@ class WordFormatter(DocumentFormatter):
|
|||||||
result.append(self.file_summaries_map[path])
|
result.append(self.file_summaries_map[path])
|
||||||
# 无目录的文件作为二级标题
|
# 无目录的文件作为二级标题
|
||||||
self._add_heading(f"📄 {file_name}", 2)
|
self._add_heading(f"📄 {file_name}", 2)
|
||||||
# 使用convert_markdown_to_word处理文件内容
|
self._add_content(self.file_summaries_map[path])
|
||||||
self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
|
|
||||||
self.doc.add_paragraph()
|
self.doc.add_paragraph()
|
||||||
|
|
||||||
# 处理有目录的文件
|
# 处理有目录的文件
|
||||||
@@ -248,8 +253,7 @@ class WordFormatter(DocumentFormatter):
|
|||||||
|
|
||||||
# 添加文件名作为三级标题
|
# 添加文件名作为三级标题
|
||||||
self._add_heading(f"📄 {file_name}", 3)
|
self._add_heading(f"📄 {file_name}", 3)
|
||||||
# 使用convert_markdown_to_word处理文件内容
|
self._add_content(self.file_summaries_map[path])
|
||||||
self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
|
|
||||||
self.doc.add_paragraph()
|
self.doc.add_paragraph()
|
||||||
|
|
||||||
return "\n".join(result)
|
return "\n".join(result)
|
||||||
@@ -265,9 +269,9 @@ class WordFormatter(DocumentFormatter):
|
|||||||
self._add_heading("文档总结报告", 0)
|
self._add_heading("文档总结报告", 0)
|
||||||
self.doc.add_paragraph()
|
self.doc.add_paragraph()
|
||||||
|
|
||||||
# 添加总体摘要,使用convert_markdown_to_word处理
|
# 添加总体摘要
|
||||||
self._add_heading("总体摘要", 1)
|
self._add_heading("总体摘要", 1)
|
||||||
self._add_content(convert_markdown_to_word(self.final_summary))
|
self._add_content(self.final_summary)
|
||||||
self.doc.add_paragraph()
|
self.doc.add_paragraph()
|
||||||
|
|
||||||
# 添加失败文件列表(如果有)
|
# 添加失败文件列表(如果有)
|
||||||
@@ -280,24 +284,6 @@ class WordFormatter(DocumentFormatter):
|
|||||||
|
|
||||||
return self.doc
|
return self.doc
|
||||||
|
|
||||||
def save_as_pdf(self, word_path, pdf_path=None):
|
|
||||||
"""将生成的Word文档转换为PDF
|
|
||||||
|
|
||||||
参数:
|
|
||||||
word_path: Word文档的路径
|
|
||||||
pdf_path: 可选,PDF文件的输出路径。如果未指定,将使用与Word文档相同的名称和位置
|
|
||||||
|
|
||||||
返回:
|
|
||||||
生成的PDF文件路径,如果转换失败则返回None
|
|
||||||
"""
|
|
||||||
from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter
|
|
||||||
try:
|
|
||||||
pdf_path = WordToPdfConverter.convert_to_pdf(word_path, pdf_path)
|
|
||||||
return pdf_path
|
|
||||||
except Exception as e:
|
|
||||||
print(f"PDF转换失败: {str(e)}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
class MarkdownFormatter(DocumentFormatter):
|
class MarkdownFormatter(DocumentFormatter):
|
||||||
"""Markdown格式文档生成器"""
|
"""Markdown格式文档生成器"""
|
||||||
@@ -349,395 +335,61 @@ class MarkdownFormatter(DocumentFormatter):
|
|||||||
return "\n".join(document)
|
return "\n".join(document)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class HtmlFormatter(DocumentFormatter):
|
class HtmlFormatter(DocumentFormatter):
|
||||||
"""HTML格式文档生成器 - 优化版"""
|
"""HTML格式文档生成器"""
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super().__init__(*args, **kwargs)
|
super().__init__(*args, **kwargs)
|
||||||
self.md = markdown.Markdown(extensions=['extra','codehilite', 'tables','nl2br'])
|
|
||||||
self.css_styles = """
|
self.css_styles = """
|
||||||
-@keyframes fadeIn {
-    from { opacity: 0; transform: translateY(20px); }
-    to { opacity: 1; transform: translateY(0); }
-}
-
-@keyframes slideIn {
-    from { transform: translateX(-20px); opacity: 0; }
-    to { transform: translateX(0); opacity: 1; }
-}
-
-@keyframes pulse {
-    0% { transform: scale(1); }
-    50% { transform: scale(1.05); }
-    100% { transform: scale(1); }
-}
-
-:root {
-    /* Enhanced color palette */
-    --primary-color: #2563eb;
-    --primary-light: #eff6ff;
-    --secondary-color: #1e293b;
-    --background-color: #f8fafc;
-    --text-color: #334155;
-    --text-light: #64748b;
-    --border-color: #e2e8f0;
-    --error-color: #ef4444;
-    --error-light: #fef2f2;
-    --success-color: #22c55e;
-    --warning-color: #f59e0b;
-    --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
-    --hover-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1);
-
-    /* Typography */
-    --heading-font: "Plus Jakarta Sans", system-ui, sans-serif;
-    --body-font: "Inter", system-ui, sans-serif;
-}
-
 body {
-    font-family: var(--body-font);
-    line-height: 1.8;
-    max-width: 1200px;
+    font-family: "Microsoft YaHei", Arial, sans-serif;
+    line-height: 1.6;
+    max-width: 1000px;
     margin: 0 auto;
-    padding: 2rem;
-    color: var(--text-color);
-    background-color: var(--background-color);
-    font-size: 16px;
-    -webkit-font-smoothing: antialiased;
+    padding: 20px;
+    color: #333;
 }

-.container {
-    background: white;
-    padding: 3rem;
-    border-radius: 24px;
-    box-shadow: var(--card-shadow);
-    transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
-    animation: fadeIn 0.6s ease-out;
-    border: 1px solid var(--border-color);
-}
-
-.container:hover {
-    box-shadow: var(--hover-shadow);
-    transform: translateY(-2px);
-}
-
-h1, h2, h3 {
-    font-family: var(--heading-font);
-    font-weight: 600;
-}
-
 h1 {
-    color: var(--primary-color);
-    font-size: 2.8em;
+    color: #2c3e50;
+    border-bottom: 2px solid #eee;
+    padding-bottom: 10px;
+    font-size: 24px;
     text-align: center;
-    margin: 2rem 0 3rem;
-    padding-bottom: 1.5rem;
-    border-bottom: 3px solid var(--primary-color);
-    letter-spacing: -0.03em;
-    position: relative;
-    display: flex;
-    align-items: center;
-    justify-content: center;
-    gap: 1rem;
 }

-h1::after {
-    content: '';
-    position: absolute;
-    bottom: -3px;
-    left: 50%;
-    transform: translateX(-50%);
-    width: 120px;
-    height: 3px;
-    background: linear-gradient(90deg, var(--primary-color), var(--primary-light));
-    border-radius: 3px;
-    transition: width 0.3s ease;
-}
-
-h1:hover::after {
-    width: 180px;
-}
-
 h2 {
-    color: var(--secondary-color);
-    font-size: 1.9em;
-    margin: 2.5rem 0 1.5rem;
-    padding-left: 1.2rem;
-    border-left: 4px solid var(--primary-color);
-    letter-spacing: -0.02em;
-    display: flex;
-    align-items: center;
-    gap: 1rem;
-    transition: all 0.3s ease;
+    color: #34495e;
+    margin-top: 30px;
+    font-size: 20px;
+    border-left: 4px solid #3498db;
+    padding-left: 10px;
 }

-h2:hover {
-    color: var(--primary-color);
-    transform: translateX(5px);
-}
-
 h3 {
-    color: var(--text-color);
-    font-size: 1.5em;
-    margin: 2rem 0 1rem;
-    padding-bottom: 0.8rem;
-    border-bottom: 2px solid var(--border-color);
-    transition: all 0.3s ease;
-    display: flex;
-    align-items: center;
-    gap: 0.8rem;
+    color: #2c3e50;
+    font-size: 18px;
+    margin-top: 20px;
 }

-h3:hover {
-    color: var(--primary-color);
-    border-bottom-color: var(--primary-color);
-}
-
 .summary {
-    background: var(--primary-light);
-    padding: 2.5rem;
-    border-radius: 16px;
-    margin: 2.5rem 0;
-    box-shadow: 0 4px 6px -1px rgba(37, 99, 235, 0.1);
-    position: relative;
-    overflow: hidden;
-    transition: transform 0.3s ease, box-shadow 0.3s ease;
-    animation: slideIn 0.5s ease-out;
+    background-color: #f8f9fa;
+    padding: 20px;
+    border-radius: 5px;
+    margin: 20px 0;
+    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
 }

-.summary:hover {
-    transform: translateY(-3px);
-    box-shadow: 0 8px 12px -2px rgba(37, 99, 235, 0.15);
-}
-
-.summary::before {
-    content: '';
-    position: absolute;
-    top: 0;
-    left: 0;
-    width: 4px;
-    height: 100%;
-    background: linear-gradient(to bottom, var(--primary-color), rgba(37, 99, 235, 0.6));
-}
-
-.summary p {
-    margin: 1.2rem 0;
-    line-height: 1.9;
-    color: var(--text-color);
-    transition: color 0.3s ease;
-}
-
-.summary:hover p {
-    color: var(--secondary-color);
-}
-
 .details {
-    margin-top: 3.5rem;
-    padding-top: 2.5rem;
-    border-top: 2px dashed var(--border-color);
-    animation: fadeIn 0.8s ease-out;
+    margin-top: 40px;
 }

 .failed-files {
-    background: var(--error-light);
-    padding: 2rem;
-    border-radius: 16px;
-    margin: 3rem 0;
-    border-left: 4px solid var(--error-color);
-    position: relative;
-    transition: all 0.3s ease;
-    animation: slideIn 0.5s ease-out;
+    background-color: #fff3f3;
+    padding: 15px;
+    border-left: 4px solid #e74c3c;
+    margin: 20px 0;
 }

-.failed-files:hover {
-    transform: translateX(5px);
-    box-shadow: 0 8px 15px -3px rgba(239, 68, 68, 0.1);
-}
-
-.failed-files h2 {
-    color: var(--error-color);
-    border-left: none;
-    padding-left: 0;
-}
-
-.failed-files ul {
-    margin: 1.8rem 0;
-    padding-left: 1.2rem;
-    list-style-type: none;
-}
-
-.failed-files li {
-    margin: 1.2rem 0;
-    padding: 1.2rem 1.8rem;
-    background: rgba(239, 68, 68, 0.08);
-    border-radius: 12px;
-    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
-}
-
-.failed-files li:hover {
-    transform: translateX(8px);
-    background: rgba(239, 68, 68, 0.12);
-}
-
-.directory-section {
-    margin: 3.5rem 0;
-    padding: 2rem;
-    background: var(--background-color);
-    border-radius: 16px;
-    position: relative;
-    transition: all 0.3s ease;
-    animation: fadeIn 0.6s ease-out;
-}
-
-.directory-section:hover {
-    background: white;
-    box-shadow: var(--card-shadow);
-}
-
 .file-summary {
-    background: white;
-    padding: 2rem;
-    margin: 1.8rem 0;
-    border-radius: 16px;
-    box-shadow: var(--card-shadow);
+    background-color: #fff;
+    padding: 15px;
+    margin: 15px 0;
+    border-radius: 4px;
+    box-shadow: 0 1px 3px rgba(0,0,0,0.1);
-    border-left: 4px solid var(--border-color);
-    transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
-    position: relative;
-    overflow: hidden;
-}
-
-.file-summary:hover {
-    border-left-color: var(--primary-color);
-    transform: translateX(8px) translateY(-2px);
-    box-shadow: var(--hover-shadow);
-}
-
-.file-summary {
-    background: white;
-    padding: 2rem;
-    margin: 1.8rem 0;
-    border-radius: 16px;
-    box-shadow: var(--card-shadow);
-    border-left: 4px solid var(--border-color);
-    transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
-    position: relative;
-}
-
-.file-summary:hover {
-    border-left-color: var(--primary-color);
-    transform: translateX(8px) translateY(-2px);
-    box-shadow: var(--hover-shadow);
-}
-
-.icon {
-    display: inline-flex;
-    align-items: center;
-    justify-content: center;
-    width: 32px;
-    height: 32px;
-    border-radius: 8px;
-    background: var(--primary-light);
-    color: var(--primary-color);
-    font-size: 1.2em;
-    transition: all 0.3s ease;
-}
-
-.file-summary:hover .icon,
-.directory-section:hover .icon {
-    transform: scale(1.1);
-    background: var(--primary-color);
-    color: white;
-}
-
-/* Smooth scrolling */
-html {
-    scroll-behavior: smooth;
-}
-
-/* Selection style */
-::selection {
-    background: var(--primary-light);
-    color: var(--primary-color);
-}
-
-/* Print styles */
-@media print {
-    body {
-        background: white;
-    }
-    .container {
-        box-shadow: none;
-        padding: 0;
-    }
-    .file-summary, .failed-files {
-        break-inside: avoid;
-        box-shadow: none;
-    }
-    .icon {
-        display: none;
-    }
-}
-
-/* Responsive design */
-@media (max-width: 768px) {
-    body {
-        padding: 1rem;
-        font-size: 15px;
-    }
-
-    .container {
-        padding: 1.5rem;
-    }
-
-    h1 {
-        font-size: 2.2em;
-        margin: 1.5rem 0 2rem;
-    }
-
-    h2 {
-        font-size: 1.7em;
-    }
-
-    h3 {
-        font-size: 1.4em;
-    }
-
-    .summary, .failed-files, .directory-section {
-        padding: 1.5rem;
-    }
-
-    .file-summary {
-        padding: 1.2rem;
-    }
-
-    .icon {
-        width: 28px;
-        height: 28px;
-    }
-}
-
-/* Dark mode support */
-@media (prefers-color-scheme: dark) {
-    :root {
-        --primary-light: rgba(37, 99, 235, 0.15);
-        --background-color: #0f172a;
-        --text-color: #e2e8f0;
-        --text-light: #94a3b8;
-        --border-color: #1e293b;
-        --error-light: rgba(239, 68, 68, 0.15);
-    }
-
-    .container, .file-summary {
-        background: #1e293b;
-    }
-
-    .directory-section {
-        background: #0f172a;
-    }
-
-    .directory-section:hover {
-        background: #1e293b;
-    }
 }
 """

@@ -746,12 +398,10 @@ class HtmlFormatter(DocumentFormatter):
             return ""

         failed_files_html = ['<div class="failed-files">']
-        failed_files_html.append('<h2><span class="icon">⚠️</span> 处理失败的文件</h2>')
+        failed_files_html.append("<h2>⚠️ 处理失败的文件</h2>")
         failed_files_html.append("<ul>")
         for fp, reason in self.failed_files:
-            failed_files_html.append(
-                f'<li><strong>📄 {os.path.basename(fp)}</strong><br><span style="color: var(--text-light)">{reason}</span></li>'
-            )
+            failed_files_html.append(f"<li><strong>{os.path.basename(fp)}:</strong> {reason}</li>")
         failed_files_html.append("</ul></div>")
         return "\n".join(failed_files_html)

@@ -764,49 +414,37 @@ class HtmlFormatter(DocumentFormatter):
             dir_path = os.path.dirname(path)
             if dir_path != current_dir:
                 if dir_path:
-                    formatted_html.append('<div class="directory-section">')
-                    formatted_html.append(f'<h2><span class="icon">📁</span> {dir_path}</h2>')
-                    formatted_html.append('</div>')
+                    formatted_html.append(f'<h2>📁 {dir_path}</h2>')
                 current_dir = dir_path

             file_name = os.path.basename(path)
             formatted_html.append('<div class="file-summary">')
-            formatted_html.append(f'<h3><span class="icon">📄</span> {file_name}</h3>')
-            formatted_html.append(self.md.convert(self.file_summaries_map[path]))
+            formatted_html.append(f'<h3>📄 {file_name}</h3>')
+            formatted_html.append(f'<p>{self.file_summaries_map[path]}</p>')
             formatted_html.append('</div>')

         return "\n".join(formatted_html)

     def create_document(self) -> str:
-        """生成HTML文档
-
-        Returns:
-            str: 完整的HTML文档字符串
-        """
         return f"""
         <!DOCTYPE html>
-        <html lang="zh-CN">
+        <html>
         <head>
-            <meta charset="utf-8">
-            <meta name="viewport" content="width=device-width, initial-scale=1">
+            <meta charset='utf-8'>
             <title>文档总结报告</title>
-            <link href="https://cdnjs.cloudflare.com/ajax/libs/inter/3.19.3/inter.css" rel="stylesheet">
-            <link href="https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;600&display=swap" rel="stylesheet">
             <style>{self.css_styles}</style>
         </head>
         <body>
-            <div class="container">
-            <h1><span class="icon">📑</span> 文档总结报告</h1>
-            <div class="summary">
-            <h2><span class="icon">📋</span> 总体摘要</h2>
-            <p>{self.md.convert(self.final_summary)}</p>
-            </div>
-            {self.format_failed_files()}
-            <div class="details">
-            <h2><span class="icon">📚</span> 各文件详细总结</h2>
-            {self.format_file_summaries()}
-            </div>
+            <h1>📑 文档总结报告</h1>
+            <h2>总体摘要</h2>
+            <div class="summary">{self.final_summary}</div>
+            {self.format_failed_files()}
+            <div class="details">
+            <h2>📚 各文件详细总结</h2>
+            {self.format_file_summaries()}
+            </div>
             </div>

         </body>
         </html>
         """

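The hunks above strip the markdown-rendering path out of HtmlFormatter: file summaries are no longer run through `self.md.convert(...)` and are emitted as raw text inside `<p>` tags instead. For reference, a minimal sketch of what the removed path did, assuming the `markdown` package with the same extension list the old `__init__` configured (the sample summary string is made up):

```python
import markdown

# Same extension set the removed HtmlFormatter configured.
md = markdown.Markdown(extensions=['extra', 'codehilite', 'tables', 'nl2br'])

summary = "# 摘要\n\n- 要点一\n- 要点二"
html_fragment = md.convert(summary)  # e.g. '<h1>摘要</h1>\n<ul>...</ul>'
md.reset()  # clear parser state before converting the next document
```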
@@ -1,237 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Type, TypeVar, Generic, Union
from dataclasses import dataclass
from enum import Enum, auto
import logging
from datetime import datetime

# 设置日志
logger = logging.getLogger(__name__)


# 自定义异常类定义
class FoldingError(Exception):
    """折叠相关的自定义异常基类"""
    pass


class FormattingError(FoldingError):
    """格式化过程中的错误"""
    pass


class MetadataError(FoldingError):
    """元数据相关的错误"""
    pass


class ValidationError(FoldingError):
    """验证错误"""
    pass


class FoldingStyle(Enum):
    """折叠样式枚举"""
    SIMPLE = auto()    # 简单折叠
    DETAILED = auto()  # 详细折叠(带有额外信息)
    NESTED = auto()    # 嵌套折叠


@dataclass
class FoldingOptions:
    """折叠选项配置"""
    style: FoldingStyle = FoldingStyle.DETAILED
    code_language: Optional[str] = None  # 代码块的语言
    show_timestamp: bool = False         # 是否显示时间戳
    indent_level: int = 0                # 缩进级别
    custom_css: Optional[str] = None     # 自定义CSS类


T = TypeVar('T')  # 用于泛型类型


class BaseMetadata(ABC):
    """元数据基类"""

    @abstractmethod
    def validate(self) -> bool:
        """验证元数据的有效性"""
        pass

    def _validate_non_empty_str(self, value: Optional[str]) -> bool:
        """验证字符串非空"""
        return bool(value and value.strip())


@dataclass
class FileMetadata(BaseMetadata):
    """文件元数据"""
    rel_path: str
    size: float
    last_modified: Optional[datetime] = None
    mime_type: Optional[str] = None
    encoding: str = 'utf-8'

    def validate(self) -> bool:
        """验证文件元数据的有效性"""
        try:
            if not self._validate_non_empty_str(self.rel_path):
                return False
            if self.size < 0:
                return False
            return True
        except Exception as e:
            logger.error(f"File metadata validation error: {str(e)}")
            return False


class ContentFormatter(ABC, Generic[T]):
    """内容格式化抽象基类

    支持泛型类型参数,可以指定具体的元数据类型。
    """

    @abstractmethod
    def format(self,
               content: str,
               metadata: T,
               options: Optional[FoldingOptions] = None) -> str:
        """格式化内容

        Args:
            content: 需要格式化的内容
            metadata: 类型化的元数据
            options: 折叠选项

        Returns:
            str: 格式化后的内容

        Raises:
            FormattingError: 格式化过程中的错误
        """
        pass

    def _create_summary(self, metadata: T) -> str:
        """创建折叠摘要,可被子类重写"""
        return str(metadata)

    def _format_content_block(self,
                              content: str,
                              options: Optional[FoldingOptions]) -> str:
        """格式化内容块,处理代码块等特殊格式"""
        if not options:
            return content

        if options.code_language:
            return f"```{options.code_language}\n{content}\n```"
        return content

    def _add_indent(self, text: str, level: int) -> str:
        """添加缩进"""
        if level <= 0:
            return text
        indent = " " * level
        return "\n".join(indent + line for line in text.splitlines())


class FileContentFormatter(ContentFormatter[FileMetadata]):
    """文件内容格式化器"""

    def format(self,
               content: str,
               metadata: FileMetadata,
               options: Optional[FoldingOptions] = None) -> str:
        """格式化文件内容"""
        if not metadata.validate():
            raise MetadataError("Invalid file metadata")

        try:
            options = options or FoldingOptions()

            # 构建摘要信息
            summary_parts = [
                f"{metadata.rel_path} ({metadata.size:.2f}MB)",
                f"Type: {metadata.mime_type}" if metadata.mime_type else None,
                (f"Modified: {metadata.last_modified.strftime('%Y-%m-%d %H:%M:%S')}"
                 if metadata.last_modified and options.show_timestamp else None)
            ]
            summary = " | ".join(filter(None, summary_parts))

            # 构建HTML类
            css_class = f' class="{options.custom_css}"' if options.custom_css else ''

            # 格式化内容
            formatted_content = self._format_content_block(content, options)

            # 组装最终结果
            result = (
                f'<details{css_class}><summary>{summary}</summary>\n\n'
                f'{formatted_content}\n\n'
                f'</details>\n\n'
            )

            return self._add_indent(result, options.indent_level)

        except Exception as e:
            logger.error(f"Error formatting file content: {str(e)}")
            raise FormattingError(f"Failed to format file content: {str(e)}")


class ContentFoldingManager:
    """内容折叠管理器"""

    def __init__(self):
        """初始化折叠管理器"""
        self._formatters: Dict[str, ContentFormatter] = {}
        self._register_default_formatters()

    def _register_default_formatters(self) -> None:
        """注册默认的格式化器"""
        self.register_formatter('file', FileContentFormatter())

    def register_formatter(self, name: str, formatter: ContentFormatter) -> None:
        """注册新的格式化器"""
        if not isinstance(formatter, ContentFormatter):
            raise TypeError("Formatter must implement ContentFormatter interface")
        self._formatters[name] = formatter

    def _guess_language(self, extension: str) -> Optional[str]:
        """根据文件扩展名猜测编程语言"""
        extension = extension.lower().lstrip('.')
        language_map = {
            'py': 'python',
            'js': 'javascript',
            'java': 'java',
            'cpp': 'cpp',
            'cs': 'csharp',
            'html': 'html',
            'css': 'css',
            'md': 'markdown',
            'json': 'json',
            'xml': 'xml',
            'sql': 'sql',
            'sh': 'bash',
            'yaml': 'yaml',
            'yml': 'yaml',
            'txt': None  # 纯文本不需要语言标识
        }
        return language_map.get(extension)

    def format_content(self,
                       content: str,
                       formatter_type: str,
                       metadata: Union[FileMetadata],
                       options: Optional[FoldingOptions] = None) -> str:
        """格式化内容"""
        formatter = self._formatters.get(formatter_type)
        if not formatter:
            raise KeyError(f"No formatter registered for type: {formatter_type}")

        if not isinstance(metadata, FileMetadata):
            raise TypeError("Invalid metadata type")

        return formatter.format(content, metadata, options)
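A minimal usage sketch for the folding module deleted above; the file path and sizes are hypothetical, and the output shape follows FileContentFormatter.format:

```python
from datetime import datetime

manager = ContentFoldingManager()
meta = FileMetadata(rel_path="src/app.py", size=0.42,
                    last_modified=datetime.now(), mime_type="text/x-python")
opts = FoldingOptions(code_language="python", show_timestamp=True)

folded = manager.format_content("print('hello')", "file", meta, opts)
# folded is roughly:
# <details><summary>src/app.py (0.42MB) | Type: text/x-python | Modified: ...</summary>
#
# ```python
# print('hello')
# ```
#
# </details>
```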
@@ -1,211 +0,0 @@
import re
import os
import pandas as pd
from datetime import datetime
from openpyxl import Workbook


class ExcelTableFormatter:
    """聊天记录中Markdown表格转Excel生成器"""

    def __init__(self):
        """初始化Excel文档对象"""
        self.workbook = Workbook()
        self._table_count = 0
        self._current_sheet = None

    def _normalize_table_row(self, row):
        """标准化表格行,处理不同的分隔符情况"""
        row = row.strip()
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
        return [cell.strip() for cell in row.split('|')]

    def _is_separator_row(self, row):
        """检查是否是分隔行(由 - 或 : 组成)"""
        clean_row = re.sub(r'[\s|]', '', row)
        return bool(re.match(r'^[-:]+$', clean_row))

    def _extract_tables_from_text(self, text):
        """从文本中提取所有表格内容"""
        if not isinstance(text, str):
            return []

        tables = []
        current_table = []
        is_in_table = False

        for line in text.split('\n'):
            line = line.strip()
            if not line:
                if is_in_table and current_table:
                    if len(current_table) >= 2:
                        tables.append(current_table)
                    current_table = []
                    is_in_table = False
                continue

            if '|' in line:
                if not is_in_table:
                    is_in_table = True
                current_table.append(line)
            else:
                if is_in_table and current_table:
                    if len(current_table) >= 2:
                        tables.append(current_table)
                    current_table = []
                    is_in_table = False

        if is_in_table and current_table and len(current_table) >= 2:
            tables.append(current_table)

        return tables

    def _parse_table(self, table_lines):
        """解析表格内容为结构化数据"""
        try:
            headers = self._normalize_table_row(table_lines[0])

            separator_index = next(
                (i for i, line in enumerate(table_lines) if self._is_separator_row(line)),
                1
            )

            data_rows = []
            for line in table_lines[separator_index + 1:]:
                cells = self._normalize_table_row(line)
                # 确保单元格数量与表头一致
                while len(cells) < len(headers):
                    cells.append('')
                cells = cells[:len(headers)]
                data_rows.append(cells)

            if headers and data_rows:
                return {
                    'headers': headers,
                    'data': data_rows
                }
        except Exception as e:
            print(f"解析表格时发生错误: {str(e)}")

        return None

    def _create_sheet(self, question_num, table_num):
        """创建新的工作表"""
        sheet_name = f'Q{question_num}_T{table_num}'
        if len(sheet_name) > 31:
            sheet_name = f'Table{self._table_count}'

        if sheet_name in self.workbook.sheetnames:
            sheet_name = f'{sheet_name}_{datetime.now().strftime("%H%M%S")}'

        return self.workbook.create_sheet(title=sheet_name)

    def create_document(self, history):
        """
        处理聊天历史中的所有表格并创建Excel文档

        Args:
            history: 聊天历史列表

        Returns:
            Workbook: 处理完成的Excel工作簿对象,如果没有表格则返回None
        """
        has_tables = False

        # 删除默认创建的工作表
        default_sheet = self.workbook['Sheet']
        self.workbook.remove(default_sheet)

        # 遍历所有回答
        for i in range(1, len(history), 2):
            answer = history[i]
            tables = self._extract_tables_from_text(answer)

            for table_lines in tables:
                parsed_table = self._parse_table(table_lines)
                if parsed_table:
                    self._table_count += 1
                    sheet = self._create_sheet(i // 2 + 1, self._table_count)

                    # 写入表头
                    for col, header in enumerate(parsed_table['headers'], 1):
                        sheet.cell(row=1, column=col, value=header)

                    # 写入数据
                    for row_idx, row_data in enumerate(parsed_table['data'], 2):
                        for col_idx, value in enumerate(row_data, 1):
                            sheet.cell(row=row_idx, column=col_idx, value=value)

                    has_tables = True

        return self.workbook if has_tables else None


def save_chat_tables(history, save_dir, base_name):
    """
    保存聊天历史中的表格到Excel文件

    Args:
        history: 聊天历史列表
        save_dir: 保存目录
        base_name: 基础文件名

    Returns:
        list: 保存的文件路径列表
    """
    result_files = []

    try:
        # 创建Excel格式
        excel_formatter = ExcelTableFormatter()
        workbook = excel_formatter.create_document(history)

        if workbook is not None:
            # 确保保存目录存在
            os.makedirs(save_dir, exist_ok=True)

            # 生成Excel文件路径
            excel_file = os.path.join(save_dir, base_name + '.xlsx')

            # 保存Excel文件
            workbook.save(excel_file)
            result_files.append(excel_file)
            print(f"已保存表格到Excel文件: {excel_file}")
    except Exception as e:
        print(f"保存Excel格式失败: {str(e)}")

    return result_files


# 使用示例
if __name__ == "__main__":
    # 示例聊天历史
    history = [
        "问题1",
        """这是第一个表格:
| A | B | C |
|---|---|---|
| 1 | 2 | 3 |""",

        "问题2",
        "这是没有表格的回答",

        "问题3",
        """回答包含多个表格:
| Name | Age |
|------|-----|
| Tom | 20 |

第二个表格:
| X | Y |
|---|---|
| 1 | 2 |"""
    ]

    # 保存表格
    save_dir = "output"
    base_name = "chat_tables"
    saved_files = save_chat_tables(history, save_dir, base_name)
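The table detection above hinges on two small helpers: `_normalize_table_row` splits a `|`-delimited row into trimmed cells, and `_is_separator_row` recognizes the `|---|:---:|` alignment row. A standalone re-implementation for illustration (not the class's own code, just the same two regex ideas):

```python
import re

def normalize(row):
    # strip outer pipes, then split and trim each cell
    return [cell.strip() for cell in row.strip().strip('|').split('|')]

def is_separator(row):
    # after removing pipes and whitespace, a separator row is only '-' and ':'
    return bool(re.match(r'^[-:]+$', re.sub(r'[\s|]', '', row)))

assert normalize('| Name | Age |') == ['Name', 'Age']
assert is_separator('|:-----|----:|')
assert not is_separator('| Tom | 20 |')
```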
@@ -1,190 +0,0 @@

class HtmlFormatter:
    """聊天记录HTML格式生成器"""

    def __init__(self, chatbot, history):
        self.chatbot = chatbot
        self.history = history
        self.css_styles = """
        :root {
            --primary-color: #2563eb;
            --primary-light: #eff6ff;
            --secondary-color: #1e293b;
            --background-color: #f8fafc;
            --text-color: #334155;
            --border-color: #e2e8f0;
            --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
        }

        body {
            font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            line-height: 1.8;
            margin: 0;
            padding: 2rem;
            color: var(--text-color);
            background-color: var(--background-color);
        }

        .container {
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            padding: 2rem;
            border-radius: 16px;
            box-shadow: var(--card-shadow);
        }

        ::selection {
            background: var(--primary-light);
            color: var(--primary-color);
        }

        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(20px); }
            to { opacity: 1; transform: translateY(0); }
        }

        @keyframes slideIn {
            from { transform: translateX(-20px); opacity: 0; }
            to { transform: translateX(0); opacity: 1; }
        }

        .container {
            animation: fadeIn 0.6s ease-out;
        }

        .QaBox {
            animation: slideIn 0.5s ease-out;
            transition: all 0.3s ease;
        }

        .QaBox:hover {
            transform: translateX(5px);
        }

        .Question, .Answer, .historyBox {
            transition: all 0.3s ease;
        }

        .chat-title {
            color: var(--primary-color);
            font-size: 2em;
            text-align: center;
            margin: 1rem 0 2rem;
            padding-bottom: 1rem;
            border-bottom: 2px solid var(--primary-color);
        }

        .chat-body {
            display: flex;
            flex-direction: column;
            gap: 1.5rem;
            margin: 2rem 0;
        }

        .QaBox {
            background: white;
            padding: 1.5rem;
            border-radius: 8px;
            border-left: 4px solid var(--primary-color);
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
            margin-bottom: 1.5rem;
        }

        .Question {
            color: var(--secondary-color);
            font-weight: 500;
            margin-bottom: 1rem;
        }

        .Answer {
            color: var(--text-color);
            background: var(--primary-light);
            padding: 1rem;
            border-radius: 6px;
        }

        .history-section {
            margin-top: 3rem;
            padding-top: 2rem;
            border-top: 2px solid var(--border-color);
        }

        .history-title {
            color: var(--secondary-color);
            font-size: 1.5em;
            margin-bottom: 1.5rem;
            text-align: center;
        }

        .historyBox {
            background: white;
            padding: 1rem;
            margin: 0.5rem 0;
            border-radius: 6px;
            border: 1px solid var(--border-color);
        }

        @media (prefers-color-scheme: dark) {
            :root {
                --background-color: #0f172a;
                --text-color: #e2e8f0;
                --border-color: #1e293b;
            }

            .container, .QaBox {
                background: #1e293b;
            }
        }
        """

    def format_chat_content(self) -> str:
        """格式化聊天内容"""
        chat_content = []
        for q, a in self.chatbot:
            question = str(q) if q is not None else ""
            answer = str(a) if a is not None else ""
            chat_content.append(f'''
            <div class="QaBox">
                <div class="Question">{question}</div>
                <div class="Answer">{answer}</div>
            </div>
            ''')
        return "\n".join(chat_content)

    def format_history_content(self) -> str:
        """格式化历史记录内容"""
        if not self.history:
            return ""

        history_content = []
        for entry in self.history:
            history_content.append(f'''
            <div class="historyBox">
                <div class="entry">{entry}</div>
            </div>
            ''')
        return "\n".join(history_content)

    def create_document(self) -> str:
        """生成完整的HTML文档

        Returns:
            str: 完整的HTML文档字符串
        """
        return f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1">
            <title>对话存档</title>
            <style>{self.css_styles}</style>
        </head>
        <body>
            <div class="container">
                <h1 class="chat-title">对话存档</h1>
                <div class="chat-body">
                    {self.format_chat_content()}
                </div>
            </div>
        </body>
        </html>
        """
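A usage sketch for the chat-archive HtmlFormatter deleted above; the sample conversation is made up, and `chatbot` is assumed to be a list of (question, answer) pairs as `format_chat_content` iterates it:

```python
chatbot = [("什么是梯度下降?", "梯度下降是一种迭代优化算法……")]
history = ["什么是梯度下降?", "梯度下降是一种迭代优化算法……"]

formatter = HtmlFormatter(chatbot, history)
with open("chat_archive.html", "w", encoding="utf-8") as f:
    f.write(formatter.create_document())
```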
@@ -1,39 +0,0 @@

class MarkdownFormatter:
    """Markdown格式文档生成器 - 用于生成对话记录的markdown文档"""

    def __init__(self):
        self.content = []

    def _add_content(self, text: str):
        """添加正文内容"""
        if text:
            self.content.append(f"\n{text}\n")

    def create_document(self, history: list) -> str:
        """
        创建完整的Markdown文档
        Args:
            history: 历史记录列表,偶数位置为问题,奇数位置为答案
        Returns:
            str: 生成的Markdown文本
        """
        self.content = []

        # 处理问答对
        for i in range(0, len(history), 2):
            question = history[i]
            # 与其他格式化器保持一致,防止 history 长度为奇数时越界
            answer = history[i + 1] if i + 1 < len(history) else ""

            # 添加问题
            self.content.append(f"\n### 问题 {i//2 + 1}")
            self._add_content(question)

            # 添加回答
            self.content.append(f"\n### 回答 {i//2 + 1}")
            self._add_content(answer)

            # 添加分隔线
            self.content.append("\n---\n")

        return "\n".join(self.content)
@@ -1,172 +0,0 @@
from datetime import datetime
import os
import re
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont

def convert_markdown_to_pdf(markdown_text):
    """将Markdown文本转换为PDF格式的纯文本"""
    if not markdown_text:
        return ""

    # 标准化换行符
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # 处理标题、粗体、斜体
    markdown_text = re.sub(r'^#\s+(.+)$', r'\1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)

    # 处理列表
    markdown_text = re.sub(r'^\s*[-*+]\s+(.+?)(?=\n|$)', r'• \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^\s*\d+\.\s+(.+?)(?=\n|$)', r'\1', markdown_text, flags=re.MULTILINE)

    # 处理链接
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1', markdown_text)

    # 处理段落
    markdown_text = re.sub(r'\n{2,}', '\n', markdown_text)
    markdown_text = re.sub(r'(?<!\n)(?<!^)(?<!•\s)(?<!\d\.\s)\n(?![\s•\d])', '\n\n', markdown_text, flags=re.MULTILINE)

    # 清理空白
    markdown_text = re.sub(r' +', ' ', markdown_text)
    markdown_text = re.sub(r'(?m)^\s+|\s+$', '', markdown_text)

    return markdown_text.strip()


class PDFFormatter:
    """聊天记录PDF文档生成器 - 使用 Noto Sans CJK 字体"""

    def __init__(self):
        self._init_reportlab()
        self._register_fonts()
        self.styles = self._get_reportlab_lib()['getSampleStyleSheet']()
        self._create_styles()

    def _init_reportlab(self):
        """初始化 ReportLab 相关组件"""
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
        from reportlab.lib.units import cm
        from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer

        self._lib = {
            'A4': A4,
            'getSampleStyleSheet': getSampleStyleSheet,
            'ParagraphStyle': ParagraphStyle,
            'cm': cm
        }

        self._platypus = {
            'SimpleDocTemplate': SimpleDocTemplate,
            'Paragraph': Paragraph,
            'Spacer': Spacer
        }

    def _get_reportlab_lib(self):
        return self._lib

    def _get_reportlab_platypus(self):
        return self._platypus

    def _register_fonts(self):
        """注册 Noto Sans CJK 字体"""
        possible_font_paths = [
            '/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc',
            '/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc',
            '/usr/share/fonts/noto/NotoSansCJK-Regular.ttc'
        ]

        font_registered = False
        for path in possible_font_paths:
            if os.path.exists(path):
                try:
                    pdfmetrics.registerFont(TTFont('NotoSansCJK', path))
                    font_registered = True
                    break
                except:
                    continue

        if not font_registered:
            print("Warning: Could not find Noto Sans CJK font. Using fallback font.")
            self.font_name = 'Helvetica'
        else:
            self.font_name = 'NotoSansCJK'

    def _create_styles(self):
        """创建文档样式"""
        ParagraphStyle = self._lib['ParagraphStyle']

        # 标题样式
        self.styles.add(ParagraphStyle(
            name='Title_Custom',
            fontName=self.font_name,
            fontSize=24,
            leading=38,
            alignment=1,
            spaceAfter=32
        ))

        # 日期样式
        self.styles.add(ParagraphStyle(
            name='Date_Style',
            fontName=self.font_name,
            fontSize=16,
            leading=20,
            alignment=1,
            spaceAfter=20
        ))

        # 问题样式
        self.styles.add(ParagraphStyle(
            name='Question_Style',
            fontName=self.font_name,
            fontSize=12,
            leading=18,
            leftIndent=28,
            spaceAfter=6
        ))

        # 回答样式
        self.styles.add(ParagraphStyle(
            name='Answer_Style',
            fontName=self.font_name,
            fontSize=12,
            leading=18,
            leftIndent=28,
            spaceAfter=12
        ))

    def create_document(self, history, output_path):
        """生成PDF文档"""
        # 创建PDF文档
        doc = self._platypus['SimpleDocTemplate'](
            output_path,
            pagesize=self._lib['A4'],
            rightMargin=2.6 * self._lib['cm'],
            leftMargin=2.8 * self._lib['cm'],
            topMargin=3.7 * self._lib['cm'],
            bottomMargin=3.5 * self._lib['cm']
        )

        # 构建内容
        story = []
        Paragraph = self._platypus['Paragraph']

        # 添加对话内容
        for i in range(0, len(history), 2):
            question = history[i]
            answer = convert_markdown_to_pdf(history[i + 1]) if i + 1 < len(history) else ""

            if question:
                q_text = f'问题 {i // 2 + 1}:{str(question)}'
                story.append(Paragraph(q_text, self.styles['Question_Style']))

            if answer:
                a_text = f'回答 {i // 2 + 1}:{str(answer)}'
                story.append(Paragraph(a_text, self.styles['Answer_Style']))

        # 构建PDF
        doc.build(story)

        return doc
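A minimal usage sketch for the PDFFormatter deleted above (requires reportlab; for CJK output, a Noto Sans CJK font must exist at one of the listed paths, otherwise it falls back to Helvetica). The history values are hypothetical:

```python
history = ["问题一", "**加粗的**回答一", "问题二", "回答二"]

formatter = PDFFormatter()
formatter.create_document(history, "chat_record.pdf")  # builds and writes the PDF
```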
@@ -1,79 +0,0 @@

import re


def convert_markdown_to_txt(markdown_text):
    """Convert markdown text to plain text while preserving formatting"""
    # Standardize line endings
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # 1. Handle headers but keep their formatting instead of removing them
    markdown_text = re.sub(r'^#\s+(.+)$', r'# \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^##\s+(.+)$', r'## \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^###\s+(.+)$', r'### \1', markdown_text, flags=re.MULTILINE)

    # 2. Handle bold and italic - simply remove markers
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)

    # 3. Handle lists but preserve formatting
    markdown_text = re.sub(r'^\s*[-*+]\s+(.+?)(?=\n|$)', r'• \1', markdown_text, flags=re.MULTILINE)

    # 4. Handle links - keep the text, with the URL in parentheses
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1 (\2)', markdown_text)

    # 5. Handle HTML links - convert to user-friendly format
    markdown_text = re.sub(r'<a href=[\'"]([^\'"]+)[\'"](?:\s+target=[\'"][^\'"]+[\'"])?>([^<]+)</a>', r'\2 (\1)',
                           markdown_text)

    # 6. Preserve paragraph breaks
    markdown_text = re.sub(r'\n{3,}', '\n\n', markdown_text)  # normalize multiple newlines to double newlines

    # 7. Clean up extra spaces but maintain indentation
    markdown_text = re.sub(r' +', ' ', markdown_text)

    return markdown_text.strip()


class TxtFormatter:
    """Chat history TXT document generator"""

    def __init__(self):
        self.content = []
        self._setup_document()

    def _setup_document(self):
        """Initialize document with header"""
        self.content.append("=" * 50)
        self.content.append("GPT-Academic对话记录".center(48))
        self.content.append("=" * 50)

    def _format_header(self):
        """Create document header with current date"""
        from datetime import datetime
        date_str = datetime.now().strftime('%Y年%m月%d日')
        return [
            date_str.center(48),
            "\n"  # Add blank line after date
        ]

    def create_document(self, history):
        """Generate document from chat history"""
        # Add header with date
        self.content.extend(self._format_header())

        # Add conversation content
        for i in range(0, len(history), 2):
            question = history[i]
            answer = convert_markdown_to_txt(history[i + 1]) if i + 1 < len(history) else ""

            if question:
                self.content.append(f"问题 {i // 2 + 1}:{str(question)}")
                self.content.append("")  # Add blank line

            if answer:
                self.content.append(f"回答 {i // 2 + 1}:{str(answer)}")
                self.content.append("")  # Add blank line

        # Join all content with newlines
        return "\n".join(self.content)
@@ -1,155 +0,0 @@
from docx2pdf import convert
import os
import platform
import subprocess
from typing import Union
from pathlib import Path
from datetime import datetime

class WordToPdfConverter:
    """Word文档转PDF转换器"""

    @staticmethod
    def convert_to_pdf(word_path: Union[str, Path], pdf_path: Union[str, Path] = None) -> str:
        """
        将Word文档转换为PDF

        参数:
            word_path: Word文档的路径
            pdf_path: 可选,PDF文件的输出路径。如果未指定,将使用与Word文档相同的名称和位置

        返回:
            生成的PDF文件路径

        异常:
            如果转换失败,将抛出相应异常
        """
        try:
            # 确保输入路径是Path对象
            word_path = Path(word_path)

            # 如果未指定pdf_path,则使用与word文档相同的名称
            if pdf_path is None:
                pdf_path = word_path.with_suffix('.pdf')
            else:
                pdf_path = Path(pdf_path)

            # 检查操作系统
            if platform.system() == 'Linux':
                # Linux系统需要安装libreoffice
                which_result = subprocess.run(['which', 'libreoffice'], capture_output=True, text=True)
                if which_result.returncode != 0:
                    raise RuntimeError("请先安装LibreOffice: sudo apt-get install libreoffice")

                print(f"开始转换Word文档: {word_path} 到 PDF")

                # 使用subprocess代替os.system
                result = subprocess.run(
                    ['libreoffice', '--headless', '--convert-to', 'pdf:writer_pdf_Export',
                     str(word_path), '--outdir', str(pdf_path.parent)],
                    capture_output=True, text=True
                )

                if result.returncode != 0:
                    error_msg = result.stderr or "未知错误"
                    print(f"LibreOffice转换失败,错误信息: {error_msg}")
                    raise RuntimeError(f"LibreOffice转换失败: {error_msg}")

                print(f"LibreOffice转换输出: {result.stdout}")

                # 如果输出路径与默认生成的不同,则重命名
                default_pdf = word_path.with_suffix('.pdf')
                if default_pdf != pdf_path and default_pdf.exists():
                    os.rename(default_pdf, pdf_path)
                    print(f"已将PDF从 {default_pdf} 重命名为 {pdf_path}")

                # 验证PDF是否成功生成
                if not pdf_path.exists() or pdf_path.stat().st_size == 0:
                    raise RuntimeError("PDF生成失败或文件为空")

                print(f"PDF转换成功,文件大小: {pdf_path.stat().st_size} 字节")
            else:
                # Windows和MacOS使用docx2pdf
                print(f"使用docx2pdf转换 {word_path} 到 {pdf_path}")
                convert(word_path, pdf_path)

                # 验证PDF是否成功生成
                if not pdf_path.exists() or pdf_path.stat().st_size == 0:
                    raise RuntimeError("PDF生成失败或文件为空")

                print(f"PDF转换成功,文件大小: {pdf_path.stat().st_size} 字节")

            return str(pdf_path)

        except Exception as e:
            print(f"PDF转换异常: {str(e)}")
            raise Exception(f"转换PDF失败: {str(e)}")

    @staticmethod
    def batch_convert(word_dir: Union[str, Path], pdf_dir: Union[str, Path] = None) -> list:
        """
        批量转换目录下的所有Word文档

        参数:
            word_dir: 包含Word文档的目录路径
            pdf_dir: 可选,PDF文件的输出目录。如果未指定,将使用与Word文档相同的目录

        返回:
            生成的PDF文件路径列表
        """
        word_dir = Path(word_dir)
        if pdf_dir:
            pdf_dir = Path(pdf_dir)
            pdf_dir.mkdir(parents=True, exist_ok=True)

        converted_files = []

        for word_file in word_dir.glob("*.docx"):
            try:
                if pdf_dir:
                    pdf_path = pdf_dir / word_file.with_suffix('.pdf').name
                else:
                    pdf_path = word_file.with_suffix('.pdf')

                pdf_file = WordToPdfConverter.convert_to_pdf(word_file, pdf_path)
                converted_files.append(pdf_file)

            except Exception as e:
                print(f"转换 {word_file} 失败: {str(e)}")

        return converted_files

    @staticmethod
    def convert_doc_to_pdf(doc, output_dir: Union[str, Path] = None) -> str:
        """
        将docx对象直接转换为PDF

        参数:
            doc: python-docx的Document对象
            output_dir: 可选,输出目录。如果未指定,将使用当前目录

        返回:
            生成的PDF文件路径
        """
        temp_docx = None  # 预先绑定,避免异常发生在赋值之前时 except 分支引用未定义变量
        try:
            # 设置临时文件路径和输出路径
            output_dir = Path(output_dir) if output_dir else Path.cwd()
            output_dir.mkdir(parents=True, exist_ok=True)

            # 生成临时word文件
            temp_docx = output_dir / f"temp_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
            doc.save(temp_docx)

            # 转换为PDF
            pdf_path = temp_docx.with_suffix('.pdf')
            WordToPdfConverter.convert_to_pdf(temp_docx, pdf_path)

            # 删除临时word文件
            temp_docx.unlink()

            return str(pdf_path)

        except Exception as e:
            if temp_docx is not None and temp_docx.exists():
                temp_docx.unlink()
            raise Exception(f"转换PDF失败: {str(e)}")
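Usage sketches for the converter deleted above; the file and directory paths are hypothetical. On Linux, `convert_to_pdf` shells out to `libreoffice --headless`; elsewhere it uses docx2pdf:

```python
# Single file; when pdf_path is omitted, the .docx suffix is swapped for .pdf.
pdf = WordToPdfConverter.convert_to_pdf("report.docx")        # -> "report.pdf"

# Whole directory of .docx files, PDFs collected into a separate folder.
pdfs = WordToPdfConverter.batch_convert("docs/", pdf_dir="pdfs/")
```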
@@ -1,177 +0,0 @@
import re
from docx import Document
from docx.shared import Cm, Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
from docx.enum.style import WD_STYLE_TYPE
from docx.oxml.ns import qn
from datetime import datetime


def convert_markdown_to_word(markdown_text):
    # 0. 首先标准化所有换行符为\n
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # 1. 处理标题 - 支持更多级别的标题,使用更精确的正则
    # 保留标题标记,以便后续处理时还能识别出标题级别
    markdown_text = re.sub(r'^(#{1,6})\s+(.+?)(?:\s+#+)?$', r'\1 \2', markdown_text, flags=re.MULTILINE)
    # 2. 处理粗体、斜体和加粗斜体
    # 注意顺序:先匹配较长的标记(***、**、__),再匹配较短的(*、_),
    # 否则 __加粗__ 会先被 _(.+?)_ 吃掉一半
    markdown_text = re.sub(r'\*\*\*(.+?)\*\*\*', r'\1', markdown_text)  # 加粗斜体
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)      # 加粗
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)          # 斜体
    markdown_text = re.sub(r'__(.+?)__', r'\1', markdown_text)          # 下划线加粗
    markdown_text = re.sub(r'_(.+?)_', r'\1', markdown_text)            # 下划线斜体

    # 3. 处理代码块 - 不移除,而是简化格式
    # 多行代码块
    markdown_text = re.sub(r'```(?:\w+)?\n([\s\S]*?)```', r'[代码块]\n\1[/代码块]', markdown_text)
    # 单行代码
    markdown_text = re.sub(r'`([^`]+)`', r'[代码]\1[/代码]', markdown_text)

    # 4. 处理列表 - 保留列表结构
    # 匹配无序列表
    markdown_text = re.sub(r'^(\s*)[-*+]\s+(.+?)$', r'\1• \2', markdown_text, flags=re.MULTILINE)

    # 5. 处理Markdown链接
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+?)\s*(?:"[^"]*")?\)', r'\1 (\2)', markdown_text)

    # 6. 处理HTML链接
    markdown_text = re.sub(r'<a href=[\'"]([^\'"]+)[\'"](?:\s+target=[\'"][^\'"]+[\'"])?>([^<]+)</a>', r'\2 (\1)',
                           markdown_text)

    # 7. 处理图片
    markdown_text = re.sub(r'!\[([^\]]*)\]\([^)]+\)', r'[图片:\1]', markdown_text)

    return markdown_text


class WordFormatter:
    """聊天记录Word文档生成器 - 符合中国政府公文格式规范(GB/T 9704-2012)"""

    def __init__(self):
        self.doc = Document()
        self._setup_document()
        self._create_styles()

    def _setup_document(self):
        """设置文档基本格式,包括页面设置和页眉"""
        sections = self.doc.sections
        for section in sections:
            # 设置页面大小为A4
            section.page_width = Cm(21)
            section.page_height = Cm(29.7)
            # 设置页边距
            section.top_margin = Cm(3.7)     # 上边距37mm
            section.bottom_margin = Cm(3.5)  # 下边距35mm
            section.left_margin = Cm(2.8)    # 左边距28mm
            section.right_margin = Cm(2.6)   # 右边距26mm
            # 设置页眉页脚距离
            section.header_distance = Cm(2.0)
            section.footer_distance = Cm(2.0)

            # 添加页眉
            header = section.header
            header_para = header.paragraphs[0]
            header_para.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
            header_run = header_para.add_run("GPT-Academic对话记录")
            header_run.font.name = '仿宋'
            header_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
            header_run.font.size = Pt(9)

    def _create_styles(self):
        """创建文档样式"""
        # 创建正文样式
        style = self.doc.styles.add_style('Normal_Custom', WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = '仿宋'
        style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        style.font.size = Pt(12)  # 调整为12磅
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        style.paragraph_format.space_after = Pt(0)

        # 创建问题样式
        question_style = self.doc.styles.add_style('Question_Style', WD_STYLE_TYPE.PARAGRAPH)
        question_style.font.name = '黑体'
        question_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        question_style.font.size = Pt(14)  # 调整为14磅
        question_style.font.bold = True
        question_style.paragraph_format.space_before = Pt(12)  # 减小段前距
        question_style.paragraph_format.space_after = Pt(6)
        question_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        question_style.paragraph_format.left_indent = Pt(0)  # 移除左缩进

        # 创建回答样式
        answer_style = self.doc.styles.add_style('Answer_Style', WD_STYLE_TYPE.PARAGRAPH)
        answer_style.font.name = '仿宋'
        answer_style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        answer_style.font.size = Pt(12)  # 调整为12磅
        answer_style.paragraph_format.space_before = Pt(6)
        answer_style.paragraph_format.space_after = Pt(12)
        answer_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        answer_style.paragraph_format.left_indent = Pt(0)  # 移除左缩进

        # 创建标题样式
        title_style = self.doc.styles.add_style('Title_Custom', WD_STYLE_TYPE.PARAGRAPH)
        title_style.font.name = '黑体'  # 改用黑体
        title_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        title_style.font.size = Pt(22)  # 调整为22磅
        title_style.font.bold = True
        title_style.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        title_style.paragraph_format.space_before = Pt(0)
        title_style.paragraph_format.space_after = Pt(24)
        title_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE

        # 添加参考文献样式
        ref_style = self.doc.styles.add_style('Reference_Style', WD_STYLE_TYPE.PARAGRAPH)
        ref_style.font.name = '宋体'
        ref_style._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
        ref_style.font.size = Pt(10.5)  # 参考文献使用小号字体
        ref_style.paragraph_format.space_before = Pt(3)
        ref_style.paragraph_format.space_after = Pt(3)
        ref_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.SINGLE
        ref_style.paragraph_format.left_indent = Pt(21)
        ref_style.paragraph_format.first_line_indent = Pt(-21)

        # 添加参考文献标题样式
        ref_title_style = self.doc.styles.add_style('Reference_Title_Style', WD_STYLE_TYPE.PARAGRAPH)
        ref_title_style.font.name = '黑体'
        ref_title_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        ref_title_style.font.size = Pt(16)
        ref_title_style.font.bold = True
        ref_title_style.paragraph_format.space_before = Pt(24)
        ref_title_style.paragraph_format.space_after = Pt(12)
        ref_title_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE

    def create_document(self, history):
        """写入聊天历史"""
        # 添加标题
        title_para = self.doc.add_paragraph(style='Title_Custom')
        title_run = title_para.add_run('GPT-Academic 对话记录')

        # 添加日期
        date_para = self.doc.add_paragraph()
        date_para.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        date_run = date_para.add_run(datetime.now().strftime('%Y年%m月%d日'))
        date_run.font.name = '仿宋'
        date_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        date_run.font.size = Pt(16)

        self.doc.add_paragraph()  # 添加空行

        # 添加对话内容
for i in range(0, len(history), 2):
|
|
||||||
question = history[i]
|
|
||||||
answer = convert_markdown_to_word(history[i + 1])
|
|
||||||
|
|
||||||
if question:
|
|
||||||
q_para = self.doc.add_paragraph(style='Question_Style')
|
|
||||||
q_para.add_run(f'问题 {i//2 + 1}:').bold = True
|
|
||||||
q_para.add_run(str(question))
|
|
||||||
|
|
||||||
if answer:
|
|
||||||
a_para = self.doc.add_paragraph(style='Answer_Style')
|
|
||||||
a_para.add_run(f'回答 {i//2 + 1}:').bold = True
|
|
||||||
a_para.add_run(str(answer))
|
|
||||||
|
|
||||||
|
|
||||||
return self.doc
|
|
||||||
|
|
||||||
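For orientation, a minimal usage sketch of the writer methods above. The enclosing class name `ChatWordWriter` is a hypothetical name (the class header sits in the collapsed part of this diff), and the two history strings are illustrative:

# Sketch only: `ChatWordWriter` is an assumed name for the enclosing class.
writer = ChatWordWriter()
doc = writer.create_document(history=["What is attention?", "Attention is a weighting mechanism."])
doc.save('gpt_academic_chat.docx')  # python-docx Document objects expose save()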
@@ -1,6 +0,0 @@
import nltk

nltk.data.path.append('~/nltk_data')
nltk.download('averaged_perceptron_tagger', download_dir='~/nltk_data')
nltk.download('punkt', download_dir='~/nltk_data')
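One caveat on the snippet above: entries in `nltk.data.path` are used as literal paths, so expanding `~` explicitly avoids relying on the library to do it. A small sketch, not part of the deleted file:

import os
import nltk

data_dir = os.path.expanduser('~/nltk_data')  # resolve '~' to the real home directory
nltk.data.path.append(data_dir)
nltk.download('averaged_perceptron_tagger', download_dir=data_dir)
nltk.download('punkt', download_dir=data_dir)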
@@ -1,286 +0,0 @@
from __future__ import annotations

import pandas as pd
import numpy as np
from pathlib import Path
from typing import Any, Optional, List, Set, Dict, Union, Iterator, Tuple
from dataclasses import dataclass, field
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import chardet
from functools import lru_cache
import os


@dataclass
class ExtractorConfig:
    """Extractor configuration"""
    encoding: str = 'auto'
    na_filter: bool = True
    skip_blank_lines: bool = True
    chunk_size: int = 10000
    max_workers: int = 4
    preserve_format: bool = True
    read_all_sheets: bool = True  # new: whether to read every worksheet
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': False,
        'remove_special_chars': False,
        'lowercase': False
    })


class ExcelTextExtractor:
    """Enhanced text extractor for Excel-family files"""

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.xlsx', '.xls', '.csv', '.tsv', '.xlsm', '.xltx', '.xltm', '.ods'
    }

    def __init__(self, config: Optional[ExtractorConfig] = None):
        self.config = config or ExtractorConfig()
        self._setup_logging()
        self._detect_encoding = lru_cache(maxsize=128)(self._detect_encoding)

    def _setup_logging(self) -> None:
        """Configure the logger"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)
        fh = logging.FileHandler('excel_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _detect_encoding(self, file_path: Path) -> str:
        if self.config.encoding != 'auto':
            return self.config.encoding

        try:
            with open(file_path, 'rb') as f:
                raw_data = f.read(10000)
                result = chardet.detect(raw_data)
                return result['encoding'] or 'utf-8'
        except Exception as e:
            self.logger.warning(f"Encoding detection failed: {e}. Using utf-8")
            return 'utf-8'

    def _validate_file(self, file_path: Union[str, Path]) -> Path:
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported format: {path.suffix}. "
                f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _format_value(self, value: Any) -> str:
        if pd.isna(value) or value is None:
            return ''
        if isinstance(value, (int, float)):
            return str(value)
        return str(value).strip()

    def _process_chunk(self, chunk: pd.DataFrame, columns: Optional[List[str]] = None, sheet_name: str = '') -> str:
        """Process one data chunk; sheet_name is a new parameter"""
        try:
            if columns:
                chunk = chunk[columns]

            if self.config.preserve_format:
                formatted_chunk = chunk.applymap(self._format_value)
                rows = []

                # Prepend the worksheet name as a heading
                if sheet_name:
                    rows.append(f"[Sheet: {sheet_name}]")

                # Header row
                headers = [str(col) for col in formatted_chunk.columns]
                rows.append('\t'.join(headers))

                # Data rows
                for _, row in formatted_chunk.iterrows():
                    rows.append('\t'.join(row.values))

                return '\n'.join(rows)
            else:
                flat_values = (
                    chunk.astype(str)
                    .replace({'nan': '', 'None': '', 'NaN': ''})
                    .values.flatten()
                )
                return ' '.join(v for v in flat_values if v)

        except Exception as e:
            self.logger.error(f"Error processing chunk: {e}")
            raise

    def _read_file(self, file_path: Path) -> Union[pd.DataFrame, Iterator[pd.DataFrame], Dict[str, pd.DataFrame]]:
        """Read the file, with multi-worksheet support"""
        try:
            encoding = self._detect_encoding(file_path)

            if file_path.suffix.lower() in {'.csv', '.tsv'}:
                sep = '\t' if file_path.suffix.lower() == '.tsv' else ','

                # Use chunked reading for large files
                if file_path.stat().st_size > self.config.chunk_size * 1024:
                    return pd.read_csv(
                        file_path,
                        encoding=encoding,
                        na_filter=self.config.na_filter,
                        skip_blank_lines=self.config.skip_blank_lines,
                        sep=sep,
                        chunksize=self.config.chunk_size,
                        on_bad_lines='warn'
                    )
                else:
                    return pd.read_csv(
                        file_path,
                        encoding=encoding,
                        na_filter=self.config.na_filter,
                        skip_blank_lines=self.config.skip_blank_lines,
                        sep=sep
                    )
            else:
                # Excel handling, with multi-worksheet support
                if self.config.read_all_sheets:
                    # Read every worksheet
                    return pd.read_excel(
                        file_path,
                        na_filter=self.config.na_filter,
                        keep_default_na=self.config.na_filter,
                        engine='openpyxl',
                        sheet_name=None  # None means all worksheets
                    )
                else:
                    # Read only the first worksheet
                    return pd.read_excel(
                        file_path,
                        na_filter=self.config.na_filter,
                        keep_default_na=self.config.na_filter,
                        engine='openpyxl',
                        sheet_name=0  # first worksheet
                    )

        except Exception as e:
            self.logger.error(f"Error reading file {file_path}: {e}")
            raise

    def extract_text(
        self,
        file_path: Union[str, Path],
        columns: Optional[List[str]] = None,
        separator: str = '\n'
    ) -> str:
        """Extract text, with multi-worksheet support"""
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            reader = self._read_file(path)
            texts = []

            # Excel with multiple worksheets
            if isinstance(reader, dict):
                for sheet_name, df in reader.items():
                    sheet_text = self._process_chunk(df, columns, sheet_name)
                    if sheet_text:
                        texts.append(sheet_text)
                return separator.join(texts)

            # A single DataFrame
            elif isinstance(reader, pd.DataFrame):
                return self._process_chunk(reader, columns)

            # An iterator of DataFrame chunks
            else:
                with ThreadPoolExecutor(max_workers=self.config.max_workers) as executor:
                    futures = {
                        executor.submit(self._process_chunk, chunk, columns): i
                        for i, chunk in enumerate(reader)
                    }

                    chunk_texts = []
                    for future in as_completed(futures):
                        try:
                            text = future.result()
                            if text:
                                chunk_texts.append((futures[future], text))
                        except Exception as e:
                            self.logger.error(f"Error in chunk {futures[future]}: {e}")

                    # Restore chunk order
                    chunk_texts.sort(key=lambda x: x[0])
                    texts = [text for _, text in chunk_texts]

                    # Merge the texts, preserving the layout
                    if texts and self.config.preserve_format:
                        result = texts[0]  # the first chunk carries the header row
                        if len(texts) > 1:
                            # Skip the header row of every later chunk
                            for text in texts[1:]:
                                result += '\n' + '\n'.join(text.split('\n')[1:])
                        return result
                    else:
                        return separator.join(texts)

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats"""
        return sorted(ExcelTextExtractor.SUPPORTED_EXTENSIONS)


def main():
    """Entry point: usage demo"""
    config = ExtractorConfig(
        encoding='auto',
        preserve_format=True,
        read_all_sheets=True,  # enable reading every worksheet
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': False,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    extractor = ExcelTextExtractor(config)

    try:
        sample_file = 'example.xlsx'
        if Path(sample_file).exists():
            text = extractor.extract_text(
                sample_file,
                columns=['title', 'content']
            )
            print("提取的文本:")
            print(text)
        else:
            print(f"示例文件 {sample_file} 不存在")

        print("\n支持的格式:", extractor.get_supported_formats())

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
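A portability note on `_process_chunk` above: pandas 2.1 deprecated `DataFrame.applymap` in favor of `DataFrame.map`, so a version-tolerant wrapper for that one call could look like this sketch:

import pandas as pd

def format_cells(chunk: pd.DataFrame, fmt) -> pd.DataFrame:
    # DataFrame.map exists on pandas >= 2.1; older versions only have applymap
    if hasattr(pd.DataFrame, "map"):
        return chunk.map(fmt)
    return chunk.applymap(fmt)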
@@ -1,359 +0,0 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re
import subprocess
import tempfile
import shutil

@dataclass
class MarkdownConverterConfig:
    """Configuration for the PDF-to-Markdown converter

    Attributes:
        extract_images: whether to extract images
        extract_tables: whether to try to keep table structure
        extract_code_blocks: whether to detect code blocks
        extract_math: whether to convert math formulas
        output_dir: output directory path
        image_dir: directory for saved images
        paragraph_separator: separator between paragraphs
        text_cleanup: text-cleanup option dict
        docintel_endpoint: Document Intelligence endpoint URL (optional)
        enable_plugins: whether to enable plugins
        llm_client: LLM client object (e.g. an OpenAI client)
        llm_model: name of the LLM model to use
    """
    extract_images: bool = True
    extract_tables: bool = True
    extract_code_blocks: bool = True
    extract_math: bool = True
    output_dir: str = ""
    image_dir: str = "images"
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })
    docintel_endpoint: str = ""
    enable_plugins: bool = False
    llm_client: Optional[object] = None
    llm_model: str = ""


class MarkdownConverter:
    """PDF-to-Markdown converter

    Uses the markitdown library to convert PDF to Markdown, with several configuration options.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.pdf',
    }

    def __init__(self, config: Optional[MarkdownConverterConfig] = None):
        """Initialize the converter

        Args:
            config: converter configuration; defaults are used when None
        """
        self.config = config or MarkdownConverterConfig()
        self._setup_logging()

        # Check whether markitdown is installed
        self._check_markitdown_installation()

    def _setup_logging(self) -> None:
        """Configure the logger"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Attach a file handler
        fh = logging.FileHandler('markdown_converter.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _check_markitdown_installation(self) -> None:
        """Check whether markitdown is installed"""
        try:
            # Try to import the markitdown library
            from markitdown import MarkItDown
            self.logger.info("markitdown 库已安装")
        except ImportError:
            self.logger.warning("markitdown 库未安装,尝试安装...")
            try:
                subprocess.check_call(["pip", "install", "markitdown"])
                self.logger.info("markitdown 库安装成功")
                from markitdown import MarkItDown
            except (subprocess.SubprocessError, ImportError):
                self.logger.error("无法安装 markitdown 库,请手动安装")
                self.markitdown_available = False
                return

        self.markitdown_available = True

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate the file

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over the limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"文件不存在: {path}")

        if not path.is_file():
            raise ValueError(f"不是一个文件: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"没有读取权限: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"文件大小 ({file_size_mb:.1f}MB) 超过限制 {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"不支持的格式: {path.suffix}. "
                f"支持的格式: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text

        Args:
            text: raw text

        Returns:
            str: cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats"""
        return sorted(MarkdownConverter.SUPPORTED_EXTENSIONS)

    def convert_to_markdown(
        self,
        file_path: Union[str, Path],
        output_path: Optional[Union[str, Path]] = None
    ) -> str:
        """Convert a PDF to Markdown

        Args:
            file_path: PDF file path
            output_path: output Markdown path; when None the content is returned without saving

        Returns:
            str: the converted Markdown content

        Raises:
            Exception: errors raised during conversion
        """
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"处理: {path}")

            if not self.markitdown_available:
                raise ImportError("markitdown 库未安装,无法进行转换")

            # Import the markitdown library
            from markitdown import MarkItDown

            # Prepare the output directory
            save_requested = output_path is not None  # remember whether the caller asked for a file
            if output_path:
                output_path = Path(output_path)
                output_dir = output_path.parent
                output_dir.mkdir(parents=True, exist_ok=True)
            else:
                # Use a temporary directory as the output directory
                temp_dir = tempfile.mkdtemp()
                output_dir = Path(temp_dir)
                output_path = output_dir / f"{path.stem}.md"

            # Image directory
            image_dir = output_dir / self.config.image_dir
            image_dir.mkdir(parents=True, exist_ok=True)

            # Build a MarkItDown instance and run the conversion
            if self.config.docintel_endpoint:
                md = MarkItDown(docintel_endpoint=self.config.docintel_endpoint)
            elif self.config.llm_client and self.config.llm_model:
                md = MarkItDown(
                    enable_plugins=self.config.enable_plugins,
                    llm_client=self.config.llm_client,
                    llm_model=self.config.llm_model
                )
            else:
                md = MarkItDown(enable_plugins=self.config.enable_plugins)

            # Perform the conversion
            result = md.convert(str(path))
            markdown_content = result.text_content

            # Clean the text
            markdown_content = self._cleanup_text(markdown_content)

            # Save to file
            if output_path:
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(markdown_content)
                self.logger.info(f"转换成功,输出到: {output_path}")

            return markdown_content

        except Exception as e:
            self.logger.error(f"转换失败: {e}")
            raise
        finally:
            # Clean up the temporary directory when the caller did not request a saved file;
            # save_requested is captured before output_path gets reassigned above
            if 'temp_dir' in locals() and not save_requested:
                shutil.rmtree(temp_dir, ignore_errors=True)

    def convert_to_markdown_and_save(
        self,
        file_path: Union[str, Path],
        output_path: Union[str, Path]
    ) -> Path:
        """Convert a PDF to Markdown and save it to the given path

        Args:
            file_path: PDF file path
            output_path: output Markdown file path

        Returns:
            Path: Path object of the output file

        Raises:
            Exception: errors raised during conversion
        """
        self.convert_to_markdown(file_path, output_path)
        return Path(output_path)

    def batch_convert(
        self,
        file_paths: List[Union[str, Path]],
        output_dir: Union[str, Path]
    ) -> List[Path]:
        """Batch-convert several PDF files to Markdown

        Args:
            file_paths: list of PDF file paths
            output_dir: output directory path

        Returns:
            List[Path]: list of output file paths

        Raises:
            Exception: errors raised during conversion
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        output_paths = []
        for file_path in file_paths:
            path = Path(file_path)
            output_path = output_dir / f"{path.stem}.md"

            try:
                self.convert_to_markdown(file_path, output_path)
                output_paths.append(output_path)
                self.logger.info(f"成功转换: {path} -> {output_path}")
            except Exception as e:
                self.logger.error(f"转换失败 {path}: {e}")

        return output_paths


def main():
    """Entry point: usage demo"""
    # Configuration
    config = MarkdownConverterConfig(
        extract_images=True,
        extract_tables=True,
        extract_code_blocks=True,
        extract_math=True,
        enable_plugins=False,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # Build the converter
    converter = MarkdownConverter(config)

    # Usage example
    try:
        # Replace with a real file path
        sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
        if Path(sample_file).exists():
            # Convert to Markdown and print the content
            markdown_content = converter.convert_to_markdown(sample_file)
            print("转换后的 Markdown 内容:")
            print(markdown_content[:500] + "...")  # print only the first 500 characters

            # Convert and save to a file
            output_file = f"./output_{Path(sample_file).stem}.md"
            output_path = converter.convert_to_markdown_and_save(sample_file, output_file)
            print(f"\n已保存到: {output_path}")

            # LLM-augmented example (needs the matching imports and configuration)
            # try:
            #     from openai import OpenAI
            #     client = OpenAI()
            #     llm_config = MarkdownConverterConfig(
            #         llm_client=client,
            #         llm_model="gpt-4o"
            #     )
            #     llm_converter = MarkdownConverter(llm_config)
            #     llm_result = llm_converter.convert_to_markdown("example.jpg")
            #     print("LLM增强的结果:")
            #     print(llm_result[:500] + "...")
            # except ImportError:
            #     print("未安装OpenAI库,跳过LLM示例")
        else:
            print(f"示例文件 {sample_file} 不存在")

        print("\n支持的格式:", converter.get_supported_formats())

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
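The `main()` demo exercises only the single-file path; here is a short sketch of the `batch_convert` entry point defined above (the `./papers` folder is a placeholder):

from pathlib import Path

converter = MarkdownConverter(MarkdownConverterConfig())
pdfs = sorted(Path('./papers').glob('*.pdf'))  # placeholder input folder
saved = converter.batch_convert(pdfs, output_dir='./papers_md')
print(f"converted {len(saved)} of {len(pdfs)} files")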
@@ -1,493 +0,0 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re

from unstructured.partition.auto import partition
from unstructured.documents.elements import (
    Text, Title, NarrativeText, ListItem, Table,
    Footer, Header, PageBreak, Image, Address
)


@dataclass
class PaperMetadata:
    """Paper metadata"""
    title: str = ""
    authors: List[str] = field(default_factory=list)
    affiliations: List[str] = field(default_factory=list)
    journal: str = ""
    volume: str = ""
    issue: str = ""
    year: str = ""
    doi: str = ""
    date: str = ""
    publisher: str = ""
    conference: str = ""
    abstract: str = ""
    keywords: List[str] = field(default_factory=list)


@dataclass
class ExtractorConfig:
    """Metadata extractor configuration"""
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class PaperMetadataExtractor:
    """Paper metadata extractor

    Uses the unstructured library to pull the title, authors, abstract and other
    metadata out of documents in many formats.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.pdf', '.docx', '.doc', '.txt', '.ppt', '.pptx',
        '.xlsx', '.xls', '.md', '.org', '.odt', '.rst',
        '.rtf', '.epub', '.html', '.xml', '.json'
    }

    # Keyword patterns for the paper sections
    SECTION_PATTERNS = {
        'abstract': r'\b(摘要|abstract|summary|概要|résumé|zusammenfassung|аннотация)\b',
        'keywords': r'\b(关键词|keywords|key\s+words|关键字|mots[- ]clés|schlüsselwörter|ключевые слова)\b',
    }

    def __init__(self, config: Optional[ExtractorConfig] = None):
        """Initialize the extractor

        Args:
            config: extractor configuration; defaults are used when None
        """
        self.config = config or ExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure the logger"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Attach a file handler
        fh = logging.FileHandler('paper_metadata_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate the file

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over the limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"文件不存在: {path}")

        if not path.is_file():
            raise ValueError(f"不是文件: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"没有读取权限: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"文件大小 ({file_size_mb:.1f}MB) 超过限制 {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"不支持的文件格式: {path.suffix}. "
                f"支持的格式: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text

        Args:
            text: raw text

        Returns:
            str: cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats"""
        return sorted(PaperMetadataExtractor.SUPPORTED_EXTENSIONS)

    def extract_metadata(self, file_path: Union[str, Path], strategy: str = "fast") -> PaperMetadata:
        """Extract paper metadata

        Args:
            file_path: file path
            strategy: extraction strategy ("fast" or "accurate")

        Returns:
            PaperMetadata: the extracted paper metadata

        Raises:
            Exception: errors raised during extraction
        """
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"正在处理: {path}")

            # Split the document into elements with the unstructured library
            elements = partition(
                str(path),
                strategy=strategy,
                include_metadata=True,
                nlp=False,
            )

            # Extract the metadata
            metadata = PaperMetadata()

            # Title and authors
            self._extract_title_and_authors(elements, metadata)

            # Abstract and keywords
            self._extract_abstract_and_keywords(elements, metadata)

            # Remaining metadata
            self._extract_additional_metadata(elements, metadata)

            return metadata

        except Exception as e:
            self.logger.error(f"元数据提取失败: {e}")
            raise

    def _extract_title_and_authors(self, elements, metadata: PaperMetadata) -> None:
        """Extract the title and author information from the document - improved version"""
        # Collect every potential title candidate
        title_candidates = []
        all_text = []
        raw_text = []

        # First collect the text of the first 30 elements to support the heuristics
        for i, element in enumerate(elements[:30]):
            if isinstance(element, (Text, Title, NarrativeText)):
                text = str(element).strip()
                if text:
                    all_text.append(text)
                    raw_text.append(text)

        # Print the raw text, for debugging
        print("原始文本前10行:")
        for i, text in enumerate(raw_text[:10]):
            print(f"{i}: {text}")

        # 1. Look for consecutive title fragments and merge them
        i = 0
        while i < len(all_text) - 1:
            current = all_text[i]
            next_text = all_text[i + 1]

            # Detect a split title: one line ends with a colon, the next looks like its continuation
            if current.endswith(':') and len(current) < 50 and len(next_text) > 5 and next_text[0].isupper():
                # Merge the two lines
                combined_title = f"{current} {next_text}"
                # Replace the pre-merge text
                all_text[i] = combined_title
                all_text.pop(i + 1)
                # Give merged titles a very high score
                title_candidates.append((combined_title, 15, i))
            else:
                i += 1

        # 2. Then look through the Title elements
        for i, element in enumerate(elements[:15]):  # only check the first 15 elements
            if isinstance(element, Title):
                title_text = str(element).strip()
                # Filter out common non-title content
                if title_text.lower() not in ['abstract', '摘要', 'introduction', '引言']:
                    # Score the candidate (higher = more likely the real title)
                    score = self._evaluate_title_candidate(title_text, i, element)
                    title_candidates.append((title_text, score, i))

        # 3. Handle common paper-title layouts specially
        for i, text in enumerate(all_text[:15]):
            # Check prefix titles of the "KIMI K1.5:" kind
            if re.match(r'^[A-Z][A-Z0-9\s\.]+(\s+K\d+(\.\d+)?)?:', text):
                score = 12  # very high score
                title_candidates.append((text, score, i))

                # If the next line is all uppercase, it is probably the title continuation
                if i+1 < len(all_text) and all_text[i+1].isupper() and len(all_text[i+1]) > 10:
                    combined_title = f"{text} {all_text[i+1]}"
                    title_candidates.append((combined_title, 15, i))  # merged titles score higher

            # Match all-uppercase title lines
            elif text.isupper() and len(text) > 10 and len(text) < 100:
                score = 10 - i * 0.5  # earlier lines are more likely the title
                title_candidates.append((text, score, i))

        # Sort the candidates by score and take the best one
        if title_candidates:
            title_candidates.sort(key=lambda x: x[1], reverse=True)
            metadata.title = title_candidates[0][0]
            title_position = title_candidates[0][2]
            print(f"所有标题候选: {title_candidates[:3]}")
        else:
            # Fall back to a simpler strategy when no candidate was found
            for text in all_text[:10]:
                if text.isupper() and len(text) > 10 and len(text) < 200:  # uppercase text of a sensible length
                    metadata.title = text
                    break
            title_position = 0

        # Extract author information - improved author logic
        author_candidates = []

        # 1. Specially handle the line after "TECHNICAL REPORT OF", usually the authors or team
        for i, text in enumerate(all_text):
            if "TECHNICAL REPORT" in text.upper() and i+1 < len(all_text):
                team_text = all_text[i+1].strip()
                if re.search(r'\b(team|group|lab)\b', team_text, re.IGNORECASE):
                    author_candidates.append((team_text, 15))

        # 2. Look for text containing "Team"
        for text in all_text[:20]:
            if "Team" in text and len(text) < 30:
                # Very likely a team name
                author_candidates.append((text, 12))

        # Add the authors to the metadata
        if author_candidates:
            # Sort by score
            author_candidates.sort(key=lambda x: x[1], reverse=True)

            # De-duplicate
            seen_authors = set()
            for author, _ in author_candidates:
                if author.lower() not in seen_authors and not author.isdigit():
                    seen_authors.add(author.lower())
                    metadata.authors.append(author)

        # When no author was found, look for a team name in affiliation-like text
        if not metadata.authors:
            for text in all_text[:20]:
                if re.search(r'\b(team|group|lab|laboratory|研究组|团队)\b', text, re.IGNORECASE):
                    if len(text) < 50:  # avoid overly long text
                        metadata.authors.append(text.strip())
                        break

        # Extract affiliation information
        for i, element in enumerate(elements[:30]):
            element_text = str(element).strip()
            if re.search(r'(university|institute|department|school|laboratory|college|center|centre|\d{5,}|^[a-zA-Z]+@|学院|大学|研究所|研究院)', element_text, re.IGNORECASE):
                # Probably an affiliation
                if element_text not in metadata.affiliations and len(element_text) > 10:
                    metadata.affiliations.append(element_text)

    def _evaluate_title_candidate(self, text, position, element):
        """Score how likely a candidate is to be the real title"""
        score = 0

        # Position: earlier lines are more likely titles
        score += max(0, 10 - position) * 0.5

        # Length: titles are rarely very short or very long
        if 10 <= len(text) <= 150:
            score += 3
        elif len(text) < 10:
            score -= 2
        elif len(text) > 150:
            score -= 3

        # Formatting
        if text.isupper():  # all uppercase suggests a title
            score += 2
        if re.match(r'^[A-Z]', text):  # leading capital
            score += 1
        if ':' in text:  # titles often contain a colon
            score += 1.5

        # Content
        if re.search(r'\b(scaling|learning|model|approach|method|system|framework|analysis)\b', text.lower()):
            score += 2  # contains common academic-paper keywords

        # Guard against false positives
        if re.match(r'^\d+$', text):  # digits only
            score -= 10
        if re.search(r'^(http|www|doi)', text.lower()):  # URL or DOI
            score -= 5
        if len(text.split()) <= 2 and len(text) < 15:  # phrase too short
            score -= 3

        # Element metadata (when present)
        if hasattr(element, 'metadata') and element.metadata:
            # Fix: handle the ElementMetadata object correctly
            try:
                # Read the attributes safely through getattr
                font_size = getattr(element.metadata, 'font_size', None)
                if font_size is not None and font_size > 14:  # assume the standard font size is 12
                    score += 3

                font_weight = getattr(element.metadata, 'font_weight', None)
                if font_weight == 'bold':
                    score += 2  # bold text scores higher
            except (AttributeError, TypeError):
                # Fall back to another access path when attribute access fails
                try:
                    metadata_dict = element.metadata.__dict__ if hasattr(element.metadata, '__dict__') else {}
                    if 'font_size' in metadata_dict and metadata_dict['font_size'] > 14:
                        score += 3
                    if 'font_weight' in metadata_dict and metadata_dict['font_weight'] == 'bold':
                        score += 2
                except Exception:
                    # Give up on metadata scoring when every attempt fails
                    pass

        return score

    def _extract_abstract_and_keywords(self, elements, metadata: PaperMetadata) -> None:
        """Extract the abstract and keywords from the document"""
        abstract_found = False
        keywords_found = False
        abstract_text = []

        for i, element in enumerate(elements):
            element_text = str(element).strip().lower()

            # Look for the abstract section
            if not abstract_found and (
                isinstance(element, Title) and
                re.search(self.SECTION_PATTERNS['abstract'], element_text, re.IGNORECASE)
            ):
                abstract_found = True
                continue

            # Once inside the abstract, collect content until keywords or a new section appears
            if abstract_found and not keywords_found:
                # Check whether a keywords section or a new section starts here
                if (
                    isinstance(element, Title) or
                    re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE) or
                    re.match(r'\b(introduction|引言|method|方法)\b', element_text, re.IGNORECASE)
                ):
                    keywords_found = bool(re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE))
                    abstract_found = False  # stop collecting the abstract
                else:
                    # Collect abstract text
                    if isinstance(element, (Text, NarrativeText)) and element_text:
                        abstract_text.append(element_text)

            # When the keywords section was found, extract the keywords
            if keywords_found and not abstract_found and not metadata.keywords:
                if isinstance(element, (Text, NarrativeText)):
                    # Strip a possible "关键词:"/"Keywords:" prefix
                    cleaned_text = re.sub(r'^\s*(关键词|keywords|key\s+words)\s*[::]\s*', '', element_text, flags=re.IGNORECASE)

                    # Try several separators
                    for separator in [';', ';', ',', ',']:
                        if separator in cleaned_text:
                            metadata.keywords = [k.strip() for k in cleaned_text.split(separator) if k.strip()]
                            break

                    # Fall back to the whole text as a single keyword
                    if not metadata.keywords and cleaned_text:
                        metadata.keywords = [cleaned_text]

                    keywords_found = False  # keywords extracted, stop processing

        # Store the abstract text
        if abstract_text:
            metadata.abstract = self.config.paragraph_separator.join(abstract_text)

    def _extract_additional_metadata(self, elements, metadata: PaperMetadata) -> None:
        """Extract the remaining metadata fields"""
        for element in elements[:30]:  # only inspect the front of the document
            element_text = str(element).strip()

            # Try to match a DOI
            doi_match = re.search(r'(doi|DOI):\s*(10\.\d{4,}\/[a-zA-Z0-9.-]+)', element_text)
            if doi_match and not metadata.doi:
                metadata.doi = doi_match.group(2)

            # Try to match a date
            date_match = re.search(r'(published|received|accepted|submitted):\s*(\d{1,2}\s+[a-zA-Z]+\s+\d{4}|\d{4}[-/]\d{1,2}[-/]\d{1,2})', element_text, re.IGNORECASE)
            if date_match and not metadata.date:
                metadata.date = date_match.group(2)

            # Try to match a year
            year_match = re.search(r'\b(19|20)\d{2}\b', element_text)
            if year_match and not metadata.year:
                metadata.year = year_match.group(0)

            # Try to match a journal/conference name
            journal_match = re.search(r'(journal|conference):\s*([^,;.]+)', element_text, re.IGNORECASE)
            if journal_match:
                if "journal" in journal_match.group(1).lower() and not metadata.journal:
                    metadata.journal = journal_match.group(2).strip()
                elif not metadata.conference:
                    metadata.conference = journal_match.group(2).strip()


def main():
    """Entry point: usage demo"""
    # Build the extractor
    extractor = PaperMetadataExtractor()

    # Usage example
    try:
        # Replace with a real file path
        sample_file = '/Users/boyin.liu/Documents/示例文档/论文/3.pdf'
        if Path(sample_file).exists():
            metadata = extractor.extract_metadata(sample_file)
            print("提取的元数据:")
            print(f"标题: {metadata.title}")
            print(f"作者: {', '.join(metadata.authors)}")
            print(f"机构: {', '.join(metadata.affiliations)}")
            print(f"摘要: {metadata.abstract[:200]}...")
            print(f"关键词: {', '.join(metadata.keywords)}")
            print(f"DOI: {metadata.doi}")
            print(f"日期: {metadata.date}")
            print(f"年份: {metadata.year}")
            print(f"期刊: {metadata.journal}")
            print(f"会议: {metadata.conference}")
        else:
            print(f"示例文件 {sample_file} 不存在")

        print("\n支持的格式:", extractor.get_supported_formats())

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
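As a quick sanity check of the patterns in `_extract_additional_metadata`, the DOI regex behaves like this on a synthetic line (the input string is made up for illustration):

import re

line = "DOI: 10.48550/arXiv.2501.12599"  # synthetic example line
m = re.search(r'(doi|DOI):\s*(10\.\d{4,}\/[a-zA-Z0-9.-]+)', line)
assert m is not None and m.group(2) == "10.48550/arXiv.2501.12599"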
File diff suppressed because it is too large
@@ -1,86 +0,0 @@
from pathlib import Path
from crazy_functions.doc_fns.read_fns.unstructured_all.paper_structure_extractor import PaperStructureExtractor

def extract_and_save_as_markdown(paper_path, output_path=None):
    """
    Extract the paper structure and save it in Markdown format

    Args:
        paper_path: paper file path
        output_path: output Markdown path; when omitted, the input file name with a .md extension is used

    Returns:
        Path of the saved Markdown file
    """
    # Build the extractor
    extractor = PaperStructureExtractor()

    # Resolve the file path
    paper_path = Path(paper_path)

    # Default to the same file name with a .md extension
    if output_path is None:
        output_path = paper_path.with_suffix('.md')
    else:
        output_path = Path(output_path)

    # Make sure the output directory exists
    output_path.parent.mkdir(parents=True, exist_ok=True)

    print(f"正在处理论文: {paper_path}")

    try:
        # Extract the paper structure
        paper = extractor.extract_paper_structure(paper_path)

        # Generate the Markdown content
        markdown_content = extractor.generate_markdown(paper)

        # Save it to the file
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        print(f"已成功保存Markdown文件: {output_path}")

        # Print summary information
        print("\n论文摘要信息:")
        print(f"标题: {paper.metadata.title}")
        print(f"作者: {', '.join(paper.metadata.authors)}")
        print(f"关键词: {', '.join(paper.keywords)}")
        print(f"章节数: {len(paper.sections)}")
        print(f"图表数: {len(paper.figures)}")
        print(f"表格数: {len(paper.tables)}")
        print(f"公式数: {len(paper.formulas)}")
        print(f"参考文献数: {len(paper.references)}")

        return output_path

    except Exception as e:
        print(f"处理论文时出错: {e}")
        import traceback
        traceback.print_exc()
        return None

# Usage example
if __name__ == "__main__":
    # Replace with a real paper file path
    sample_paper = "crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf"

    # An explicit output path can be given, or the default used
    # output_file = "/path/to/output/paper_structure.md"
    # extract_and_save_as_markdown(sample_paper, output_file)

    # Use the default output path (input file name with a .md extension)
    extract_and_save_as_markdown(sample_paper)

    # # Example of batch-processing several papers
    # paper_dir = Path("/path/to/papers/folder")
    # output_dir = Path("/path/to/output/folder")
    #
    # # Make sure the output directory exists
    # output_dir.mkdir(parents=True, exist_ok=True)
    #
    # # Process every PDF in the directory
    # for paper_file in paper_dir.glob("*.pdf"):
    #     output_file = output_dir / f"{paper_file.stem}.md"
    #     extract_and_save_as_markdown(paper_file, output_file)
@@ -1,275 +0,0 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os

from unstructured.partition.auto import partition
from unstructured.documents.elements import (
    Text, Title, NarrativeText, ListItem, Table,
    Footer, Header, PageBreak, Image, Address
)


@dataclass
class TextExtractorConfig:
    """Configuration for the general document extractor

    Attributes:
        extract_headers_footers: whether to extract page headers and footers
        extract_tables: whether to extract table content
        extract_lists: whether to extract list content
        extract_titles: whether to extract titles
        paragraph_separator: separator between paragraphs
        text_cleanup: text-cleanup option dict
    """
    extract_headers_footers: bool = False
    extract_tables: bool = True
    extract_lists: bool = True
    extract_titles: bool = True
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class UnstructuredTextExtractor:
    """General document text extractor

    Uses the unstructured library to extract text from many document formats
    behind one interface with shared configuration options.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        # Documents
        '.pdf', '.docx', '.doc', '.txt',
        # Presentations
        '.ppt', '.pptx',
        # Spreadsheets
        '.xlsx', '.xls', '.csv',
        # Images
        '.png', '.jpg', '.jpeg', '.tiff',
        # E-mail
        '.eml', '.msg', '.p7s',
        # Markdown
        ".md",
        # Org Mode
        ".org",
        # Open Office
        ".odt",
        # reStructured Text
        ".rst",
        # Rich Text
        ".rtf",
        # TSV
        ".tsv",
        # EPUB
        '.epub',
        # Other formats
        '.html', '.xml', '.json',
    }

    def __init__(self, config: Optional[TextExtractorConfig] = None):
        """Initialize the extractor

        Args:
            config: extractor configuration; defaults are used when None
        """
        self.config = config or TextExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure the logger"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Attach a file handler
        fh = logging.FileHandler('text_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate the file

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over the limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"File size ({file_size_mb:.1f}MB) exceeds limit of {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported format: {path.suffix}. "
                f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text

        Args:
            text: raw text

        Returns:
            str: cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    def _should_extract_element(self, element) -> bool:
        """Decide whether an element should be extracted

        Args:
            element: document element

        Returns:
            bool: whether it should be extracted
        """
        if isinstance(element, (Text, NarrativeText)):
            return True

        if isinstance(element, Title) and self.config.extract_titles:
            return True

        if isinstance(element, ListItem) and self.config.extract_lists:
            return True

        if isinstance(element, Table) and self.config.extract_tables:
            return True

        if isinstance(element, (Header, Footer)) and self.config.extract_headers_footers:
            return True

        return False

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats"""
        return sorted(UnstructuredTextExtractor.SUPPORTED_EXTENSIONS)

    def extract_text(
        self,
        file_path: Union[str, Path],
        strategy: str = "fast"
    ) -> str:
        """Extract text

        Args:
            file_path: file path
            strategy: extraction strategy ("fast" or "accurate")

        Returns:
            str: the extracted text content

        Raises:
            Exception: errors raised during extraction
        """
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            # Changed here: pass nlp=False to disable NLTK
            elements = partition(
                str(path),
                strategy=strategy,
                include_metadata=True,
                nlp=False,
            )

            # The rest is unchanged
            text_parts = []
            for element in elements:
                if self._should_extract_element(element):
                    text = str(element)
                    cleaned_text = self._cleanup_text(text)
                    if cleaned_text:
                        if isinstance(element, (Header, Footer)):
                            prefix = "[Header] " if isinstance(element, Header) else "[Footer] "
                            text_parts.append(f"{prefix}{cleaned_text}")
                        else:
                            text_parts.append(cleaned_text)

            return self.config.paragraph_separator.join(text_parts)

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise


def main():
    """Entry point: usage demo"""
    # Configuration
    config = TextExtractorConfig(
        extract_headers_footers=True,
        extract_tables=True,
        extract_lists=True,
        extract_titles=True,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # Build the extractor
    extractor = UnstructuredTextExtractor(config)

    # Usage example
    try:
        # Replace with a real file path
        sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
        if Path(sample_file).exists():
            text = extractor.extract_text(sample_file)
            print("提取的文本:")
            print(text)
        else:
            print(f"示例文件 {sample_file} 不存在")

        print("\n支持的格式:", extractor.get_supported_formats())

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
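One design consequence of `_should_extract_element` worth noting: plain `Text`/`NarrativeText` elements always pass, so the config flags only gate the structural element types. A short sketch (the file path is a placeholder):

tables_only = TextExtractorConfig(
    extract_titles=False,
    extract_lists=False,
    extract_tables=True,
    extract_headers_footers=False,
)
extractor = UnstructuredTextExtractor(tables_only)
# Body text still comes through; titles, lists, headers and footers are dropped.
print(extractor.extract_text('report.pdf'))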
@@ -1,219 +0,0 @@
|
|||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from dataclasses import dataclass, field
|
|
||||||
from typing import Dict, Optional, Union
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
import logging
|
|
||||||
import trafilatura
|
|
||||||
import requests
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class WebExtractorConfig:
|
|
||||||
"""网页内容提取器配置类
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
extract_comments: 是否提取评论
|
|
||||||
extract_tables: 是否提取表格
|
|
||||||
extract_links: 是否保留链接信息
|
|
||||||
paragraph_separator: 段落分隔符
|
|
||||||
timeout: 网络请求超时时间(秒)
|
|
||||||
    max_retries: 最大重试次数
    user_agent: 自定义User-Agent
    text_cleanup: 文本清理选项
    """
    extract_comments: bool = False
    extract_tables: bool = True
    extract_links: bool = False
    paragraph_separator: str = '\n\n'
    timeout: int = 10
    max_retries: int = 3
    user_agent: str = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class WebTextExtractor:
    """网页文本内容提取器

    使用trafilatura库提取网页中的主要文本内容,去除广告、导航等无关内容。
    """

    def __init__(self, config: Optional[WebExtractorConfig] = None):
        """初始化提取器

        Args:
            config: 提取器配置对象,如果为None则使用默认配置
        """
        self.config = config or WebExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """配置日志记录器"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # 添加文件处理器
        fh = logging.FileHandler('web_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_url(self, url: str) -> bool:
        """验证URL格式是否有效

        Args:
            url: 网页URL

        Returns:
            bool: URL是否有效
        """
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc])
        except Exception:
            return False

    def _download_webpage(self, url: str) -> Optional[str]:
        """下载网页内容

        Args:
            url: 网页URL

        Returns:
            Optional[str]: 网页HTML内容,失败返回None

        Raises:
            Exception: 下载失败时抛出异常
        """
        headers = {'User-Agent': self.config.user_agent}

        for attempt in range(self.config.max_retries):
            try:
                response = requests.get(
                    url,
                    headers=headers,
                    timeout=self.config.timeout
                )
                response.raise_for_status()
                return response.text
            except requests.RequestException as e:
                self.logger.warning(f"Attempt {attempt + 1} failed: {e}")
                if attempt == self.config.max_retries - 1:
                    raise Exception(f"Failed to download webpage after {self.config.max_retries} attempts: {e}")
        return None

    def _cleanup_text(self, text: str) -> str:
        """清理文本

        Args:
            text: 原始文本

        Returns:
            str: 清理后的文本
        """
        if not text:
            return ""

        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    def extract_text(self, url: str) -> str:
        """提取网页文本内容

        Args:
            url: 网页URL

        Returns:
            str: 提取的文本内容

        Raises:
            ValueError: URL无效时抛出
            Exception: 提取失败时抛出
        """
        try:
            if not self._validate_url(url):
                raise ValueError(f"Invalid URL: {url}")

            self.logger.info(f"Processing URL: {url}")

            # 下载网页
            html_content = self._download_webpage(url)
            if not html_content:
                raise Exception("Failed to download webpage")

            # 配置trafilatura提取选项
            extract_config = {
                'include_comments': self.config.extract_comments,
                'include_tables': self.config.extract_tables,
                'include_links': self.config.extract_links,
                'no_fallback': False,  # 允许使用后备提取器
            }

            # 提取文本
            extracted_text = trafilatura.extract(
                html_content,
                **extract_config
            )

            if not extracted_text:
                raise Exception("No content could be extracted")

            # 清理文本
            cleaned_text = self._cleanup_text(extracted_text)

            return cleaned_text

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise


def main():
    """主函数:演示用法"""
    # 配置
    config = WebExtractorConfig(
        extract_comments=False,
        extract_tables=True,
        extract_links=False,
        timeout=10,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # 创建提取器
    extractor = WebTextExtractor(config)

    # 使用示例
    try:
        # 替换为实际的URL
        sample_url = 'https://arxiv.org/abs/2412.00036'
        text = extractor.extract_text(sample_url)
        print("提取的文本:")
        print(text)

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
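The extractor above ultimately reduces to two library calls. As a minimal sketch of that core path (a hedged illustration, not project code; it assumes trafilatura is installed and uses an arbitrary example URL):

import trafilatura

# fetch_url downloads the page; extract strips navigation, ads and other boilerplate
html = trafilatura.fetch_url('https://example.com')
text = trafilatura.extract(html, include_comments=False,
                           include_tables=True, include_links=False)
print(text or 'No content could be extracted')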
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, update_ui_latest_msg
+from toolbox import CatchException, update_ui, update_ui_lastest_msg
 from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from request_llms.bridge_all import predict_no_ui_long_connection
@@ -13,7 +13,7 @@ class MiniGame_ASCII_Art(GptAcademicGameBaseState):
         else:
             if prompt.strip() == 'exit':
                 self.delete_game = True
-                yield from update_ui_latest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
+                yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
                 return
         chatbot.append([prompt, ""])
         yield from update_ui(chatbot=chatbot, history=history)
@@ -31,12 +31,12 @@ class MiniGame_ASCII_Art(GptAcademicGameBaseState):
             self.cur_task = 'identify user guess'
             res = get_code_block(raw_res)
             history += ['', f'the answer is {self.obj}', inputs, res]
-            yield from update_ui_latest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
+            yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)

         elif self.cur_task == 'identify user guess':
             if is_same_thing(self.obj, prompt, self.llm_kwargs):
                 self.delete_game = True
-                yield from update_ui_latest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
+                yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
             else:
                 self.cur_task = 'identify user guess'
-                yield from update_ui_latest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
+                yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
@@ -63,7 +63,7 @@ prompts_terminate = """小说的前文回顾:
 """


-from toolbox import CatchException, update_ui, update_ui_latest_msg
+from toolbox import CatchException, update_ui, update_ui_lastest_msg
 from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from request_llms.bridge_all import predict_no_ui_long_connection
@@ -112,7 +112,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
         if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
             # should we terminate game here?
             self.delete_game = True
-            yield from update_ui_latest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
+            yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
             return
         if '剧情收尾' in prompt:
             self.cur_task = 'story_terminate'
@@ -137,8 +137,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
         )
         self.story.append(story_paragraph)
         # # 配图
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

         # # 构建后续剧情引导
         previously_on_story = ""
@@ -171,8 +171,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
         )
         self.story.append(story_paragraph)
         # # 配图
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

         # # 构建后续剧情引导
         previously_on_story = ""
@@ -204,8 +204,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
             chatbot, history_, self.sys_prompt_
         )
         # # 配图
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-        yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+        yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

         # terminate game
         self.delete_game = True
@@ -2,7 +2,7 @@ import time
 import importlib
 from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
-from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_latest_msg
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
 import multiprocessing

 def get_class_name(class_string):
@@ -102,10 +102,10 @@ class GptJsonIO():
                 logging.info(f'Repairing json:{response}')
                 repair_prompt = self.generate_repair_prompt(broken_json = response, error=repr(e))
                 result = self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions))
-                logging.info('Repair json success.')
+                logging.info('Repaire json success.')
             except Exception as e:
                 # 没辙了,放弃治疗
-                logging.info('Repair json fail.')
+                logging.info('Repaire json fail.')
                 raise JsonStringError('Cannot repair json.', str(e))
         return result

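The hunk above only renames log strings, but the surrounding `GptJsonIO` logic is a useful pattern: parse the model's JSON output, and on failure hand the broken payload plus the parser error back to the model for one more attempt. A self-contained sketch of the same idea (`gpt_gen_fn` stands for any callable returning model text; this is an illustration, not the class itself):

import json

def parse_with_repair(response: str, gpt_gen_fn):
    try:
        return json.loads(response)
    except json.JSONDecodeError as e:
        # feed the broken payload back to the model together with the parser error
        repaired = gpt_gen_fn(f'Fix this JSON ({e}): {response}')
        return json.loads(repaired)  # if the second attempt also fails, give up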
@@ -3,7 +3,7 @@ import re
 import shutil
 import numpy as np
 from loguru import logger
-from toolbox import update_ui, update_ui_latest_msg, get_log_folder, gen_time_str
+from toolbox import update_ui, update_ui_lastest_msg, get_log_folder, gen_time_str
 from toolbox import get_conf, promote_file_to_downloadzone
 from crazy_functions.latex_fns.latex_toolbox import PRESERVE, TRANSFORM
 from crazy_functions.latex_fns.latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
@@ -20,7 +20,7 @@ def split_subprocess(txt, project_folder, return_dict, opts):
    """
    break down latex file to a linked list,
    each node use a preserve flag to indicate whether it should
-   be processed by GPT.
+   be proccessed by GPT.
    """
    text = txt
    mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM
@@ -85,14 +85,14 @@ class LatexPaperSplit():
    """
    break down latex file to a linked list,
    each node use a preserve flag to indicate whether it should
-   be processed by GPT.
+   be proccessed by GPT.
    """
    def __init__(self) -> None:
        self.nodes = None
        self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
            "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
            "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
-       # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
+       # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
        self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
        self.title = "unknown"
        self.abstract = "unknown"
@@ -151,7 +151,7 @@ class LatexPaperSplit():
        """
        break down latex file to a linked list,
        each node use a preserve flag to indicate whether it should
-       be processed by GPT.
+       be proccessed by GPT.
        P.S. use multiprocessing to avoid timeout error
        """
        import multiprocessing
@@ -300,8 +300,7 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
    write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)

    # <-------- 写出文件 ---------->
-   model_name = llm_kwargs['llm_model'].replace('_', '\\_') # 替换LLM模型名称中的下划线为转义字符
-   msg = f"当前大语言模型: {model_name},当前语言模型温度设定: {llm_kwargs['temperature']}。"
+   msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。"
    final_tex = lps.merge_result(pfg.file_result, mode, msg)
    objdump((lps, pfg.file_result, mode, msg), file=pj(project_folder,'merge_result.pkl'))
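Context for the removed `model_name` line: an unescaped underscore in LaTeX text mode triggers a "Missing $ inserted" error, so a model name containing `_` must be escaped before it is written into the generated .tex. A one-line illustration (the model name is hypothetical):

escaped = 'some_model_name'.replace('_', '\\_')  # -> some\_model\_name, safe in LaTeX text mode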
@@ -351,42 +350,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
    max_try = 32
    chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
    chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面
-   yield from update_ui_latest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
-   # 检查是否需要使用xelatex
-   def check_if_need_xelatex(tex_path):
-       try:
-           with open(tex_path, 'r', encoding='utf-8', errors='replace') as f:
-               content = f.read(5000)
-           # 检查是否有使用xelatex的宏包
-           need_xelatex = any(
-               pkg in content
-               for pkg in ['fontspec', 'xeCJK', 'xetex', 'unicode-math', 'xltxtra', 'xunicode']
-           )
-           if need_xelatex:
-               logger.info(f"检测到宏包需要xelatex编译, 切换至xelatex编译")
-           else:
-               logger.info(f"未检测到宏包需要xelatex编译, 使用pdflatex编译")
-           return need_xelatex
-       except Exception:
-           return False
-
-   # 根据编译器类型返回编译命令
-   def get_compile_command(compiler, filename):
-       compile_command = f'{compiler} -interaction=batchmode -file-line-error {filename}.tex'
-       logger.info('Latex 编译指令: ' + compile_command)
-       return compile_command
-
-   # 确定使用的编译器
-   compiler = 'pdflatex'
-   if check_if_need_xelatex(pj(work_folder_modified, f'{main_file_modified}.tex')):
-       logger.info("检测到宏包需要xelatex编译,切换至xelatex编译")
-       # Check if xelatex is installed
-       try:
-           import subprocess
-           subprocess.run(['xelatex', '--version'], capture_output=True, check=True)
-           compiler = 'xelatex'
-       except (subprocess.CalledProcessError, FileNotFoundError):
-           raise RuntimeError("检测到需要使用xelatex编译,但系统中未安装xelatex。请先安装texlive或其他提供xelatex的LaTeX发行版。")
+   yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面

    while True:
        import os
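The removed block decides between pdflatex and xelatex by scanning the preamble for packages such as fontspec or xeCJK, then probes for the binary by running `xelatex --version` in a subprocess. If one were reintroducing this, a lighter availability check could use `shutil.which` instead of spawning a process (a sketch under that assumption, not the project's code):

import shutil

# prefer xelatex when it is on PATH, otherwise fall back to pdflatex
compiler = 'xelatex' if shutil.which('xelatex') else 'pdflatex'
print(f'compiling with {compiler}')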
@@ -396,36 +360,36 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
            shutil.copyfile(may_exist_bbl, target_bbl)

        # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
-       yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
-       ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
+       yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
+       ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)

-       yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
-       ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
+       yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
+       ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)

        if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
            # 只有第二步成功,才能继续下面的步骤
-           yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
+           yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
            if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
                ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
            if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
                ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)

-           yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
-           ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
-           ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
-           ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
-           ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
+           yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
+           ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
+           ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
+           ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
+           ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)

            if mode!='translate_zh':
-               yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
+               yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
                logger.info( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
                ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd())

-               yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
-               ok = compile_latex_with_timeout(get_compile_command(compiler, 'merge_diff'), work_folder)
+               yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
+               ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
                ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
-               ok = compile_latex_with_timeout(get_compile_command(compiler, 'merge_diff'), work_folder)
-               ok = compile_latex_with_timeout(get_compile_command(compiler, 'merge_diff'), work_folder)
+               ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
+               ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)

        # <---------- 检查结果 ----------->
        results_ = ""
@@ -435,13 +399,13 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
        results_ += f"原始PDF编译是否成功: {original_pdf_success};"
        results_ += f"转化PDF编译是否成功: {modified_pdf_success};"
        results_ += f"对比PDF编译是否成功: {diff_pdf_success};"
-       yield from update_ui_latest_msg(f'第{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面
+       yield from update_ui_lastest_msg(f'第{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面

        if diff_pdf_success:
            result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path
            promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
        if modified_pdf_success:
-           yield from update_ui_latest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
+           yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
            result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
            origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf') # get pdf path
            if os.path.exists(pj(work_folder, '..', 'translation')):
@@ -472,7 +436,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
                work_folder_modified=work_folder_modified,
                fixed_line=fixed_line
            )
-           yield from update_ui_latest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
+           yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
            if not can_retry: break

        return False # 失败啦
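The repeated compile calls above implement the canonical LaTeX citation workflow: one pass to collect \cite keys into the .aux file, a bibtex pass to resolve them into a .bbl, then two more passes to settle cross-references and page numbers. The bare sequence, sketched outside the plugin (file names are placeholders; assumes a TeX distribution on PATH):

import subprocess

for cmd in ('pdflatex -interaction=batchmode -file-line-error main.tex',
            'bibtex main.aux',
            'pdflatex -interaction=batchmode -file-line-error main.tex',
            'pdflatex -interaction=batchmode -file-line-error main.tex'):
    # batchmode keeps LaTeX from stopping at an interactive error prompt
    subprocess.run(cmd, shell=True, cwd='work_folder', check=False)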
@@ -6,16 +6,12 @@ class SafeUnpickler(pickle.Unpickler):
    def get_safe_classes(self):
        from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
        from crazy_functions.latex_fns.latex_toolbox import LinkedListNode
-       from numpy.core.multiarray import scalar
-       from numpy import dtype
        # 定义允许的安全类
        safe_classes = {
            # 在这里添加其他安全的类
            'LatexPaperFileGroup': LatexPaperFileGroup,
            'LatexPaperSplit': LatexPaperSplit,
            'LinkedListNode': LinkedListNode,
-           'scalar': scalar,
-           'dtype': dtype,
        }
        return safe_classes

@@ -26,6 +22,8 @@ class SafeUnpickler(pickle.Unpickler):
        for class_name in self.safe_classes.keys():
            if (class_name in f'{module}.{name}'):
                match_class_name = class_name
+       if module == 'numpy' or module.startswith('numpy.'):
+           return super().find_class(module, name)
        if match_class_name is not None:
            return self.safe_classes[match_class_name]
        # 如果尝试加载未授权的类,则抛出异常
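Both sides of the SafeUnpickler hunk are variations on allow-list unpickling: one branch trusts anything under the numpy module, the other pins `scalar` and `dtype` individually. The underlying pattern, as a self-contained sketch (the allowed entries are examples only):

import io
import pickle

class AllowListUnpickler(pickle.Unpickler):
    ALLOWED = {('collections', 'OrderedDict')}  # extend with classes you trust

    def find_class(self, module, name):
        if (module, name) in self.ALLOWED:
            return super().find_class(module, name)
        # refuse everything else instead of importing arbitrary code
        raise pickle.UnpicklingError(f'forbidden class: {module}.{name}')

def safe_loads(data: bytes):
    return AllowListUnpickler(io.BytesIO(data)).load()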
@@ -168,7 +168,7 @@ def set_forbidden_text(text, mask, pattern, flags=0):
 def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
    """
    Move area out of preserve area (make text editable for GPT)
-   count the number of the braces so as to catch complete text area.
+   count the number of the braces so as to catch compelete text area.
    e.g.
    \begin{abstract} blablablablablabla. \end{abstract}
    """
@@ -188,7 +188,7 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
 def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
    """
    Add a preserve text area in this paper (text become untouchable for GPT).
-   count the number of the braces so as to catch complete text area.
+   count the number of the braces so as to catch compelete text area.
    e.g.
    \caption{blablablablabla\texbf{blablabla}blablabla.}
    """
@@ -214,7 +214,7 @@ def reverse_forbidden_text_careful_brace(
 ):
    """
    Move area out of preserve area (make text editable for GPT)
-   count the number of the braces so as to catch complete text area.
+   count the number of the braces so as to catch compelete text area.
    e.g.
    \caption{blablablablabla\texbf{blablabla}blablabla.}
    """
@@ -287,23 +287,23 @@ def find_main_tex_file(file_manifest, mode):
    在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
    P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码)
    """
-   candidates = []
+   canidates = []
    for texf in file_manifest:
        if os.path.basename(texf).startswith("merge"):
            continue
        with open(texf, "r", encoding="utf8", errors="ignore") as f:
            file_content = f.read()
        if r"\documentclass" in file_content:
-           candidates.append(texf)
+           canidates.append(texf)
        else:
            continue

-   if len(candidates) == 0:
+   if len(canidates) == 0:
        raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)")
-   elif len(candidates) == 1:
-       return candidates[0]
-   else: # if len(candidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
-       candidates_score = []
+   elif len(canidates) == 1:
+       return canidates[0]
+   else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
+       canidates_score = []
        # 给出一些判定模板文档的词作为扣分项
        unexpected_words = [
            "\\LaTeX",
@@ -316,19 +316,19 @@ def find_main_tex_file(file_manifest, mode):
            "reviewers",
        ]
        expected_words = ["\\input", "\\ref", "\\cite"]
-       for texf in candidates:
-           candidates_score.append(0)
+       for texf in canidates:
+           canidates_score.append(0)
            with open(texf, "r", encoding="utf8", errors="ignore") as f:
                file_content = f.read()
                file_content = rm_comments(file_content)
            for uw in unexpected_words:
                if uw in file_content:
-                   candidates_score[-1] -= 1
+                   canidates_score[-1] -= 1
            for uw in expected_words:
                if uw in file_content:
-                   candidates_score[-1] += 1
-       select = np.argmax(candidates_score) # 取评分最高者返回
-       return candidates[select]
+                   canidates_score[-1] += 1
+       select = np.argmax(canidates_score) # 取评分最高者返回
+       return canidates[select]


 def rm_comments(main_file):
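The scoring heuristic in the two hunks above is easy to verify by hand: template markers such as `\LaTeX` or `reviewers` each cost a point, while `\input`, `\ref` and `\cite` each add one, and `np.argmax` picks the best-scoring candidate. The same idea in isolation (file contents are made up for the example):

import numpy as np

def score(content, bad=('\\LaTeX', 'reviewers'), good=('\\input', '\\ref', '\\cite')):
    return sum(w in content for w in good) - sum(w in content for w in bad)

docs = {'template.tex': '\\LaTeX for reviewers',
        'paper.tex': '\\input{intro} \\ref{fig1} \\cite{smith2020}'}
names = list(docs)
print(names[np.argmax([score(docs[n]) for n in names])])  # -> paper.tex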
@@ -374,7 +374,7 @@ def find_tex_file_ignore_case(fp):

 def merge_tex_files_(project_foler, main_file, mode):
    """
-   Merge Tex project recursively
+   Merge Tex project recrusively
    """
    main_file = rm_comments(main_file)
    for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
@@ -429,7 +429,7 @@ def find_title_and_abs(main_file):

 def merge_tex_files(project_foler, main_file, mode):
    """
-   Merge Tex project recursively
+   Merge Tex project recrusively
    P.S. 顺便把CTEX塞进去以支持中文
    P.S. 顺便把Latex的注释去除
    """
@@ -1,43 +0,0 @@
from toolbox import update_ui, get_conf, promote_file_to_downloadzone, update_ui_latest_msg, generate_file_link
from shared_utils.docker_as_service_api import stream_daas
from shared_utils.docker_as_service_api import DockerServiceApiComModel
import random

def download_video(video_id, only_audio, user_name, chatbot, history):
    from toolbox import get_log_folder
    chatbot.append([None, "Processing..."])
    yield from update_ui(chatbot, history)
    client_command = f'{video_id} --audio-only' if only_audio else video_id
    server_urls = get_conf('DAAS_SERVER_URLS')
    server_url = random.choice(server_urls)
    docker_service_api_com_model = DockerServiceApiComModel(client_command=client_command)
    save_file_dir = get_log_folder(user_name, plugin_name='media_downloader')
    for output_manifest in stream_daas(docker_service_api_com_model, server_url, save_file_dir):
        status_buf = ""
        status_buf += "DaaS message: \n\n"
        status_buf += output_manifest['server_message'].replace('\n', '<br/>')
        status_buf += "\n\n"
        status_buf += "DaaS standard error: \n\n"
        status_buf += output_manifest['server_std_err'].replace('\n', '<br/>')
        status_buf += "\n\n"
        status_buf += "DaaS standard output: \n\n"
        status_buf += output_manifest['server_std_out'].replace('\n', '<br/>')
        status_buf += "\n\n"
        status_buf += "DaaS file attach: \n\n"
        status_buf += str(output_manifest['server_file_attach'])
        yield from update_ui_latest_msg(status_buf, chatbot, history)

    return output_manifest['server_file_attach']


def search_videos(keywords):
    from toolbox import get_log_folder
    client_command = keywords
    server_urls = get_conf('DAAS_SERVER_URLS')
    server_url = random.choice(server_urls)
    server_url = server_url.replace('stream', 'search')
    docker_service_api_com_model = DockerServiceApiComModel(client_command=client_command)
    save_file_dir = get_log_folder("default_user", plugin_name='media_downloader')
    for output_manifest in stream_daas(docker_service_api_com_model, server_url, save_file_dir):
        return output_manifest['server_message']
@@ -1,6 +1,6 @@
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import update_ui_latest_msg, disable_auto_promotion
+from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
 from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
@@ -113,7 +113,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
        return [txt]
    else:
        # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT
-       # find a smooth token limit to achieve even separation
+       # find a smooth token limit to achieve even seperation
        count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
        token_limit_smooth = raw_token_num // count + count
        return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model'])
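The smoothing in this hunk deserves a worked example: with raw_token_num = 2500 and TOKEN_LIMIT_PER_FRAGMENT = 1600, count = ceil(2500/1600) = 2 and token_limit_smooth = 2500 // 2 + 2 = 1252, so the text splits into two roughly equal ~1250-token pieces instead of a lopsided 1600 + 900 pair:

import math

raw_token_num, TOKEN_LIMIT_PER_FRAGMENT = 2500, 1600  # example figures
count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
token_limit_smooth = raw_token_num // count + count
print(count, token_limit_smooth)  # -> 2 1252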
@@ -1,6 +1,6 @@
 import os
 from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
-from toolbox import update_ui, promote_file_to_downloadzone, update_ui_latest_msg, disable_auto_promotion
+from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone, get_conf, extract_archive
 from crazy_functions.pdf_fns.parse_pdf import parse_pdf, translate_pdf

@@ -6,128 +6,75 @@ from crazy_functions.crazy_utils import get_files_from_everything
 from shared_utils.colorful import *
 from loguru import logger
 import os
-import requests
 import time

+def refresh_key(doc2x_api_key):
+    import requests, json
+    url = "https://api.doc2x.noedgeai.com/api/token/refresh"
+    res = requests.post(
+        url,
+        headers={"Authorization": "Bearer " + doc2x_api_key}
+    )
+    res_json = []
+    if res.status_code == 200:
+        decoded = res.content.decode("utf-8")
+        res_json = json.loads(decoded)
+        doc2x_api_key = res_json['data']['token']
+    else:
+        raise RuntimeError(format("[ERROR] status code: %d, body: %s" % (res.status_code, res.text)))
+    return doc2x_api_key

-def retry_request(max_retries=3, delay=3):
-    """
-    Decorator for retrying HTTP requests
-    Args:
-        max_retries: Maximum number of retry attempts
-        delay: Delay between retries in seconds
-    """
-
-    def decorator(func):
-        def wrapper(*args, **kwargs):
-            for attempt in range(max_retries):
-                try:
-                    return func(*args, **kwargs)
-                except Exception as e:
-                    if attempt < max_retries - 1:
-                        logger.error(
-                            f"Request failed, retrying... ({attempt + 1}/{max_retries}) Error: {e}"
-                        )
-                        time.sleep(delay)
-                        continue
-                    raise e
-            return None
-
-        return wrapper
-
-    return decorator
-
-
-@retry_request()
-def make_request(method, url, **kwargs):
-    """
-    Make HTTP request with retry mechanism
-    """
-    return requests.request(method, url, **kwargs)
-
-
-def doc2x_api_response_status(response, uid=""):
-    """
-    Check the status of Doc2x API response
-    Args:
-        response_data: Response object from Doc2x API
-    """
-    response_json = response.json()
-    response_data = response_json.get("data", {})
-    code = response_json.get("code", "Unknown")
-    meg = response_data.get("message", response_json)
-    trace_id = response.headers.get("trace-id", "Failed to get trace-id")
-    if response.status_code != 200:
-        raise RuntimeError(
-            f"Doc2x return an error:\nTrace ID: {trace_id} {uid}\n{response.status_code} - {response_json}"
-        )
-    if code in ["parse_page_limit_exceeded", "parse_concurrency_limit"]:
-        raise RuntimeError(
-            f"Reached the limit of Doc2x:\nTrace ID: {trace_id} {uid}\n{code} - {meg}"
-        )
-    if code not in ["ok", "success"]:
-        raise RuntimeError(
-            f"Doc2x return an error:\nTrace ID: {trace_id} {uid}\n{code} - {meg}"
-        )
-    return response_data

 def 解析PDF_DOC2X_转Latex(pdf_file_path):
-    zip_file_path, unzipped_folder = 解析PDF_DOC2X(pdf_file_path, format="tex")
+    zip_file_path, unzipped_folder = 解析PDF_DOC2X(pdf_file_path, format='tex')
     return unzipped_folder


-def 解析PDF_DOC2X(pdf_file_path, format="tex"):
+def 解析PDF_DOC2X(pdf_file_path, format='tex'):
     """
     format: 'tex', 'md', 'docx'
     """
+    import requests, json, os
-    DOC2X_API_KEY = get_conf("DOC2X_API_KEY")
+    DOC2X_API_KEY = get_conf('DOC2X_API_KEY')
     latex_dir = get_log_folder(plugin_name="pdf_ocr_latex")
     markdown_dir = get_log_folder(plugin_name="pdf_ocr")
     doc2x_api_key = DOC2X_API_KEY

-    # < ------ 第1步:预上传获取URL,然后上传文件 ------ >
-    logger.info("Doc2x 上传文件:预上传获取URL")
-    res = make_request(
-        "POST",
-        "https://v2.doc2x.noedgeai.com/api/v2/parse/preupload",
-        headers={"Authorization": "Bearer " + doc2x_api_key},
-        timeout=15,
-    )
-    res_data = doc2x_api_response_status(res)
-    upload_url = res_data["url"]
-    uuid = res_data["uid"]
-
-    logger.info("Doc2x 上传文件:上传文件")
-    with open(pdf_file_path, "rb") as file:
-        res = make_request("PUT", upload_url, data=file, timeout=60)
-        res.raise_for_status()
+    # < ------ 第1步:上传 ------ >
+    logger.info("Doc2x 第1步:上传")
+    with open(pdf_file_path, 'rb') as file:
+        res = requests.post(
+            "https://v2.doc2x.noedgeai.com/api/v2/parse/pdf",
+            headers={"Authorization": "Bearer " + doc2x_api_key},
+            data=file
+        )
+    # res_json = []
+    if res.status_code == 200:
+        res_json = res.json()
+    else:
+        raise RuntimeError(f"Doc2x return an error: {res.json()}")
+    uuid = res_json['data']['uid']

     # < ------ 第2步:轮询等待 ------ >
-    logger.info("Doc2x 处理文件中:轮询等待")
-    params = {"uid": uuid}
-    max_attempts = 60
-    attempt = 0
-    while attempt < max_attempts:
-        res = make_request(
-            "GET",
-            "https://v2.doc2x.noedgeai.com/api/v2/parse/status",
-            headers={"Authorization": "Bearer " + doc2x_api_key},
-            params=params,
-            timeout=15,
-        )
-        res_data = doc2x_api_response_status(res)
-        if res_data["status"] == "success":
-            break
-        elif res_data["status"] == "processing":
-            time.sleep(5)
-            logger.info(f"Doc2x is processing at {res_data['progress']}%")
-            attempt += 1
-        else:
-            raise RuntimeError(f"Doc2x return an error: {res_data}")
-    if attempt >= max_attempts:
-        raise RuntimeError("Doc2x processing timeout after maximum attempts")
+    logger.info("Doc2x 第2步:轮询等待")
+    params = {'uid': uuid}
+    while True:
+        res = requests.get(
+            'https://v2.doc2x.noedgeai.com/api/v2/parse/status',
+            headers={"Authorization": "Bearer " + doc2x_api_key},
+            params=params
+        )
+        res_json = res.json()
+        if res_json['data']['status'] == "success":
+            break
+        elif res_json['data']['status'] == "processing":
+            time.sleep(3)
+            logger.info(f"Doc2x is processing at {res_json['data']['progress']}%")
+        elif res_json['data']['status'] == "failed":
+            raise RuntimeError(f"Doc2x return an error: {res_json}")

     # < ------ 第3步:提交转化 ------ >
     logger.info("Doc2x 第3步:提交转化")
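The `retry_request` decorator on the removed side of this hunk is a standard retry-with-delay wrapper around arbitrary callables. A usage sketch, assuming the `retry_request` and `make_request` definitions shown above (the endpoint is illustrative only):

@retry_request(max_retries=5, delay=1)
def fetch_status(uid):
    # any exception raised here is retried up to 5 times, 1 second apart
    return make_request('GET', 'https://example.com/status', params={'uid': uid})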
@@ -137,44 +84,42 @@ def 解析PDF_DOC2X(pdf_file_path, format="tex"):
        "formula_mode": "dollar",
        "filename": "output"
    }
-   res = make_request(
-       "POST",
-       "https://v2.doc2x.noedgeai.com/api/v2/convert/parse",
-       headers={"Authorization": "Bearer " + doc2x_api_key},
-       json=data,
-       timeout=15,
-   )
-   doc2x_api_response_status(res, uid=f"uid: {uuid}")
+   res = requests.post(
+       'https://v2.doc2x.noedgeai.com/api/v2/convert/parse',
+       headers={"Authorization": "Bearer " + doc2x_api_key},
+       json=data
+   )
+   if res.status_code == 200:
+       res_json = res.json()
+   else:
+       raise RuntimeError(f"Doc2x return an error: {res.json()}")

    # < ------ 第4步:等待结果 ------ >
    logger.info("Doc2x 第4步:等待结果")
-   params = {"uid": uuid}
-   max_attempts = 36
-   attempt = 0
-   while attempt < max_attempts:
-       res = make_request(
-           "GET",
-           "https://v2.doc2x.noedgeai.com/api/v2/convert/parse/result",
-           headers={"Authorization": "Bearer " + doc2x_api_key},
-           params=params,
-           timeout=15,
-       )
-       res_data = doc2x_api_response_status(res, uid=f"uid: {uuid}")
-       if res_data["status"] == "success":
-           break
-       elif res_data["status"] == "processing":
-           time.sleep(3)
-           logger.info("Doc2x still processing to convert file")
-           attempt += 1
-   if attempt >= max_attempts:
-       raise RuntimeError("Doc2x conversion timeout after maximum attempts")
+   params = {'uid': uuid}
+   while True:
+       res = requests.get(
+           'https://v2.doc2x.noedgeai.com/api/v2/convert/parse/result',
+           headers={"Authorization": "Bearer " + doc2x_api_key},
+           params=params
+       )
+       res_json = res.json()
+       if res_json['data']['status'] == "success":
+           break
+       elif res_json['data']['status'] == "processing":
+           time.sleep(3)
+           logger.info(f"Doc2x still processing")
+       elif res_json['data']['status'] == "failed":
+           raise RuntimeError(f"Doc2x return an error: {res_json}")

    # < ------ 第5步:最后的处理 ------ >
-   logger.info("Doc2x 第5步:下载转换后的文件")
+   logger.info("Doc2x 第5步:最后的处理")

-   if format == "tex":
+   if format=='tex':
        target_path = latex_dir
-   if format == "md":
+   if format=='md':
        target_path = markdown_dir
    os.makedirs(target_path, exist_ok=True)

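Note also that the removed code bounds both polling loops with max_attempts, whereas the batch-file side loops forever, so an unresponsive job would hang the plugin. The bounded pattern in isolation (interval and limit are example values):

import time

def poll(check, interval=5, max_attempts=60):
    # check() returns True once the remote job finishes; give up after interval * max_attempts seconds
    for _ in range(max_attempts):
        if check():
            return
        time.sleep(interval)
    raise RuntimeError('polling timed out after maximum attempts')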
@@ -182,18 +127,17 @@ def 解析PDF_DOC2X(pdf_file_path, format="tex"):
    # < ------ 下载 ------ >
    for attempt in range(max_attempt):
        try:
-           result_url = res_data["url"]
-           res = make_request("GET", result_url, timeout=60)
-           zip_path = os.path.join(target_path, gen_time_str() + ".zip")
+           result_url = res_json['data']['url']
+           res = requests.get(result_url)
+           zip_path = os.path.join(target_path, gen_time_str() + '.zip')
            unzip_path = os.path.join(target_path, gen_time_str())
            if res.status_code == 200:
-               with open(zip_path, "wb") as f:
-                   f.write(res.content)
+               with open(zip_path, "wb") as f: f.write(res.content)
            else:
                raise RuntimeError(f"Doc2x return an error: {res.json()}")
        except Exception as e:
            if attempt < max_attempt - 1:
-               logger.error(f"Failed to download uid = {uuid} file, retrying... {e}")
+               logger.error(f"Failed to download latex file, retrying... {e}")
                time.sleep(3)
                continue
            else:
@@ -201,31 +145,22 @@ def 解析PDF_DOC2X(pdf_file_path, format="tex"):

    # < ------ 解压 ------ >
    import zipfile
-   with zipfile.ZipFile(zip_path, "r") as zip_ref:
+   with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(unzip_path)
    return zip_path, unzip_path


-def 解析PDF_DOC2X_单文件(
-   fp,
-   project_folder,
-   llm_kwargs,
-   plugin_kwargs,
-   chatbot,
-   history,
-   system_prompt,
-   DOC2X_API_KEY,
-   user_request,
-):
+def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request):
    def pdf2markdown(filepath):
        chatbot.append((None, f"Doc2x 解析中"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-       md_zip_path, unzipped_folder = 解析PDF_DOC2X(filepath, format="md")
+       md_zip_path, unzipped_folder = 解析PDF_DOC2X(filepath, format='md')

        promote_file_to_downloadzone(md_zip_path, chatbot=chatbot)
        chatbot.append((None, f"完成解析 {md_zip_path} ..."))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return md_zip_path

    def deliver_to_markdown_plugin(md_zip_path, user_request):
@@ -239,97 +174,77 @@ def 解析PDF_DOC2X_单文件(
|
|||||||
os.makedirs(target_path_base, exist_ok=True)
|
os.makedirs(target_path_base, exist_ok=True)
|
||||||
shutil.copyfile(md_zip_path, this_file_path)
|
shutil.copyfile(md_zip_path, this_file_path)
|
||||||
ex_folder = this_file_path + ".extract"
|
ex_folder = this_file_path + ".extract"
|
||||||
extract_archive(file_path=this_file_path, dest_dir=ex_folder)
|
extract_archive(
|
||||||
|
file_path=this_file_path, dest_dir=ex_folder
|
||||||
|
)
|
||||||
|
|
||||||
# edit markdown files
|
# edit markdown files
|
||||||
success, file_manifest, project_folder = get_files_from_everything(
|
success, file_manifest, project_folder = get_files_from_everything(ex_folder, type='.md')
|
||||||
ex_folder, type=".md"
|
|
||||||
)
|
|
||||||
for generated_fp in file_manifest:
|
for generated_fp in file_manifest:
|
||||||
# 修正一些公式问题
|
# 修正一些公式问题
|
||||||
with open(generated_fp, "r", encoding="utf8") as f:
|
with open(generated_fp, 'r', encoding='utf8') as f:
|
||||||
content = f.read()
|
content = f.read()
|
||||||
# 将公式中的\[ \]替换成$$
|
# 将公式中的\[ \]替换成$$
|
||||||
content = content.replace(r"\[", r"$$").replace(r"\]", r"$$")
|
content = content.replace(r'\[', r'$$').replace(r'\]', r'$$')
|
||||||
# 将公式中的\( \)替换成$
|
# 将公式中的\( \)替换成$
|
||||||
content = content.replace(r"\(", r"$").replace(r"\)", r"$")
|
content = content.replace(r'\(', r'$').replace(r'\)', r'$')
|
||||||
content = content.replace("```markdown", "\n").replace("```", "\n")
|
content = content.replace('```markdown', '\n').replace('```', '\n')
|
||||||
with open(generated_fp, "w", encoding="utf8") as f:
|
with open(generated_fp, 'w', encoding='utf8') as f:
|
||||||
f.write(content)
|
f.write(content)
|
||||||
promote_file_to_downloadzone(generated_fp, chatbot=chatbot)
|
promote_file_to_downloadzone(generated_fp, chatbot=chatbot)
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
# 生成在线预览html
|
# 生成在线预览html
|
||||||
file_name = "在线预览翻译(原文)" + gen_time_str() + ".html"
|
file_name = '在线预览翻译(原文)' + gen_time_str() + '.html'
|
||||||
preview_fp = os.path.join(ex_folder, file_name)
|
preview_fp = os.path.join(ex_folder, file_name)
|
||||||
from shared_utils.advanced_markdown_format import (
|
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
|
||||||
markdown_convertion_for_file,
|
|
||||||
)
|
|
||||||
|
|
||||||
with open(generated_fp, "r", encoding="utf-8") as f:
|
with open(generated_fp, "r", encoding="utf-8") as f:
|
||||||
md = f.read()
|
md = f.read()
|
||||||
# # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
|
# # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
|
||||||
# md = re.sub(r'^<table>', r'.<table>', md, flags=re.MULTILINE)
|
# md = re.sub(r'^<table>', r'.<table>', md, flags=re.MULTILINE)
|
||||||
html = markdown_convertion_for_file(md)
|
html = markdown_convertion_for_file(md)
|
||||||
with open(preview_fp, "w", encoding="utf-8") as f:
|
with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
|
||||||
f.write(html)
|
|
||||||
chatbot.append([None, f"生成在线预览:{generate_file_link([preview_fp])}"])
|
chatbot.append([None, f"生成在线预览:{generate_file_link([preview_fp])}"])
|
||||||
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
|
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
|
||||||
|
|
||||||
chatbot.append((None, f"调用Markdown插件 {ex_folder} ..."))
|
|
||||||
plugin_kwargs["markdown_expected_output_dir"] = ex_folder
|
|
||||||
|
|
||||||
translated_f_name = "translated_markdown.md"
|
|
||||||
generated_fp = plugin_kwargs["markdown_expected_output_path"] = os.path.join(
|
chatbot.append((None, f"调用Markdown插件 {ex_folder} ..."))
|
||||||
ex_folder, translated_f_name
|
plugin_kwargs['markdown_expected_output_dir'] = ex_folder
|
||||||
)
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
translated_f_name = 'translated_markdown.md'
|
||||||
yield from Markdown英译中(
|
generated_fp = plugin_kwargs['markdown_expected_output_path'] = os.path.join(ex_folder, translated_f_name)
|
||||||
ex_folder,
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
llm_kwargs,
|
yield from Markdown英译中(ex_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
|
||||||
plugin_kwargs,
|
|
||||||
chatbot,
|
|
||||||
history,
|
|
||||||
system_prompt,
|
|
||||||
user_request,
|
|
||||||
)
|
|
||||||
if os.path.exists(generated_fp):
|
if os.path.exists(generated_fp):
|
||||||
# 修正一些公式问题
|
# 修正一些公式问题
|
||||||
with open(generated_fp, "r", encoding="utf8") as f:
|
with open(generated_fp, 'r', encoding='utf8') as f: content = f.read()
|
||||||
content = f.read()
|
content = content.replace('```markdown', '\n').replace('```', '\n')
|
||||||
content = content.replace("```markdown", "\n").replace("```", "\n")
|
|
||||||
# Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
|
# Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
|
||||||
# content = re.sub(r'^<table>', r'.<table>', content, flags=re.MULTILINE)
|
# content = re.sub(r'^<table>', r'.<table>', content, flags=re.MULTILINE)
|
||||||
with open(generated_fp, "w", encoding="utf8") as f:
|
with open(generated_fp, 'w', encoding='utf8') as f: f.write(content)
|
||||||
f.write(content)
|
|
||||||
# 生成在线预览html
|
# 生成在线预览html
|
||||||
file_name = "在线预览翻译" + gen_time_str() + ".html"
|
file_name = '在线预览翻译' + gen_time_str() + '.html'
|
||||||
preview_fp = os.path.join(ex_folder, file_name)
|
preview_fp = os.path.join(ex_folder, file_name)
|
||||||
from shared_utils.advanced_markdown_format import (
|
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
|
||||||
markdown_convertion_for_file,
|
|
||||||
)
|
|
||||||
|
|
||||||
with open(generated_fp, "r", encoding="utf-8") as f:
|
with open(generated_fp, "r", encoding="utf-8") as f:
|
||||||
md = f.read()
|
md = f.read()
|
||||||
html = markdown_convertion_for_file(md)
|
html = markdown_convertion_for_file(md)
|
||||||
with open(preview_fp, "w", encoding="utf-8") as f:
|
with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
|
||||||
f.write(html)
|
|
||||||
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
|
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
|
||||||
# 生成包含图片的压缩包
|
# 生成包含图片的压缩包
|
||||||
dest_folder = get_log_folder(chatbot.get_user())
|
dest_folder = get_log_folder(chatbot.get_user())
|
||||||
zip_name = "翻译后的带图文档.zip"
|
zip_name = '翻译后的带图文档.zip'
|
||||||
zip_folder(
|
zip_folder(source_folder=ex_folder, dest_folder=dest_folder, zip_name=zip_name)
|
||||||
source_folder=ex_folder, dest_folder=dest_folder, zip_name=zip_name
|
|
||||||
)
|
|
||||||
zip_fp = os.path.join(dest_folder, zip_name)
|
zip_fp = os.path.join(dest_folder, zip_name)
|
||||||
promote_file_to_downloadzone(zip_fp, chatbot=chatbot)
|
promote_file_to_downloadzone(zip_fp, chatbot=chatbot)
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
md_zip_path = yield from pdf2markdown(fp)
|
md_zip_path = yield from pdf2markdown(fp)
|
||||||
yield from deliver_to_markdown_plugin(md_zip_path, user_request)
|
yield from deliver_to_markdown_plugin(md_zip_path, user_request)
|
||||||
|
|
||||||
|
|
||||||
def 解析PDF_基于DOC2X(file_manifest, *args):
|
def 解析PDF_基于DOC2X(file_manifest, *args):
|
||||||
for index, fp in enumerate(file_manifest):
|
for index, fp in enumerate(file_manifest):
|
||||||
yield from 解析PDF_DOC2X_单文件(fp, *args)
|
yield from 解析PDF_DOC2X_单文件(fp, *args)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
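Side note on the hunk above: the PDF flow hands work to the Markdown plugin purely through `plugin_kwargs`. Below is a minimal sketch of that contract, assuming the downstream plugin only reads the two keys set here; the stub and paths are illustrative, not the project's real plugin.

```python
import os

def markdown_plugin_stub(plugin_kwargs: dict):
    # The caller promises exactly these two keys (see the diff above).
    out_dir = plugin_kwargs["markdown_expected_output_dir"]
    out_path = plugin_kwargs["markdown_expected_output_path"]
    os.makedirs(out_dir, exist_ok=True)
    # A real plugin would write its translated markdown here.
    with open(out_path, "w", encoding="utf8") as f:
        f.write("# translated content placeholder\n")

ex_folder = "/tmp/ex_folder"  # hypothetical working directory
kwargs = {
    "markdown_expected_output_dir": ex_folder,
    "markdown_expected_output_path": os.path.join(ex_folder, "translated_markdown.md"),
}
markdown_plugin_stub(kwargs)
assert os.path.exists(kwargs["markdown_expected_output_path"])
```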
@@ -14,17 +14,17 @@ def extract_text_from_files(txt, chatbot, history):
     final_result(list):文本内容
     page_one(list):第一页内容/摘要
     file_manifest(list):文件路径
-    exception(string):需要用户手动处理的信息,如没出错则保持为空
+    excption(string):需要用户手动处理的信息,如没出错则保持为空
     """

     final_result = []
     page_one = []
     file_manifest = []
-    exception = ""
+    excption = ""

     if txt == "":
         final_result.append(txt)
-        return False, final_result, page_one, file_manifest, exception #如输入区内容不是文件则直接返回输入区内容
+        return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容

     #查找输入区内容中的文件
     file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf')
@@ -33,20 +33,20 @@ def extract_text_from_files(txt, chatbot, history):
     file_doc,doc_manifest,folder_doc = get_files_from_everything(txt, '.doc')

     if file_doc:
-        exception = "word"
-        return False, final_result, page_one, file_manifest, exception
+        excption = "word"
+        return False, final_result, page_one, file_manifest, excption

     file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest)
     if file_num == 0:
         final_result.append(txt)
-        return False, final_result, page_one, file_manifest, exception #如输入区内容不是文件则直接返回输入区内容
+        return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容

     if file_pdf:
         try: # 尝试导入依赖,如果缺少依赖,则给出安装建议
             import fitz
         except:
-            exception = "pdf"
-            return False, final_result, page_one, file_manifest, exception
+            excption = "pdf"
+            return False, final_result, page_one, file_manifest, excption
         for index, fp in enumerate(pdf_manifest):
             file_content, pdf_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF
             file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
@@ -72,8 +72,8 @@ def extract_text_from_files(txt, chatbot, history):
         try: # 尝试导入依赖,如果缺少依赖,则给出安装建议
             from docx import Document
         except:
-            exception = "word_pip"
-            return False, final_result, page_one, file_manifest, exception
+            excption = "word_pip"
+            return False, final_result, page_one, file_manifest, excption
         for index, fp in enumerate(word_manifest):
             doc = Document(fp)
             file_content = '\n'.join([p.text for p in doc.paragraphs])
@@ -82,4 +82,4 @@ def extract_text_from_files(txt, chatbot, history):
             final_result.append(file_content)
             file_manifest.append(os.path.relpath(fp, folder_word))

-    return True, final_result, page_one, file_manifest, exception
+    return True, final_result, page_one, file_manifest, excption
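For reference, a caller-side sketch of the five-tuple this function returns; the error codes come straight from the hunks above, while the handler itself is illustrative.

```python
def handle_extraction(txt, chatbot, history, extract_text_from_files):
    # Unpack the five-tuple returned by extract_text_from_files.
    file_exist, final_result, page_one, file_manifest, excption = \
        extract_text_from_files(txt, chatbot, history)
    if excption == "word":
        return "please convert .doc files to .docx first"
    if excption == "pdf":
        return "missing dependency, try: pip install --upgrade pymupdf"
    if excption == "word_pip":
        return "missing dependency, try: pip install python-docx"
    # No exception: final_result holds the text of each file in file_manifest.
    return list(zip(file_manifest, final_result))
```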
@@ -1,22 +1,45 @@
 import os
 from llama_index.core import SimpleDirectoryReader

-supports_format = ['.csv', '.docx', '.epub', '.ipynb', '.mbox', '.md', '.pdf', '.txt', '.ppt',
-                   '.pptm', '.pptx']
+supports_format = ['.csv', '.docx','.doc', '.epub', '.ipynb', '.mbox', '.md', '.pdf', '.txt', '.ppt',
+                   '.pptm', '.pptx','.py', '.xls', '.xlsx', '.html', '.json', '.xml', '.yaml', '.yml' ,'.m']
+
+def read_docx_doc(file_path):
+    if file_path.split(".")[-1] == "docx":
+        from docx import Document
+        doc = Document(file_path)
+        file_content = "\n".join([para.text for para in doc.paragraphs])
+    else:
+        try:
+            import win32com.client
+            word = win32com.client.Dispatch("Word.Application")
+            word.visible = False
+            # 打开文件
+            doc = word.Documents.Open(os.getcwd() + '/' + file_path)
+            # file_content = doc.Content.Text
+            doc = word.ActiveDocument
+            file_content = doc.Range().Text
+            doc.Close()
+            word.Quit()
+        except:
+            raise RuntimeError('请先将.doc文档转换为.docx文档。')
+    return file_content

 # 修改后的 extract_text 函数,结合 SimpleDirectoryReader 和自定义解析逻辑
+import os

 def extract_text(file_path):
     _, ext = os.path.splitext(file_path.lower())

     # 使用 SimpleDirectoryReader 处理它支持的文件格式
-    if ext in supports_format:
-        try:
-            reader = SimpleDirectoryReader(input_files=[file_path])
-            documents = reader.load_data()
-            if len(documents) > 0:
-                return documents[0].text
-        except Exception as e:
-            pass
+    if ext in ['.docx', '.doc']:
+        return read_docx_doc(file_path)
+    try:
+        reader = SimpleDirectoryReader(input_files=[file_path])
+        documents = reader.load_data()
+        if len(documents) > 0:
+            return documents[0].text
+    except Exception as e:
+        pass

     return None
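A quick usage sketch for the new `read_docx_doc`/`extract_text` pair, assuming it is run from the repository root; the sample path is hypothetical.

```python
from crazy_functions.rag_fns.rag_file_support import extract_text

# .docx/.doc now bypass SimpleDirectoryReader and go through read_docx_doc.
text = extract_text("sample_docs/report.docx")
if text is None:
    print("unsupported format, empty document, or parse failure")
else:
    print(text[:200])
```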
@@ -60,7 +60,7 @@ def similarity_search_with_score_by_vector(
     self, embedding: List[float], k: int = 4
 ) -> List[Tuple[Document, float]]:

-    def separate_list(ls: List[int]) -> List[List[int]]:
+    def seperate_list(ls: List[int]) -> List[List[int]]:
         lists = []
         ls1 = [ls[0]]
         for i in range(1, len(ls)):
@@ -82,7 +82,7 @@ def similarity_search_with_score_by_vector(
             continue
         _id = self.index_to_docstore_id[i]
         doc = self.docstore.search(_id)
-        if not self.chunk_content:
+        if not self.chunk_conent:
             if not isinstance(doc, Document):
                 raise ValueError(f"Could not find document for id {_id}, got {doc}")
             doc.metadata["score"] = int(scores[0][j])
@@ -104,12 +104,12 @@ def similarity_search_with_score_by_vector(
                     id_set.add(l)
             if break_flag:
                 break
-    if not self.chunk_content:
+    if not self.chunk_conent:
         return docs
     if len(id_set) == 0 and self.score_threshold > 0:
         return []
     id_list = sorted(list(id_set))
-    id_lists = separate_list(id_list)
+    id_lists = seperate_list(id_list)
     for id_seq in id_lists:
         for id in id_seq:
             if id == id_seq[0]:
@@ -132,7 +132,7 @@ class LocalDocQA:
     embeddings: object = None
     top_k: int = VECTOR_SEARCH_TOP_K
     chunk_size: int = CHUNK_SIZE
-    chunk_content: bool = True
+    chunk_conent: bool = True
     score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD

     def init_cfg(self,
@@ -209,16 +209,16 @@ class LocalDocQA:

     # query 查询内容
     # vs_path 知识库路径
-    # chunk_content 是否启用上下文关联
+    # chunk_conent 是否启用上下文关联
     # score_threshold 搜索匹配score阈值
     # vector_search_top_k 搜索知识库内容条数,默认搜索5条结果
     # chunk_sizes 匹配单段内容的连接上下文长度
-    def get_knowledge_based_content_test(self, query, vs_path, chunk_content,
+    def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
                                          score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
                                          vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
                                          text2vec=None):
         self.vector_store = FAISS.load_local(vs_path, text2vec)
-        self.vector_store.chunk_content = chunk_content
+        self.vector_store.chunk_conent = chunk_conent
         self.vector_store.score_threshold = score_threshold
         self.vector_store.chunk_size = chunk_size
@@ -241,7 +241,7 @@ class LocalDocQA:



-def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_content, one_content_segmentation, text2vec):
+def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
     for file in files:
         assert os.path.exists(file), "输入文件不存在:" + file
     import nltk
@@ -297,7 +297,7 @@ class knowledge_archive_interface():
             files=file_manifest,
             sentence_size=100,
             history=[],
-            one_content="",
+            one_conent="",
             one_content_segmentation="",
             text2vec = self.get_chinese_text2vec(),
         )
@@ -319,19 +19,19 @@ class knowledge_archive_interface():
             files=[],
             sentence_size=100,
             history=[],
-            one_content="",
+            one_conent="",
             one_content_segmentation="",
             text2vec = self.get_chinese_text2vec(),
         )
         VECTOR_SEARCH_SCORE_THRESHOLD = 0
         VECTOR_SEARCH_TOP_K = 4
         CHUNK_SIZE = 512
-        resp, prompt = self.qa_handle.get_knowledge_based_content_test(
+        resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
             query = txt,
             vs_path = self.kai_path,
             score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
             vector_search_top_k=VECTOR_SEARCH_TOP_K,
-            chunk_content=True,
+            chunk_conent=True,
             chunk_size=CHUNK_SIZE,
             text2vec = self.get_chinese_text2vec(),
         )
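For context, the nested helper renamed in the first hunk splits a sorted id list into runs of consecutive integers, so neighboring chunks can later be stitched together. A self-contained re-implementation follows; only the first few lines are visible in the diff, so the tail is inferred and should be treated as a sketch.

```python
from typing import List

def seperate_list(ls: List[int]) -> List[List[int]]:
    # Group consecutive ids into runs, e.g. [1,2,3,7,8] -> [[1,2,3],[7,8]].
    lists = []
    ls1 = [ls[0]]
    for i in range(1, len(ls)):
        if ls[i] == ls[i - 1] + 1:
            ls1.append(ls[i])
        else:
            lists.append(ls1)
            ls1 = [ls[i]]
    lists.append(ls1)
    return lists

assert seperate_list([1, 2, 3, 7, 8, 12]) == [[1, 2, 3], [7, 8], [12]]
```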
@@ -1,6 +1,6 @@
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import update_ui_latest_msg, disable_auto_promotion
+from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
 import copy, json, pickle, os, sys, time
@@ -9,14 +9,14 @@ import copy, json, pickle, os, sys, time
 def read_avail_plugin_enum():
     from crazy_functional import get_crazy_functions
     plugin_arr = get_crazy_functions()
-    # remove plugins with out explanation
+    # remove plugins with out explaination
     plugin_arr = {k:v for k, v in plugin_arr.items() if ('Info' in v) and ('Function' in v)}
     plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
     plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
     plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
     plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)})
     prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
-    prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + prompt
+    prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt
     return prompt, plugin_arr_dict, plugin_arr_dict_parse

 def wrap_code(txt):
@@ -55,7 +55,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
         reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfy user requirement most")
     # ⭐ ⭐ ⭐ 选择插件
-    yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
+    yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
     gpt_json_io = GptJsonIO(Plugin)
     gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n"
     gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfy user requirement most"}\n"""
@@ -74,13 +74,13 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt))
         msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
         msg += "\n但您可以尝试再试一次\n"
-        yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
+        yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
         return
     if plugin_sel.plugin_selection not in plugin_arr_dict_parse:
         msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。"
         msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply)
         msg += "\n但您可以尝试再试一次\n"
-        yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
+        yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
         return

     # ⭐ ⭐ ⭐ 确认插件参数
@@ -90,7 +90,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         appendix_info = get_recent_file_prompt_support(chatbot)

     plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection]
-    yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
+    yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
     class PluginExplicit(BaseModel):
         plugin_selection: str = plugin_sel.plugin_selection
         plugin_arg: str = Field(description="The argument of the plugin.", default="")
@@ -109,6 +109,6 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     fn = plugin['Function']
     fn_name = fn.__name__
     msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。'
-    yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
+    yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
     yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1)
     return
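The selection step above hinges on the model returning JSON that `json.loads` can parse into the `Plugin` schema. A rough round-trip sketch with a plain pydantic model (GptJsonIO internals are not reproduced here):

```python
import json
from pydantic import BaseModel, Field

class Plugin(BaseModel):
    plugin_selection: str = Field(default="F_0000")
    reason_of_selection: str = Field(default="This plugin satisfy user requirement most")

# A well-behaved model reply, per the format_instructions in the diff:
gpt_reply = '{"plugin_selection": "F_1234", "reason_of_selection": "F_1234 plugin satisfy user requirement most"}'
plugin_sel = Plugin(**json.loads(gpt_reply))
assert plugin_sel.plugin_selection == "F_1234"
```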
@@ -1,6 +1,6 @@
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import update_ui_latest_msg, get_conf
+from toolbox import update_ui_lastest_msg, get_conf
 from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.json_fns.pydantic_io import GptJsonIO
 import copy, json, pickle, os, sys
@@ -9,7 +9,7 @@ import copy, json, pickle, os, sys
 def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
     ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
     if not ALLOW_RESET_CONFIG:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
             chatbot=chatbot, history=history, delay=2
         )
@@ -30,7 +30,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         new_option_value: str = Field(description="the new value of the option", default=None)

     # ⭐ ⭐ ⭐ 分析用户意图
-    yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n读取新配置中", chatbot=chatbot, history=history, delay=0)
+    yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n读取新配置中", chatbot=chatbot, history=history, delay=0)
     gpt_json_io = GptJsonIO(ModifyConfigurationIntention)
     inputs = "Analyze how to change configuration according to following user input, answer me with json: \n\n" + \
              ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
@@ -44,11 +44,11 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s

     ok = (explicit_conf in txt)
     if ok:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}",
             chatbot=chatbot, history=history, delay=1
         )
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}\n\n正在修改配置中",
             chatbot=chatbot, history=history, delay=2
         )
@@ -57,25 +57,25 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         from toolbox import set_conf
         set_conf(explicit_conf, user_intention.new_option_value)

-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,重新页面即可生效。", chatbot=chatbot, history=history, delay=1
         )
     else:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"失败,如果需要配置{explicit_conf},您需要明确说明并在指令中提到它。", chatbot=chatbot, history=history, delay=5
         )

 def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
     ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
     if not ALLOW_RESET_CONFIG:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
             chatbot=chatbot, history=history, delay=2
         )
         return

     yield from modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
-    yield from update_ui_latest_msg(
+    yield from update_ui_lastest_msg(
         lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,五秒后即将重启!若出现报错请无视即可。", chatbot=chatbot, history=history, delay=5
     )
     os.execl(sys.executable, sys.executable, *sys.argv)
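For orientation, the hot-modify path reduces to a guarded `set_conf` call followed by an optional process re-exec. A compressed sketch; the toolbox helpers are assumed to behave as the diff shows, and the wrapper name is made up.

```python
import os, sys

def apply_config_change(key: str, value: str, reboot: bool = False):
    from toolbox import get_conf, set_conf  # provided by the project
    if not get_conf('ALLOW_RESET_CONFIG'):
        raise PermissionError("set ALLOW_RESET_CONFIG=True in config.py first")
    set_conf(key, value)
    if reboot:
        # Replace the current process with a fresh interpreter, as above.
        os.execl(sys.executable, sys.executable, *sys.argv)
```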
@@ -5,7 +5,7 @@ class VoidTerminalState():
         self.reset_state()

     def reset_state(self):
-        self.has_provided_explanation = False
+        self.has_provided_explaination = False

     def lock_plugin(self, chatbot):
         chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, update_ui_latest_msg
+from toolbox import CatchException, update_ui, update_ui_lastest_msg
 from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from request_llms.bridge_all import predict_no_ui_long_connection
@@ -15,7 +15,7 @@ Testing:


 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
-from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_latest_msg
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
 from crazy_functions.crazy_utils import input_clipping, try_install_deps
 from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
@@ -27,7 +27,7 @@ import time
 import glob
 import multiprocessing

-template = """
+templete = """
 ```python
 import ... # Put dependencies here, e.g. import numpy as np.

@@ -77,10 +77,10 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):

     # 第二步
     prompt_compose = [
-        "If previous stage is successful, rewrite the function you have just written to satisfy following template: \n",
-        template
+        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
+        templete
     ]
-    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable template. "
+    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=i_say, inputs_show_user=inputs_show_user,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
@@ -164,18 +164,18 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
         file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
         file_list.append(file_path)
-        yield from update_ui_latest_msg(f"当前文件: {file_path}", chatbot, history, 1)
+        yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1)
     elif have_any_recent_upload_files(chatbot):
         file_dir = get_recent_file_prompt_support(chatbot)
         file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True)
-        yield from update_ui_latest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
+        yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
     else:
         chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
-        yield from update_ui_latest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
         return # 2. 如果没有文件
     if len(file_list) == 0:
         chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
-        yield from update_ui_latest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
         return # 2. 如果没有文件

     # 读取文件
@@ -183,7 +183,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_

     # 粗心检查
     if is_the_upload_folder(txt):
-        yield from update_ui_latest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
+        yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
         return

     # 开始干正事
@@ -195,7 +195,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
         yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
     chatbot.append(["代码生成阶段结束", ""])
-    yield from update_ui_latest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
+    yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
     # ⭐ 分离代码块
     code = get_code_block(code)
     # ⭐ 检查模块
@@ -206,11 +206,11 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         if not traceback: traceback = trimmed_format_exc()
         # 处理异常
         if not traceback: traceback = trimmed_format_exc()
-        yield from update_ui_latest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
+        yield from update_ui_lastest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)

     # 代码生成结束, 开始执行
     TIME_LIMIT = 15
-    yield from update_ui_latest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
+    yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
     manager = multiprocessing.Manager()
     return_dict = manager.dict()

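The execution stage above runs the generated function in a separate process with a 15-second budget, passing results back through a manager dict. A generic sketch of that pattern; the payload is a stand-in for the generated code.

```python
import multiprocessing

def payload(return_dict):
    # Stand-in for the dynamically generated function.
    return_dict["result"] = sum(range(10))

if __name__ == "__main__":
    TIME_LIMIT = 15
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=payload, args=(return_dict,))
    p.start()
    p.join(timeout=TIME_LIMIT)
    if p.is_alive():
        p.terminate()  # enforce the time budget
        print("timed out")
    else:
        print("result:", return_dict.get("result"))
```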
@@ -8,7 +8,7 @@

 import time
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import get_conf, select_api_key, update_ui_latest_msg, Singleton
+from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
 from crazy_functions.crazy_utils import input_clipping, try_install_deps
 from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses
493 crazy_functions/批量文件询问.py Normal file
@@ -0,0 +1,493 @@
import os
import threading
import time
from dataclasses import dataclass
from typing import List, Tuple, Dict, Generator

from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
from crazy_functions.rag_fns.rag_file_support import extract_text
from request_llms.bridge_all import model_info
from toolbox import update_ui, CatchException, report_exception


@dataclass
class FileFragment:
    """文件片段数据类,用于组织处理单元"""
    file_path: str
    content: str
    rel_path: str
    fragment_index: int
    total_fragments: int


class BatchDocumentSummarizer:
    """优化的文档总结器 - 批处理版本"""

    def __init__(self, llm_kwargs: Dict, plugin_kwargs: Dict, chatbot: List, history: List, system_prompt: str):
        """初始化总结器"""
        self.llm_kwargs = llm_kwargs
        self.plugin_kwargs = plugin_kwargs
        self.chatbot = chatbot
        self.history = history
        self.system_prompt = system_prompt
        self.failed_files = []
        self.file_summaries_map = {}

    def _get_token_limit(self) -> int:
        """获取模型token限制"""
        max_token = model_info[self.llm_kwargs['llm_model']]['max_token']
        return max_token * 3 // 4

    def _create_batch_inputs(self, fragments: List[FileFragment]) -> Tuple[List, List, List]:
        """创建批处理输入"""
        inputs_array = []
        inputs_show_user_array = []
        history_array = []

        for frag in fragments:
            if self.plugin_kwargs.get("advanced_arg"):
                i_say = (f'请按照用户要求对文件内容进行处理,文件名为{os.path.basename(frag.file_path)},'
                         f'用户要求为:{self.plugin_kwargs["advanced_arg"]}:'
                         f'文件内容是 ```{frag.content}```')
                i_say_show_user = (f'正在处理 {frag.rel_path} (片段 {frag.fragment_index + 1}/{frag.total_fragments})')
            else:
                i_say = (f'请对下面的内容用中文做概述,文件名是{os.path.basename(frag.file_path)},'
                         f'内容是 ```{frag.content}```')
                i_say_show_user = f'正在处理 {frag.rel_path} (片段 {frag.fragment_index + 1}/{frag.total_fragments})'

            inputs_array.append(i_say)
            inputs_show_user_array.append(i_say_show_user)
            history_array.append([])

        return inputs_array, inputs_show_user_array, history_array

    def _process_single_file_with_timeout(self, file_info: Tuple[str, str], mutable_status: List) -> List[FileFragment]:
        """包装了超时控制的文件处理函数"""

        def timeout_handler():
            thread = threading.current_thread()
            if hasattr(thread, '_timeout_occurred'):
                thread._timeout_occurred = True

        # 设置超时标记
        thread = threading.current_thread()
        thread._timeout_occurred = False

        # 设置超时定时器
        timer = threading.Timer(self.watch_dog_patience, timeout_handler)
        timer.start()

        try:
            fp, project_folder = file_info
            fragments = []

            # 定期检查是否超时
            def check_timeout():
                if hasattr(thread, '_timeout_occurred') and thread._timeout_occurred:
                    raise TimeoutError("处理超时")

            # 更新状态
            mutable_status[0] = "检查文件大小"
            mutable_status[1] = time.time()
            check_timeout()

            # 文件大小检查
            if os.path.getsize(fp) > self.max_file_size:
                self.failed_files.append((fp, f"文件过大:超过{self.max_file_size / 1024 / 1024}MB"))
                mutable_status[2] = "文件过大"
                return fragments

            check_timeout()

            # 更新状态
            mutable_status[0] = "提取文件内容"
            mutable_status[1] = time.time()

            # 提取内容
            content = extract_text(fp)
            if content is None:
                self.failed_files.append((fp, "文件解析失败:不支持的格式或文件损坏"))
                mutable_status[2] = "格式不支持"
                return fragments
            elif not content.strip():
                self.failed_files.append((fp, "文件内容为空"))
                mutable_status[2] = "内容为空"
                return fragments

            check_timeout()

            # 更新状态
            mutable_status[0] = "分割文本"
            mutable_status[1] = time.time()

            # 分割文本
            try:
                paper_fragments = breakdown_text_to_satisfy_token_limit(
                    txt=content,
                    limit=self._get_token_limit(),
                    llm_model=self.llm_kwargs['llm_model']
                )
            except Exception as e:
                self.failed_files.append((fp, f"文本分割失败:{str(e)}"))
                mutable_status[2] = "分割失败"
                return fragments

            check_timeout()

            # 处理片段
            rel_path = os.path.relpath(fp, project_folder)
            for i, frag in enumerate(paper_fragments):
                if frag.strip():
                    fragments.append(FileFragment(
                        file_path=fp,
                        content=frag,
                        rel_path=rel_path,
                        fragment_index=i,
                        total_fragments=len(paper_fragments)
                    ))

            mutable_status[2] = "处理完成"
            return fragments

        except TimeoutError as e:
            self.failed_files.append((fp, "处理超时"))
            mutable_status[2] = "处理超时"
            return []
        except Exception as e:
            self.failed_files.append((fp, f"处理失败:{str(e)}"))
            mutable_status[2] = "处理异常"
            return []
        finally:
            timer.cancel()

    def prepare_fragments(self, project_folder: str, file_paths: List[str]) -> Generator:
        import concurrent.futures
        from concurrent.futures import ThreadPoolExecutor
        from typing import Generator, List
        """并行准备所有文件的处理片段"""
        all_fragments = []
        total_files = len(file_paths)

        # 配置参数
        self.refresh_interval = 0.2  # UI刷新间隔
        self.watch_dog_patience = 5  # 看门狗超时时间
        self.max_file_size = 10 * 1024 * 1024  # 10MB限制
        self.max_workers = min(32, len(file_paths))  # 最多32个线程

        # 创建有超时控制的线程池
        executor = ThreadPoolExecutor(max_workers=self.max_workers)

        # 用于跨线程状态传递的可变列表 - 增加文件名信息
        mutable_status_array = [["等待中", time.time(), "pending", file_path] for file_path in file_paths]

        # 创建文件处理任务
        file_infos = [(fp, project_folder) for fp in file_paths]

        # 提交所有任务,使用带超时控制的处理函数
        futures = [
            executor.submit(
                self._process_single_file_with_timeout,
                file_info,
                mutable_status_array[i]
            ) for i, file_info in enumerate(file_infos)
        ]

        # 更新UI的计数器
        cnt = 0

        try:
            # 监控任务执行
            while True:
                time.sleep(self.refresh_interval)
                cnt += 1

                # 检查任务完成状态
                worker_done = [f.done() for f in futures]

                # 更新状态显示
                status_str = ""
                for i, (status, timestamp, desc, file_path) in enumerate(mutable_status_array):
                    # 获取文件名(去掉路径)
                    file_name = os.path.basename(file_path)
                    if worker_done[i]:
                        status_str += f"文件 {file_name}: {desc}\n"
                    else:
                        status_str += f"文件 {file_name}: {status} {desc}\n"

                # 更新UI
                self.chatbot[-1] = [
                    "处理进度",
                    f"正在处理文件...\n\n{status_str}" + "." * (cnt % 10 + 1)
                ]
                yield from update_ui(chatbot=self.chatbot, history=self.history)

                # 检查是否所有任务完成
                if all(worker_done):
                    break

        finally:
            # 确保线程池正确关闭
            executor.shutdown(wait=False)

        # 收集结果
        processed_files = 0
        for future in futures:
            try:
                fragments = future.result(timeout=0.1)  # 给予一个短暂的超时时间来获取结果
                all_fragments.extend(fragments)
                processed_files += 1
            except concurrent.futures.TimeoutError:
                # 处理获取结果超时
                file_index = futures.index(future)
                self.failed_files.append((file_paths[file_index], "结果获取超时"))
                continue
            except Exception as e:
                # 处理其他异常
                file_index = futures.index(future)
                self.failed_files.append((file_paths[file_index], f"未知错误:{str(e)}"))
                continue

        # 最终进度更新
        self.chatbot.append([
            "文件处理完成",
            f"成功处理 {len(all_fragments)} 个片段,失败 {len(self.failed_files)} 个文件"
        ])
        yield from update_ui(chatbot=self.chatbot, history=self.history)

        return all_fragments

    def _process_fragments_batch(self, fragments: List[FileFragment]) -> Generator:
        """批量处理文件片段"""
        from collections import defaultdict
        batch_size = 64  # 每批处理的片段数
        max_retries = 3  # 最大重试次数
        retry_delay = 5  # 重试延迟(秒)
        results = defaultdict(list)

        # 按批次处理
        for i in range(0, len(fragments), batch_size):
            batch = fragments[i:i + batch_size]

            inputs_array, inputs_show_user_array, history_array = self._create_batch_inputs(batch)
            sys_prompt_array = ["请总结以下内容:"] * len(batch)

            # 添加重试机制
            for retry in range(max_retries):
                try:
                    response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                        inputs_array=inputs_array,
                        inputs_show_user_array=inputs_show_user_array,
                        llm_kwargs=self.llm_kwargs,
                        chatbot=self.chatbot,
                        history_array=history_array,
                        sys_prompt_array=sys_prompt_array,
                    )

                    # 处理响应
                    for j, frag in enumerate(batch):
                        summary = response_collection[j * 2 + 1]
                        if summary and summary.strip():
                            results[frag.rel_path].append({
                                'index': frag.fragment_index,
                                'summary': summary,
                                'total': frag.total_fragments
                            })
                    break  # 成功处理,跳出重试循环

                except Exception as e:
                    if retry == max_retries - 1:  # 最后一次重试失败
                        for frag in batch:
                            self.failed_files.append((frag.file_path, f"处理失败:{str(e)}"))
                    else:
                        yield from update_ui(self.chatbot.append([f"批次处理失败,{retry_delay}秒后重试...", str(e)]))
                        time.sleep(retry_delay)

        return results

    def _generate_final_summary_request(self) -> Tuple[List, List, List]:
        """准备最终总结请求"""
        if not self.file_summaries_map:
            return (["无可用的文件总结"], ["生成最终总结"], [[]])

        summaries = list(self.file_summaries_map.values())
        if all(not summary for summary in summaries):
            return (["所有文件处理均失败"], ["生成最终总结"], [[]])

        if self.plugin_kwargs.get("advanced_arg"):
            i_say = "根据以上所有文件的处理结果,按要求进行综合处理:" + self.plugin_kwargs['advanced_arg']
        else:
            i_say = "请根据以上所有文件的处理结果,生成最终的总结,不超过1000字。"

        return ([i_say], [i_say], [summaries])

    def process_files(self, project_folder: str, file_paths: List[str]) -> Generator:
        """处理所有文件"""
        total_files = len(file_paths)
        self.chatbot.append([f"开始处理", f"总计 {total_files} 个文件"])
        yield from update_ui(chatbot=self.chatbot, history=self.history)

        # 1. 准备所有文件片段
        # 在 process_files 函数中:
        fragments = yield from self.prepare_fragments(project_folder, file_paths)
        if not fragments:
            self.chatbot.append(["处理失败", "没有可处理的文件内容"])
            return "没有可处理的文件内容"

        # 2. 批量处理所有文件片段
        self.chatbot.append([f"文件分析", f"共计 {len(fragments)} 个处理单元"])
        yield from update_ui(chatbot=self.chatbot, history=self.history)

        try:
            file_summaries = yield from self._process_fragments_batch(fragments)
        except Exception as e:
            self.chatbot.append(["处理错误", f"批处理过程失败:{str(e)}"])
            return "处理过程发生错误"

        # 3. 为每个文件生成整体总结
        self.chatbot.append(["生成总结", "正在汇总文件内容..."])
        yield from update_ui(chatbot=self.chatbot, history=self.history)

        # 处理每个文件的总结
        for rel_path, summaries in file_summaries.items():
            if len(summaries) > 1:  # 多片段文件需要生成整体总结
                sorted_summaries = sorted(summaries, key=lambda x: x['index'])
                if self.plugin_kwargs.get("advanced_arg"):
                    i_say = (f"根据以下内容,按要求:{self.plugin_kwargs['advanced_arg']},"
                             f"总结文件 {os.path.basename(rel_path)} 的主要内容。")
                else:
                    i_say = f"请总结文件 {os.path.basename(rel_path)} 的主要内容,不超过500字。"

                try:
                    summary_texts = [s['summary'] for s in sorted_summaries]
                    response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                        inputs_array=[i_say],
                        inputs_show_user_array=[f"生成 {rel_path} 的总结"],
                        llm_kwargs=self.llm_kwargs,
                        chatbot=self.chatbot,
                        history_array=[summary_texts],
                        sys_prompt_array=["总结文件内容。"],
                    )
                    self.file_summaries_map[rel_path] = response_collection[1]
                except Exception as e:
                    self.chatbot.append(["警告", f"文件 {rel_path} 总结生成失败:{str(e)}"])
                    self.file_summaries_map[rel_path] = "总结生成失败"
            else:  # 单片段文件直接使用其唯一的总结
                self.file_summaries_map[rel_path] = summaries[0]['summary']

        # 4. 生成最终总结
        try:
            # 收集所有文件的总结用于生成最终总结
            file_summaries_for_final = []
            for rel_path, summary in self.file_summaries_map.items():
                file_summaries_for_final.append(f"文件 {rel_path} 的总结:\n{summary}")

            if self.plugin_kwargs.get("advanced_arg"):
                final_summary_prompt = ("根据以下所有文件的总结内容,按要求进行综合处理:" +
                                        self.plugin_kwargs['advanced_arg'])
            else:
                final_summary_prompt = "请根据以下所有文件的总结内容,生成最终的总结报告。"

            response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                inputs_array=[final_summary_prompt],
                inputs_show_user_array=["生成最终总结报告"],
                llm_kwargs=self.llm_kwargs,
                chatbot=self.chatbot,
                history_array=[file_summaries_for_final],
                sys_prompt_array=["总结所有文件内容。"],
                max_workers=1
            )

            return response_collection[1] if len(response_collection) > 1 else "生成总结失败"
        except Exception as e:
            self.chatbot.append(["错误", f"最终总结生成失败:{str(e)}"])
            return "生成总结失败"

    def save_results(self, final_summary: str):
        """保存结果到文件"""
        from toolbox import promote_file_to_downloadzone, write_history_to_file
        from crazy_functions.doc_fns.batch_file_query_doc import MarkdownFormatter, HtmlFormatter, WordFormatter
        import os
        timestamp = time.strftime("%Y%m%d_%H%M%S")

        # 创建各种格式化器
        md_formatter = MarkdownFormatter(final_summary, self.file_summaries_map, self.failed_files)
        html_formatter = HtmlFormatter(final_summary, self.file_summaries_map, self.failed_files)
        word_formatter = WordFormatter(final_summary, self.file_summaries_map, self.failed_files)

        result_files = []

        # 保存 Markdown
        md_content = md_formatter.create_document()
        result_file_md = write_history_to_file(
            history=[md_content],  # 直接传入内容列表
            file_basename=f"文档总结_{timestamp}.md"
        )
        result_files.append(result_file_md)

        # 保存 HTML
        html_content = html_formatter.create_document()
        result_file_html = write_history_to_file(
            history=[html_content],
            file_basename=f"文档总结_{timestamp}.html"
        )
        result_files.append(result_file_html)

        # 保存 Word
        doc = word_formatter.create_document()
        # 由于 Word 文档需要用 doc.save(),我们使用与 md 文件相同的目录
        result_file_docx = os.path.join(
            os.path.dirname(result_file_md),
            f"文档总结_{timestamp}.docx"
        )
        doc.save(result_file_docx)
        result_files.append(result_file_docx)

        # 添加到下载区
        for file in result_files:
            promote_file_to_downloadzone(file, chatbot=self.chatbot)

        self.chatbot.append(["处理完成", f"结果已保存至: {', '.join(result_files)}"])


@CatchException
def 批量文件询问(txt: str, llm_kwargs: Dict, plugin_kwargs: Dict, chatbot: List,
          history: List, system_prompt: str, user_request: str):
    """主函数 - 优化版本"""
    # 初始化
    import glob
    import re
    from crazy_functions.rag_fns.rag_file_support import supports_format
    from toolbox import report_exception

    summarizer = BatchDocumentSummarizer(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
    chatbot.append(["函数插件功能", f"作者:lbykkkk,批量总结文件。支持格式: {', '.join(supports_format)}等其他文本格式文件,如果长时间卡在文件处理过程,请查看处理进度,然后删除所有处于“pending”状态的文件,然后重新上传处理。"])
    yield from update_ui(chatbot=chatbot, history=history)

    # 验证输入路径
    if not os.path.exists(txt):
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)
        return

    # 获取文件列表
    project_folder = txt
    extract_folder = next((d for d in glob.glob(f'{project_folder}/*')
                           if os.path.isdir(d) and d.endswith('.extract')), project_folder)

    exclude_patterns = r'/[^/]+\.(zip|rar|7z|tar|gz)$'
    file_manifest = [f for f in glob.glob(f'{extract_folder}/**', recursive=True)
                     if os.path.isfile(f) and not re.search(exclude_patterns, f)]

    if not file_manifest:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b="未找到支持的文件类型")
        yield from update_ui(chatbot=chatbot, history=history)
        return

    # 处理所有文件并生成总结
    final_summary = yield from summarizer.process_files(project_folder, file_manifest)
    yield from update_ui(chatbot=chatbot, history=history)

    # 保存结果
    summarizer.save_results(final_summary)
    yield from update_ui(chatbot=chatbot, history=history)
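One detail worth flagging in the new plugin: the multi-thread request helper returns an interleaved list (input, reply, input, reply, ...), which is why `_process_fragments_batch` reads `response_collection[j * 2 + 1]`. A tiny sketch of that indexing with mocked contents:

```python
# Mocked return shape: [input_0, reply_0, input_1, reply_1, ...]
response_collection = ["q0", "summary of fragment 0", "q1", "summary of fragment 1"]

batch_size = 2
for j in range(batch_size):
    summary = response_collection[j * 2 + 1]  # replies sit at odd indices
    print(f"fragment {j}: {summary}")
```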
@@ -1,5 +1,5 @@
 from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
-from toolbox import update_ui, promote_file_to_downloadzone, update_ui_latest_msg, disable_auto_promotion
+from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
@@ -166,7 +166,7 @@ class PointWithTrace(Scene):

 ```

-# do not use get_graph, this function is deprecated
+# do not use get_graph, this funciton is deprecated

 class ExampleFunctionGraph(Scene):
     def construct(self):
@@ -324,16 +324,16 @@ def 生成多种Mermaid图表(
     if os.path.exists(txt): # 如输入区无内容则直接解析历史记录
         from crazy_functions.pdf_fns.parse_word import extract_text_from_files

-        file_exist, final_result, page_one, file_manifest, exception = (
+        file_exist, final_result, page_one, file_manifest, excption = (
             extract_text_from_files(txt, chatbot, history)
         )
     else:
         file_exist = False
-        exception = ""
+        excption = ""
         file_manifest = []

-    if exception != "":
-        if exception == "word":
+    if excption != "":
+        if excption == "word":
             report_exception(
                 chatbot,
                 history,
@@ -341,7 +341,7 @@ def 生成多种Mermaid图表(
                 b=f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。",
             )

-        elif exception == "pdf":
+        elif excption == "pdf":
             report_exception(
                 chatbot,
                 history,
@@ -349,7 +349,7 @@ def 生成多种Mermaid图表(
                 b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。",
             )

-        elif exception == "word_pip":
+        elif excption == "word_pip":
             report_exception(
                 chatbot,
                 history,
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_latest_msg, get_log_folder, get_user
+from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
 from loguru import logger
 install_msg ="""
@@ -42,7 +42,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         # from crazy_functions.crazy_utils import try_install_deps
         # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
-        # yield from update_ui_latest_msg("安装完成,您可以再次重试。", chatbot, history)
+        # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return

     # < --------------------读取文件--------------- >
@@ -95,7 +95,7 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         # from crazy_functions.crazy_utils import try_install_deps
         # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
-        # yield from update_ui_latest_msg("安装完成,您可以再次重试。", chatbot, history)
+        # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return

     # < ------------------- --------------- >
@@ -47,7 +47,7 @@ explain_msg = """
|
|||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from typing import List
|
from typing import List
|
||||||
from toolbox import CatchException, update_ui, is_the_upload_folder
|
from toolbox import CatchException, update_ui, is_the_upload_folder
|
||||||
from toolbox import update_ui_latest_msg, disable_auto_promotion
|
from toolbox import update_ui_lastest_msg, disable_auto_promotion
|
||||||
from request_llms.bridge_all import predict_no_ui_long_connection
|
from request_llms.bridge_all import predict_no_ui_long_connection
|
||||||
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||||
from crazy_functions.crazy_utils import input_clipping
|
from crazy_functions.crazy_utils import input_clipping
|
||||||
@@ -113,19 +113,19 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|||||||
# 用简单的关键词检测用户意图
|
# 用简单的关键词检测用户意图
|
||||||
is_certain, _ = analyze_intention_with_simple_rules(txt)
|
is_certain, _ = analyze_intention_with_simple_rules(txt)
|
||||||
if is_the_upload_folder(txt):
|
if is_the_upload_folder(txt):
|
||||||
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=False)
|
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
|
||||||
appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
|
appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
|
||||||
|
|
||||||
if is_certain or (state.has_provided_explanation):
|
if is_certain or (state.has_provided_explaination):
|
||||||
# 如果意图明确,跳过提示环节
|
# 如果意图明确,跳过提示环节
|
||||||
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
|
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
|
||||||
state.unlock_plugin(chatbot=chatbot)
|
state.unlock_plugin(chatbot=chatbot)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
|
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
# 如果意图模糊,提示
|
# 如果意图模糊,提示
|
||||||
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
|
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
|
||||||
state.lock_plugin(chatbot=chatbot)
|
state.lock_plugin(chatbot=chatbot)
|
||||||
chatbot.append(("虚空终端状态:", explain_msg+appendix_msg))
|
chatbot.append(("虚空终端状态:", explain_msg+appendix_msg))
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
@@ -141,7 +141,7 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     # ⭐ ⭐ ⭐ 分析用户意图
    is_certain, user_intention = analyze_intention_with_simple_rules(txt)
     if not is_certain:
-        yield from update_ui_latest_msg(
+        yield from update_ui_lastest_msg(
             lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
         gpt_json_io = GptJsonIO(UserIntention)
         rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']"
@@ -154,13 +154,13 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
             user_intention = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
             lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
         except JsonStringError as e:
-            yield from update_ui_latest_msg(
+            yield from update_ui_lastest_msg(
                 lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0)
             return
     else:
         pass

-    yield from update_ui_latest_msg(
+    yield from update_ui_lastest_msg(
         lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
         chatbot=chatbot, history=history, delay=0)

@@ -42,7 +42,7 @@ class AsyncGptTask():
             MAX_TOKEN_ALLO = 2560
             i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO)
             gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
-                                                            observe_window=observe_window[index], console_silence=True)
+                                                            observe_window=observe_window[index], console_slience=True)
         except ConnectionAbortedError as token_exceed_err:
             logger.error('至少一个线程任务Token溢出而失败', e)
         except Exception as e:
@@ -1,6 +1,6 @@
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import CatchException, report_exception, promote_file_to_downloadzone
-from toolbox import update_ui, update_ui_latest_msg, disable_auto_promotion, write_history_to_file
+from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
 import logging
 import requests
 import time
@@ -156,7 +156,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     history = []
     meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
     if len(meta_paper_info_list) == 0:
-        yield from update_ui_latest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0)
+        yield from update_ui_lastest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0)
         return
     batchsize = 5
     for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)):
@@ -5,10 +5,6 @@ FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

 # edge-tts需要的依赖,某些pip包所需的依赖
 RUN apt update && apt install ffmpeg build-essential -y
-RUN apt-get install -y fontconfig
-RUN ln -s /usr/local/texlive/2023/texmf-dist/fonts/truetype /usr/share/fonts/truetype/texlive
-RUN fc-cache -fv
-RUN apt-get clean

 # use python3 as the system default python
 WORKDIR /gpt
@@ -34,7 +30,7 @@ RUN python3 -m pip install -r request_llms/requirements_qwen.txt
 RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
 RUN python3 -m pip install -r request_llms/requirements_newbing.txt
 RUN python3 -m pip install nougat-ocr
-RUN python3 -m pip cache purge

 # 预热Tiktoken模块
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
@@ -7,7 +7,6 @@ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing

 # edge-tts需要的依赖,某些pip包所需的依赖
 RUN apt update && apt install ffmpeg build-essential -y
-RUN apt-get clean

 # use python3 as the system default python
 RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -23,7 +22,6 @@ RUN python3 -m pip install -r request_llms/requirements_moss.txt
 RUN python3 -m pip install -r request_llms/requirements_qwen.txt
 RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
 RUN python3 -m pip install -r request_llms/requirements_newbing.txt
-RUN python3 -m pip cache purge


 # 预热Tiktoken模块
@@ -18,7 +18,5 @@ RUN apt update && apt install ffmpeg -y
 # 可选步骤,用于预热模块
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

-RUN python3 -m pip cache purge && apt-get clean
-
 # 启动
 CMD ["python3", "-u", "main.py"]
@@ -30,7 +30,5 @@ COPY --chown=gptuser:gptuser . .
 # 可选步骤,用于预热模块
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

-RUN python3 -m pip cache purge
-
 # 启动
 CMD ["python3", "-u", "main.py"]
@@ -24,8 +24,6 @@ RUN apt update && apt install ffmpeg -y

 # 可选步骤,用于预热模块
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-RUN python3 -m pip cache purge && apt-get clean
-

 # 启动
 CMD ["python3", "-u", "main.py"]
@@ -1,26 +0,0 @@
-@echo off
-setlocal
-
-:: 设置环境变量
-set ENV_NAME=gpt
-set ENV_PATH=%~dp0%ENV_NAME%
-set SCRIPT_PATH=%~dp0main.py
-
-:: 判断环境是否已解压
-if not exist "%ENV_PATH%" (
-    echo Extracting environment...
-    mkdir "%ENV_PATH%"
-    tar -xzf gpt.tar.gz -C "%ENV_PATH%"
-
-    :: 运行conda环境激活脚本
-    call "%ENV_PATH%\Scripts\activate.bat"
-) else (
-    :: 如果环境已存在,直接激活
-    call "%ENV_PATH%\Scripts\activate.bat"
-)
-echo Start to run program:
-:: 运行Python脚本
-python "%SCRIPT_PATH%"
-
-endlocal
-pause
@@ -1141,7 +1141,7 @@
     "内容太长了都会触发token数量溢出的错误": "An error of token overflow will be triggered if the content is too long",
     "chatbot 为WebUI中显示的对话列表": "chatbot is the conversation list displayed in WebUI",
     "修改它": "Modify it",
-    "然后yield出去": "Then yield it out",
+    "然后yeild出去": "Then yield it out",
     "可以直接修改对话界面内容": "You can directly modify the conversation interface content",
     "additional_fn代表点击的哪个按钮": "additional_fn represents which button is clicked",
     "按钮见functional.py": "See functional.py for buttons",
@@ -1732,7 +1732,7 @@
     "或者重启之后再度尝试": "Or try again after restarting",
     "免费": "Free",
     "仅在Windows系统进行了测试": "Tested only on Windows system",
-    "欢迎加README中的QQ联系开发者": "Feel free to contact the developer via QQ in README",
+    "欢迎加REAME中的QQ联系开发者": "Feel free to contact the developer via QQ in REAME",
     "当前知识库内的有效文件": "Valid files in the current knowledge base",
     "您可以到Github Issue区": "You can go to the Github Issue area",
     "刷新Gradio前端界面": "Refresh the Gradio frontend interface",
@@ -1759,7 +1759,7 @@
     "报错信息如下. 如果是与网络相关的问题": "Error message as follows. If it is related to network issues",
     "功能描述": "Function description",
     "禁止移除或修改此警告": "Removal or modification of this warning is prohibited",
-    "ArXiv翻译": "ArXiv translation",
+    "Arixv翻译": "Arixv translation",
     "读取优先级": "Read priority",
     "包含documentclass关键字": "Contains the documentclass keyword",
     "根据文本使用GPT模型生成相应的图像": "Generate corresponding images using GPT model based on the text",
@@ -1998,7 +1998,7 @@
     "开始最终总结": "Start final summary",
     "openai的官方KEY需要伴随组织编码": "Openai's official KEY needs to be accompanied by organizational code",
     "将子线程的gpt结果写入chatbot": "Write the GPT result of the sub-thread into the chatbot",
-    "ArXiv论文精细翻译": "Fine translation of ArXiv paper",
+    "Arixv论文精细翻译": "Fine translation of Arixv paper",
     "开始接收chatglmft的回复": "Start receiving replies from chatglmft",
     "请先将.doc文档转换为.docx文档": "Please convert .doc documents to .docx documents first",
     "避免多用户干扰": "Avoid multiple user interference",
@@ -2360,7 +2360,7 @@
     "请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件": "Please set ALLOW_RESET_CONFIG=True in config.py and restart the software",
     "按照自然语言描述生成一个动画 | 输入参数是一段话": "Generate an animation based on natural language description | Input parameter is a sentence",
     "你的hf用户名如qingxu98": "Your hf username is qingxu98",
-    "ArXiv论文精细翻译 | 输入参数arxiv论文的ID": "Fine translation of ArXiv paper | Input parameter is the ID of arxiv paper",
+    "Arixv论文精细翻译 | 输入参数arxiv论文的ID": "Fine translation of Arixv paper | Input parameter is the ID of arxiv paper",
     "无法获取 abstract": "Unable to retrieve abstract",
     "尽可能地仅用一行命令解决我的要求": "Try to solve my request using only one command",
     "提取插件参数": "Extract plugin parameters",
@@ -753,7 +753,7 @@
     "手动指定和筛选源代码文件类型": "ソースコードファイルタイプを手動で指定およびフィルタリングする",
     "更多函数插件": "その他の関数プラグイン",
     "看门狗的耐心": "監視犬の忍耐力",
-    "然后yield出去": "そして出力する",
+    "然后yeild出去": "そして出力する",
     "拆分过长的IPynb文件": "長すぎるIPynbファイルを分割する",
     "1. 把input的余量留出来": "1. 入力の余裕を残す",
     "请求超时": "リクエストがタイムアウトしました",
@@ -1803,7 +1803,7 @@
     "默认值为1000": "デフォルト値は1000です",
     "写出文件": "ファイルに書き出す",
     "生成的视频文件路径": "生成されたビデオファイルのパス",
-    "ArXiv论文精细翻译": "ArXiv論文の詳細な翻訳",
+    "Arixv论文精细翻译": "Arixv論文の詳細な翻訳",
     "用latex编译为PDF对修正处做高亮": "LaTeXでコンパイルしてPDFに修正をハイライトする",
     "点击“停止”键可终止程序": "「停止」ボタンをクリックしてプログラムを終了できます",
     "否则将导致每个人的Claude问询历史互相渗透": "さもないと、各人のClaudeの問い合わせ履歴が相互に侵入します",
@@ -1987,7 +1987,7 @@
     "前面是中文逗号": "前面是中文逗号",
     "的依赖": "的依赖",
     "材料如下": "材料如下",
-    "欢迎加README中的QQ联系开发者": "欢迎加README中的QQ联系开发者",
+    "欢迎加REAME中的QQ联系开发者": "欢迎加REAME中的QQ联系开发者",
     "开始下载": "開始ダウンロード",
     "100字以内": "100文字以内",
     "创建request": "リクエストの作成",
@@ -771,7 +771,7 @@
     "查询代理的地理位置": "查詢代理的地理位置",
     "是否在输入过长时": "是否在輸入過長時",
     "chatGPT分析报告": "chatGPT分析報告",
-    "然后yield出去": "然後yield出去",
+    "然后yeild出去": "然後yield出去",
     "用户取消了程序": "使用者取消了程式",
     "琥珀色": "琥珀色",
     "这里是特殊函数插件的高级参数输入区": "這裡是特殊函數插件的高級參數輸入區",
@@ -1587,7 +1587,7 @@
     "否则将导致每个人的Claude问询历史互相渗透": "否則將導致每個人的Claude問詢歷史互相滲透",
     "提问吧! 但注意": "提問吧!但注意",
     "待处理的word文档路径": "待處理的word文檔路徑",
-    "欢迎加README中的QQ联系开发者": "歡迎加README中的QQ聯繫開發者",
+    "欢迎加REAME中的QQ联系开发者": "歡迎加REAME中的QQ聯繫開發者",
     "建议暂时不要使用": "建議暫時不要使用",
     "Latex没有安装": "Latex沒有安裝",
     "在这里放一些网上搜集的demo": "在這裡放一些網上搜集的demo",
@@ -1989,7 +1989,7 @@
     "请耐心等待": "請耐心等待",
     "在执行完成之后": "在執行完成之後",
     "参数简单": "參數簡單",
-    "ArXiv论文精细翻译": "ArXiv論文精細翻譯",
+    "Arixv论文精细翻译": "Arixv論文精細翻譯",
     "备份和下载": "備份和下載",
     "当前报错的latex代码处于第": "當前報錯的latex代碼處於第",
     "Markdown翻译": "Markdown翻譯",
47 main.py
@@ -1,7 +1,10 @@
-import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
+import os, json; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染

 help_menu_description = \
-"""
+"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
+感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
+</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
+如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
 </br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
 </br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
 </br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
@@ -31,9 +34,9 @@ def encode_plugin_info(k, plugin)->str:

 def main():
     import gradio as gr
-    if gr.__version__ not in ['3.32.15']:
+    if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")

     # 一些基础工具
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith

@@ -46,7 +49,7 @@ def main():
     # 读取配置
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
-    ENABLE_AUDIO, AUTO_CLEAR_TXT, AVAIL_FONTS, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'AVAIL_FONTS', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
+    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
     NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
     DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU, TTS_TYPE = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU', 'TTS_TYPE')
     if LLM_MODEL not in AVAIL_LLM_MODELS: AVAIL_LLM_MODELS += [LLM_MODEL]
@@ -54,8 +57,8 @@ def main():
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     from check_proxy import get_current_version
-    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_show_or_hide
-    from themes.theme import js_code_for_toggle_darkmode
+    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
+    from themes.theme import js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
     from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"

@@ -65,7 +68,7 @@ def main():
     functional = get_core_functions()

     # 高级函数插件
-    from crazy_functional import get_crazy_functions, get_multiplex_button_functions
+    from crazy_functional import get_crazy_functions
     DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS')
     plugins = get_crazy_functions()
     all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
@@ -103,7 +106,7 @@ def main():
         with gr_L2(scale=2, elem_id="gpt-chat"):
             chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
             if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
-            history, _, _ = make_history_cache() # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
+            history, history_cache, history_cache_update = make_history_cache() # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
         with gr_L2(scale=1, elem_id="gpt-panel"):
             with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
                 with gr.Row():
@@ -111,7 +114,12 @@ def main():
                 with gr.Row(elem_id="gpt-submit-row"):
                     multiplex_submit_btn = gr.Button("提交", elem_id="elem_submit_visible", variant="primary")
                     multiplex_sel = gr.Dropdown(
-                        choices=get_multiplex_button_functions().keys(), value="常规对话",
+                        choices=[
+                            "常规对话",
+                            "多模型对话",
+                            "智能召回 RAG",
+                            # "智能上下文",
+                        ], value="常规对话",
                         interactive=True, label='', show_label=False,
                         elem_classes='normal_mut_select', elem_id="gpt-submit-dropdown").style(container=False)
                     submit_btn = gr.Button("提交", elem_id="elem_submit", variant="primary", visible=False)
@@ -171,20 +179,16 @@ def main():
         with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
             file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")

     # 左上角工具栏定义
     from themes.gui_toolbar import define_gui_toolbar
     checkboxes, checkboxes_2, max_length_sl, theme_dropdown, system_prompt, file_upload_2, md_dropdown, top_p, temperature = \
-        define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAIL_THEMES, AVAIL_FONTS, ADD_WAIFU, help_menu_description, js_code_for_toggle_darkmode)
+        define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAIL_THEMES, ADD_WAIFU, help_menu_description, js_code_for_toggle_darkmode)

     # 浮动菜单定义
     from themes.gui_floating_menu import define_gui_floating_menu
     area_input_secondary, txt2, area_customize, _, resetBtn2, clearBtn2, stopBtn2 = \
         define_gui_floating_menu(customize_btns, functional, predefined_btns, cookies, web_cookie_cache)

-    # 浮动时间线定义
-    gr.Spark()
-
     # 插件二级菜单的实现
     from themes.gui_advanced_plugin_class import define_gui_advanced_plugin_class
     new_plugin_callback, route_switchy_bt_with_arg, usr_confirmed_arg = \
@@ -207,14 +211,14 @@ def main():
             ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
             return ret
         checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
-        checkboxes_2.select(None, [checkboxes_2], None, _js="""apply_checkbox_change_for_group2""")
+        checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)

         # 整理反复出现的控件句柄组合
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
         input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"]
         output_combo = [cookies, chatbot, history, status]
         predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)

         # 提交按钮、重置按钮
         multiplex_submit_btn.click(
             None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
@@ -223,8 +227,11 @@ def main():
         multiplex_sel.select(
             None, [multiplex_sel], None, _js=f"""(multiplex_sel)=>run_multiplex_shift(multiplex_sel)""")
         cancel_handles.append(submit_btn.click(**predict_args))
-        resetBtn.click(None, None, [chatbot, history, status], _js= """clear_conversation""") # 先在前端快速清除chatbot&status
-        resetBtn2.click(None, None, [chatbot, history, status], _js="""clear_conversation""") # 先在前端快速清除chatbot&status
+        resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
+        resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
+        reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)), [history], [chatbot, history, status, history_cache])
+        resetBtn.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
+        resetBtn2.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
         clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
         clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
         if AUTO_CLEAR_TXT:
@@ -324,7 +331,7 @@ def main():
     from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
     load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
     app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
-        outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js="""persistent_cookie_init""")
+        outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
     app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}","{TTS_TYPE}")""") # 配置暗色主题或亮色主题
     app_block.load(None, inputs=[], outputs=None, _js="""()=>{REP}""".replace("REP", register_advanced_plugin_init_arr))

@@ -26,9 +26,6 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui
 from .bridge_chatglm3 import predict as chatglm3_ui

-from .bridge_chatglm4 import predict_no_ui_long_connection as chatglm4_noui
-from .bridge_chatglm4 import predict as chatglm4_ui
-
 from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
 from .bridge_qianfan import predict as qianfan_ui

@@ -79,8 +76,6 @@ cohere_endpoint = "https://api.cohere.ai/v1/chat"
|
|||||||
ollama_endpoint = "http://localhost:11434/api/chat"
|
ollama_endpoint = "http://localhost:11434/api/chat"
|
||||||
yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
|
yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
|
||||||
deepseekapi_endpoint = "https://api.deepseek.com/v1/chat/completions"
|
deepseekapi_endpoint = "https://api.deepseek.com/v1/chat/completions"
|
||||||
grok_model_endpoint = "https://api.x.ai/v1/chat/completions"
|
|
||||||
volcengine_endpoint = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"
|
|
||||||
|
|
||||||
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
|
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
|
||||||
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
|
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
|
||||||
@@ -102,8 +97,6 @@ if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[coher
 if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollama_endpoint]
 if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
 if deepseekapi_endpoint in API_URL_REDIRECT: deepseekapi_endpoint = API_URL_REDIRECT[deepseekapi_endpoint]
-if grok_model_endpoint in API_URL_REDIRECT: grok_model_endpoint = API_URL_REDIRECT[grok_model_endpoint]
-if volcengine_endpoint in API_URL_REDIRECT: volcengine_endpoint = API_URL_REDIRECT[volcengine_endpoint]

 # 获取tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
@@ -219,16 +212,6 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },

-    "chatgpt-4o-latest": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "has_multimodal_capacity": True,
-        "max_token": 128000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-    },
-
     "gpt-4o-2024-05-13": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -275,88 +258,16 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
-        "openai_force_temperature_one": True,
     },

     "o1-mini": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
-        "can_multi_thread": True,
         "max_token": 128000,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
-        "openai_force_temperature_one": True,
-    },
-
-    "o1-2024-12-17": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 200000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-        "openai_disable_system_prompt": True,
-        "openai_disable_stream": True,
-        "openai_force_temperature_one": True,
-    },
-
-    "o1": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 200000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-        "openai_disable_system_prompt": True,
-        "openai_disable_stream": True,
-        "openai_force_temperature_one": True,
-    },
-
-    "gpt-4.1":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "has_multimodal_capacity": True,
-        "endpoint": openai_endpoint,
-        "max_token": 828000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-    },
-
-    "gpt-4.1-mini":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "has_multimodal_capacity": True,
-        "endpoint": openai_endpoint,
-        "max_token": 828000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-    },
-
-    "o3":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "has_multimodal_capacity": True,
-        "endpoint": openai_endpoint,
-        "max_token": 828000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-        "openai_disable_system_prompt": True,
-        "openai_disable_stream": True,
-        "openai_force_temperature_one": True,
-    },
-
-    "o4-mini":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "has_multimodal_capacity": True,
-        "can_multi_thread": True,
-        "endpoint": openai_endpoint,
-        "max_token": 828000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
     },

     "gpt-4-turbo": {
@@ -474,14 +385,6 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    "glm-4-plus":{
-        "fn_with_ui": zhipu_ui,
-        "fn_without_ui": zhipu_noui,
-        "endpoint": None,
-        "max_token": 10124 * 8,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },

     # api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加)
     "api2d-gpt-4": {
@@ -493,7 +396,6 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },

-    # ChatGLM本地模型
     # 将 chatglm 直接对齐到 chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
@@ -519,14 +421,6 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    "chatglm4": {
-        "fn_with_ui": chatglm4_ui,
-        "fn_without_ui": chatglm4_noui,
-        "endpoint": None,
-        "max_token": 8192,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
     "qianfan": {
         "fn_with_ui": qianfan_ui,
         "fn_without_ui": qianfan_noui,
@@ -575,15 +469,6 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    "gemini-2.0-flash": {
-        "fn_with_ui": genai_ui,
-        "fn_without_ui": genai_noui,
-        "endpoint": gemini_endpoint,
-        "has_multimodal_capacity": True,
-        "max_token": 1024 * 204800,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },

     # cohere
     "cohere-command-r-plus": {
@@ -867,13 +752,8 @@ if "qwen-local" in AVAIL_LLM_MODELS:
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
logger.error(trimmed_format_exc())
|
logger.error(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=-
|
||||||
# -=-=-=-=-=-=- 阿里云百炼(通义)-在线模型 -=-=-=-=-=-=-
|
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
|
||||||
qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus",
|
|
||||||
"dashscope-deepseek-r1","dashscope-deepseek-v3",
|
|
||||||
"dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-qwen3-32b",
|
|
||||||
]
|
|
||||||
if any(item in qwen_models for item in AVAIL_LLM_MODELS):
|
|
||||||
try:
|
try:
|
||||||
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
||||||
from .bridge_qwen import predict as qwen_ui
|
from .bridge_qwen import predict as qwen_ui
|
||||||
@@ -883,7 +763,7 @@ if any(item in qwen_models for item in AVAIL_LLM_MODELS):
             "fn_without_ui": qwen_noui,
             "can_multi_thread": True,
             "endpoint": None,
-            "max_token": 100000,
+            "max_token": 6144,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
@@ -892,7 +772,7 @@ if any(item in qwen_models for item in AVAIL_LLM_MODELS):
             "fn_without_ui": qwen_noui,
             "can_multi_thread": True,
             "endpoint": None,
-            "max_token": 129024,
+            "max_token": 30720,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
@@ -901,79 +781,13 @@ if any(item in qwen_models for item in AVAIL_LLM_MODELS):
             "fn_without_ui": qwen_noui,
             "can_multi_thread": True,
             "endpoint": None,
-            "max_token": 30720,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "qwen-max-latest": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 30720,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "qwen-max-2025-01-25": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 30720,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "dashscope-deepseek-r1": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "enable_reasoning": True,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 57344,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "dashscope-deepseek-v3": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 57344,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "dashscope-qwen3-14b": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "enable_reasoning": True,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 129024,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "dashscope-qwen3-235b-a22b": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 129024,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "dashscope-qwen3-32b": {
-            "fn_with_ui": qwen_ui,
-            "fn_without_ui": qwen_noui,
-            "can_multi_thread": True,
-            "endpoint": None,
-            "max_token": 129024,
+            "max_token": 28672,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         }
     })
 except:
     logger.error(trimmed_format_exc())

 # -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
 yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"]
 if any(item in yi_models for item in AVAIL_LLM_MODELS):
@@ -1054,31 +868,6 @@ if any(item in yi_models for item in AVAIL_LLM_MODELS):
         })
     except:
         logger.error(trimmed_format_exc())
-
-
-# -=-=-=-=-=-=- Grok model from x.ai -=-=-=-=-=-=-
-grok_models = ["grok-beta"]
-if any(item in grok_models for item in AVAIL_LLM_MODELS):
-    try:
-        grok_beta_128k_noui, grok_beta_128k_ui = get_predict_function(
-            api_key_conf_name="GROK_API_KEY", max_output_token=8192, disable_proxy=False
-        )
-
-        model_info.update({
-            "grok-beta": {
-                "fn_with_ui": grok_beta_128k_ui,
-                "fn_without_ui": grok_beta_128k_noui,
-                "can_multi_thread": True,
-                "endpoint": grok_model_endpoint,
-                "max_token": 128000,
-                "tokenizer": tokenizer_gpt35,
-                "token_cnt": get_token_num_gpt35,
-            },
-
-        })
-    except:
-        logger.error(trimmed_format_exc())
-
 # -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=-
 if "spark" in AVAIL_LLM_MODELS:
     try:
@@ -1180,7 +969,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
logger.error(trimmed_format_exc())
|
logger.error(trimmed_format_exc())
|
||||||
# -=-=-=-=-=-=- 幻方-深度求索本地大模型 -=-=-=-=-=-=-
|
# -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=-
|
||||||
if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
||||||
try:
|
try:
|
||||||
from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
|
from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
|
||||||
@@ -1197,21 +986,19 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
logger.error(trimmed_format_exc())
|
logger.error(trimmed_format_exc())
|
||||||
|
|
||||||
# -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=-
|
# -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=-
|
||||||
claude_models = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"]
|
if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
|
||||||
if any(item in claude_models for item in AVAIL_LLM_MODELS):
|
|
||||||
try:
|
try:
|
||||||
deepseekapi_noui, deepseekapi_ui = get_predict_function(
|
deepseekapi_noui, deepseekapi_ui = get_predict_function(
|
||||||
api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False
|
api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False
|
||||||
)
|
)
|
||||||
model_info.update({
|
model_info.update({
|
||||||
"deepseek-chat":{
|
"deepseek-chat":{
|
||||||
"fn_with_ui": deepseekapi_ui,
|
"fn_with_ui": deepseekapi_ui,
|
||||||
"fn_without_ui": deepseekapi_noui,
|
"fn_without_ui": deepseekapi_noui,
|
||||||
"endpoint": deepseekapi_endpoint,
|
"endpoint": deepseekapi_endpoint,
|
||||||
"can_multi_thread": True,
|
"can_multi_thread": True,
|
||||||
"max_token": 64000,
|
"max_token": 32000,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
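The availability check rewritten in this hunk is a common refactor: a chain of `or` membership tests becomes `any()` over a list. A small sketch:

```python
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "deepseek-chat"]
deepseek_models = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"]

# Old style: one `or` clause per model name.
enabled_old = ("deepseek-chat" in AVAIL_LLM_MODELS) or ("deepseek-coder" in AVAIL_LLM_MODELS)

# New style: any() over a list; adding a model means editing the list, not the condition.
enabled_new = any(item in deepseek_models for item in AVAIL_LLM_MODELS)

assert enabled_old and enabled_new
```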
@@ -1224,73 +1011,9 @@ if any(item in claude_models for item in AVAIL_LLM_MODELS):
                 "tokenizer": tokenizer_gpt35,
                 "token_cnt": get_token_num_gpt35,
             },
-            "deepseek-reasoner":{
-                "fn_with_ui": deepseekapi_ui,
-                "fn_without_ui": deepseekapi_noui,
-                "endpoint": deepseekapi_endpoint,
-                "can_multi_thread": True,
-                "max_token": 64000,
-                "tokenizer": tokenizer_gpt35,
-                "token_cnt": get_token_num_gpt35,
-                "enable_reasoning": True
-            },
         })
     except:
         logger.error(trimmed_format_exc())

-# -=-=-=-=-=-=- Volcengine alignment support -=-=-=-=-=-=-
-for model in [m for m in AVAIL_LLM_MODELS if m.startswith("volcengine-")]:
-    # This interface allows flexible access to the Volcengine multi-model management console, e.g. AVAIL_LLM_MODELS = ["volcengine-deepseek-r1-250120(max_token=6666)"]
-    # where
-    #   "volcengine-"        is the prefix (required)
-    #   "deepseek-r1-250120" is the model name (required)
-    #   "(max_token=6666)"   is the configuration (optional)
-    model_info_extend = model_info
-    model_info_extend.update({
-        "deepseek-r1-250120": {
-            "max_token": 16384,
-            "enable_reasoning": True,
-            "can_multi_thread": True,
-            "endpoint": volcengine_endpoint,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-        "deepseek-v3-241226": {
-            "max_token": 16384,
-            "enable_reasoning": False,
-            "can_multi_thread": True,
-            "endpoint": volcengine_endpoint,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-    try:
-        origin_model_name, max_token_tmp = read_one_api_model_name(model)
-        # If this is a known model, try to fetch its information
-        original_model_info = model_info_extend.get(origin_model_name.replace("volcengine-", "", 1), None)
-    except:
-        logger.error(f"volcengine模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
-        continue
-
-    volcengine_noui, volcengine_ui = get_predict_function(api_key_conf_name="ARK_API_KEY", max_output_token=8192, disable_proxy=True, model_remove_prefix = ["volcengine-"])
-
-    this_model_info = {
-        "fn_with_ui": volcengine_ui,
-        "fn_without_ui": volcengine_noui,
-        "endpoint": volcengine_endpoint,
-        "can_multi_thread": True,
-        "max_token": 64000,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    }
-
-    # Sync other known information about this model
-    attribute = "has_multimodal_capacity"
-    if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
-    attribute = "enable_reasoning"
-    if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
-    model_info.update({model: this_model_info})

 # -=-=-=-=-=-=- one-api alignment support -=-=-=-=-=-=-
 for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
     # This interface allows flexible access to the one-api multi-model management console, e.g. AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
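Both the removed volcengine block and the surviving one-api block parse decorated names of the form `prefix-model(max_token=N)`. A standalone sketch of that convention; `parse_decorated_model_name` is a hypothetical stand-in for the repo's `read_one_api_model_name`:

```python
import re

def parse_decorated_model_name(model: str):
    """Hypothetical parser for names like 'volcengine-deepseek-r1-250120(max_token=6666)'.
    Returns (name_without_config, max_token or None); raises ValueError if the
    max_token configuration is not an integer."""
    m = re.match(r"^(?P<name>[^()]+?)(\(max_token=(?P<mt>[^)]+)\))?$", model)
    if m is None:
        raise ValueError(f"unrecognized model name: {model}")
    name, mt = m.group("name"), m.group("mt")
    if mt is None:
        return name, None
    if not mt.isdigit():
        raise ValueError(f"max_token configuration of {model} is not an integer")
    return name, int(mt)

print(parse_decorated_model_name("volcengine-deepseek-r1-250120(max_token=6666)"))
# -> ('volcengine-deepseek-r1-250120', 6666)
print(parse_decorated_model_name("one-api-mixtral-8x7b"))
# -> ('one-api-mixtral-8x7b', None)
```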
@@ -1429,9 +1152,9 @@ def LLM_CATCH_EXCEPTION(f):
     """
     Decorator that surfaces errors to the caller
     """
-    def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_silence:bool):
+    def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
         try:
-            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
         except Exception as e:
             tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             observe_window[0] = tb_str
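`LLM_CATCH_EXCEPTION` (unchanged here apart from the argument spelling) is a classic wrap-and-report decorator. The same idea in a compact, runnable form, with the traceback routed into the shared `observe_window` list:

```python
import traceback

def catch_exception(f):
    """Sketch of the LLM_CATCH_EXCEPTION idea: run the wrapped model call and,
    on failure, write the traceback into observe_window[0] so the watching
    thread can display it instead of the whole multi-model query crashing."""
    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence):
        try:
            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
        except Exception:
            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
            observe_window[0] = tb_str   # surfaced in the observation window
            return tb_str                # also returned as the "reply"
    return decorated

@catch_exception
def flaky_model(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence):
    raise RuntimeError("simulated upstream failure")

window = ["", 0.0]
print(flaky_model("hi", {}, [], "", window, True)[:20])  # prints the start of the traceback block
```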
@@ -1439,7 +1162,7 @@ def LLM_CATCH_EXCEPTION(f):
     return decorated


-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_silence:bool=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_slience:bool=False):
     """
     Send to the LLM and wait for the full reply in one shot, without showing intermediate progress. Internally it still streams (as far as possible) so the connection is not cut off midway.
     inputs:
@@ -1461,7 +1184,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
     if '&' not in model:
         # Querying just one LLM (the common case):
         method = model_info[model]["fn_without_ui"]
-        return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
+        return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # Querying several LLMs at once; a bit more verbose, but the idea is the same, so you can skip this else branch
         executor = ThreadPoolExecutor(max_workers=4)
@@ -1478,7 +1201,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
             method = model_info[model]["fn_without_ui"]
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_silence)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
             futures.append(future)

         def mutex_manager(window_mutex, observe_window):
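The `&`-separated multi-model branch touched above fans one query out over a thread pool, one observation window per model. A self-contained sketch of that fan-out; the worker is a stub, not a real bridge function:

```python
from concurrent.futures import ThreadPoolExecutor
import copy

def fake_model_call(inputs, llm_kwargs, window):
    window[0] = f"{llm_kwargs['llm_model']} says: {inputs}"   # stream into the window
    return window[0]

def query_many(inputs, llm_kwargs, model_string="modelA&modelB"):
    models = model_string.split('&')              # "modelA&modelB" -> two workers
    window_mutex = [[""] for _ in models]         # one observation window per model
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = []
        for i, model in enumerate(models):
            llm_kwargs_feedin = copy.deepcopy(llm_kwargs)  # each worker gets its own kwargs
            llm_kwargs_feedin['llm_model'] = model
            futures.append(executor.submit(fake_model_call, inputs, llm_kwargs_feedin, window_mutex[i]))
        return [f.result() for f in futures]

print(query_many("hello", {"llm_model": ""}))
```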
@@ -1555,11 +1278,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,

     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")

-    if llm_kwargs['llm_model'] not in model_info:
-        from toolbox import update_ui
-        chatbot.append([inputs, f"很抱歉,模型 '{llm_kwargs['llm_model']}' 暂不支持<br/>(1) 检查config中的AVAIL_LLM_MODELS选项<br/>(2) 检查request_llms/bridge_all.py中的模型路由"])
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
     method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]  # if this line errors, check the AVAIL_LLM_MODELS option in config

     if additional_fn:  # adjust the model type according to the ModelOverride parameter of the basic functions area
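The guard deleted in this hunk rejected unknown model names before dispatch. The same fail-fast idea in isolation, here as a raising variant rather than a chatbot message:

```python
model_info = {"gpt-3.5-turbo": {"fn_with_ui": lambda *a, **k: None}}

def route(llm_model: str):
    # Fail fast with an actionable message instead of a KeyError deep inside predict().
    if llm_model not in model_info:
        raise KeyError(
            f"model '{llm_model}' is not supported: "
            "(1) check AVAIL_LLM_MODELS in config; "
            "(2) check the model routing in request_llms/bridge_all.py"
        )
    return model_info[llm_model]["fn_with_ui"]

route("gpt-3.5-turbo")        # ok
# route("no-such-model")      # would raise KeyError with the hint above
```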
@@ -23,33 +23,39 @@ class GetGLM3Handle(LocalLLMHandle):
         import os
         import platform

-        LOCAL_MODEL_PATH, LOCAL_MODEL_QUANT, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
-        model_path = LOCAL_MODEL_PATH
+        LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
+        _model_name_ = "THUDM/chatglm3-6b"
+        # if LOCAL_MODEL_QUANT == "INT4":         # INT4
+        #     _model_name_ = "THUDM/chatglm3-6b-int4"
+        # elif LOCAL_MODEL_QUANT == "INT8":       # INT8
+        #     _model_name_ = "THUDM/chatglm3-6b-int8"
+        # else:
+        #     _model_name_ = "THUDM/chatglm3-6b"  # FP16
         with ProxyNetworkActivate("Download_LLM"):
             chatglm_tokenizer = AutoTokenizer.from_pretrained(
-                model_path, trust_remote_code=True
+                _model_name_, trust_remote_code=True
             )
             if device == "cpu":
                 chatglm_model = AutoModel.from_pretrained(
-                    model_path,
+                    _model_name_,
                     trust_remote_code=True,
                     device="cpu",
                 ).float()
             elif LOCAL_MODEL_QUANT == "INT4":  # INT4
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=model_path,
+                    pretrained_model_name_or_path=_model_name_,
                     trust_remote_code=True,
                     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
                 )
             elif LOCAL_MODEL_QUANT == "INT8":  # INT8
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=model_path,
+                    pretrained_model_name_or_path=_model_name_,
                     trust_remote_code=True,
                     quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                 )
             else:
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=model_path,
+                    pretrained_model_name_or_path=_model_name_,
                     trust_remote_code=True,
                     device="cuda",
                 )
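The hunk above switches the ChatGLM3 loader between CPU/FP32, INT4, INT8 and CUDA paths. A condensed sketch of that selection using the real transformers/bitsandbytes API; running it requires those packages and the model weights:

```python
from transformers import AutoModel, AutoTokenizer, BitsAndBytesConfig

def load_chatglm3(quant: str = "FP16", device: str = "cuda",
                  model_name: str = "THUDM/chatglm3-6b"):
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if device == "cpu":
        # CPU path: full-precision float32
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True, device="cpu").float()
    elif quant == "INT4":
        # 4-bit weights via bitsandbytes
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True,
                                          quantization_config=BitsAndBytesConfig(load_in_4bit=True))
    elif quant == "INT8":
        # 8-bit weights via bitsandbytes
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True,
                                          quantization_config=BitsAndBytesConfig(load_in_8bit=True))
    else:
        # default: FP16 weights on the GPU
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True, device="cuda")
    return tokenizer, model.eval()
```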
@@ -1,81 +0,0 @@
-model_name = "ChatGLM4"
-cmd_to_install = """
-`pip install -r request_llms/requirements_chatglm4.txt`
-`pip install modelscope`
-`modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat`
-"""
-
-from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
-
-
-# ------------------------------------------------------------------------------------------------------------------------
-# 🔌💻 Local Model
-# ------------------------------------------------------------------------------------------------------------------------
-class GetGLM4Handle(LocalLLMHandle):
-
-    def load_model_info(self):
-        # 🏃‍♂️ runs in the child process
-        self.model_name = model_name
-        self.cmd_to_install = cmd_to_install
-
-    def load_model_and_tokenizer(self):
-        # 🏃‍♂️ runs in the child process
-        import torch
-        from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
-        import os
-
-        LOCAL_MODEL_PATH, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_DEVICE")
-        model_path = LOCAL_MODEL_PATH
-        chatglm_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-        chatglm_model = AutoModelForCausalLM.from_pretrained(
-            model_path,
-            torch_dtype=torch.bfloat16,
-            low_cpu_mem_usage=True,
-            trust_remote_code=True,
-            device=device
-        ).eval().to(device)
-        self._model = chatglm_model
-        self._tokenizer = chatglm_tokenizer
-        return self._model, self._tokenizer
-
-    def llm_stream_generator(self, **kwargs):
-        # 🏃‍♂️ runs in the child process
-        def adaptor(kwargs):
-            query = kwargs["query"]
-            max_length = kwargs["max_length"]
-            top_p = kwargs["top_p"]
-            temperature = kwargs["temperature"]
-            history = kwargs["history"]
-            return query, max_length, top_p, temperature, history
-
-        query, max_length, top_p, temperature, history = adaptor(kwargs)
-        inputs = self._tokenizer.apply_chat_template([{"role": "user", "content": query}],
-                                                     add_generation_prompt=True,
-                                                     tokenize=True,
-                                                     return_tensors="pt",
-                                                     return_dict=True
-                                                     ).to(self._model.device)
-        gen_kwargs = {"max_length": max_length, "do_sample": True, "top_k": top_p}
-
-        outputs = self._model.generate(**inputs, **gen_kwargs)
-        outputs = outputs[:, inputs['input_ids'].shape[1]:]
-        response = self._tokenizer.decode(outputs[0], skip_special_tokens=True)
-        yield response
-
-    def try_to_import_special_deps(self, **kwargs):
-        # import something that will raise error if the user does not install requirement_*.txt
-        # 🏃‍♂️ runs in the main process
-        import importlib
-        # importlib.import_module('modelscope')
-
-
-# ------------------------------------------------------------------------------------------------------------------------
-# 🔌💻 GPT-Academic Interface
-# ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
-    GetGLM4Handle, model_name, history_format="chatglm3"
-)
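One detail worth flagging in the deleted file: `gen_kwargs` passes `top_p` as `top_k` (`{"max_length": max_length, "do_sample": True, "top_k": top_p}`), which silently misuses the sampling parameter. A corrected sketch of the same `apply_chat_template` plus `generate` flow, assuming a model and tokenizer loaded as in the deleted code:

```python
import torch

def generate_reply(model, tokenizer, query: str, max_length: int, top_p: float, temperature: float) -> str:
    # Build model inputs from a chat template (standard transformers API).
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "content": query}],
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,
    ).to(model.device)
    # Pass top_p as top_p (the deleted code passed it as top_k by mistake).
    gen_kwargs = {"max_length": max_length, "do_sample": True,
                  "top_p": top_p, "temperature": temperature}
    with torch.no_grad():
        outputs = model.generate(**inputs, **gen_kwargs)
    new_tokens = outputs[:, inputs["input_ids"].shape[1]:]   # strip the echoed prompt
    return tokenizer.decode(new_tokens[0], skip_special_tokens=True)
```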
@@ -139,7 +139,7 @@ global glmft_handle
 glmft_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-                                  observe_window:list=[], console_silence:bool=False):
+                                  observe_window:list=[], console_slience:bool=False):
     """
     Multi-threaded entry point
     See request_llms/bridge_all.py for the documentation of this function
@@ -23,13 +23,8 @@ from loguru import logger
 from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
 from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
 from toolbox import ChatBotWithCookies, have_any_recent_upload_image_files, encode_image
-proxies, WHEN_TO_USE_PROXY, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
-    get_conf('proxies', 'WHEN_TO_USE_PROXY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
+proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
+    get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')

-if "Connect_OpenAI" not in WHEN_TO_USE_PROXY:
-    if proxies is not None:
-        logger.error("虽然您配置了代理设置,但不会在连接OpenAI的过程中起作用,请检查WHEN_TO_USE_PROXY配置。")
-    proxies = None
-
 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                   '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
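The deleted lines implement a conditional proxy policy: a globally configured proxy is dropped unless the `WHEN_TO_USE_PROXY` list opts the OpenAI connection in. The gate in isolation (the scene name is taken from the diff):

```python
def gate_proxies(proxies, when_to_use_proxy, scene="Connect_OpenAI"):
    """Return proxies only if `scene` is opted in; otherwise force a direct connection."""
    if scene not in when_to_use_proxy:
        if proxies is not None:
            print(f"proxy configured but not enabled for {scene}; check WHEN_TO_USE_PROXY")
        return None
    return proxies

proxies = {"http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284"}
print(gate_proxies(proxies, ["Download_LLM"]))                    # -> None (plus a warning)
print(gate_proxies(proxies, ["Connect_OpenAI", "Download_LLM"]))  # -> the proxies dict
```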
@@ -125,7 +120,7 @@ def verify_endpoint(endpoint):
         raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
     return endpoint

-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_silence:bool=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
     """
     Send to chatGPT and wait for the full reply in one shot, without showing intermediate progress; internally it streams so the connection is not cut off midway.
     inputs:
@@ -185,25 +180,19 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
                 raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
             else:
                 raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
-        if ('data: [DONE]' in chunk_decoded): break  # api2d & one-api finished normally
+        if ('data: [DONE]' in chunk_decoded): break  # api2d finished normally
         # Read a little information ahead of time (used to detect anomalies)
         if has_choices and not choice_valid:
             # Some junk third-party endpoints produce this kind of error
             continue
         json_data = chunkjson['choices'][0]
         delta = json_data["delta"]
-        if len(delta) == 0:
-            is_termination_certain = False
-            if (has_choices) and (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True
-            if is_termination_certain: break
-            else: continue  # for endpoints that ignore the spec, keep reading here
-
+        if len(delta) == 0: break
         if (not has_content) and has_role: continue
         if (not has_content) and (not has_role): continue  # raise RuntimeError("发现不标准的第三方接口:"+delta)
         if has_content:  # has_role = True/False
             result += delta["content"]
-            if not console_silence: print(delta["content"], end='')
+            if not console_slience: print(delta["content"], end='')
             if observe_window is not None:
                 # Observation window: push the data received so far out to the caller
                 if len(observe_window) >= 1:
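The loop patched above walks server-sent-event lines of the form `data: {json}` and accumulates `choices[0].delta.content`. A minimal offline sketch of that decode step, fed from a list instead of a live requests stream:

```python
import json

def accumulate_sse(lines):
    """Parse OpenAI-style SSE chunks and return the concatenated reply."""
    result = ""
    for raw in lines:
        chunk_decoded = raw.decode()
        if 'data: [DONE]' in chunk_decoded:      # explicit terminator
            break
        payload = json.loads(chunk_decoded[len('data: '):])
        delta = payload['choices'][0]['delta']
        if len(delta) == 0:                      # empty delta also signals the end
            break
        if 'content' in delta:
            result += delta['content']
    return result

stream = [
    b'data: {"choices": [{"delta": {"role": "assistant"}}]}',
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b'data: {"choices": [{"delta": {"content": "lo"}}]}',
    b'data: [DONE]',
]
assert accumulate_sse(stream) == "Hello"
```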
@@ -231,7 +220,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         inputs is the input of this query
         top_p, temperature are internal tuning parameters of chatGPT
         history is the list of previous conversations (note that overly long inputs or history will trigger a token-overflow error)
-        chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the conversation UI directly
+        chatbot is the conversation list shown in the WebUI; modify it and then yeild it out to update the conversation UI directly
         additional_fn indicates which button was clicked; see functional.py for the buttons
     """
     from request_llms.bridge_all import model_info
@@ -296,8 +285,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     history.extend([inputs, ""])

     retry = 0
-    previous_ui_reflesh_time = 0
-    ui_reflesh_min_interval = 0.0
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
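The two deleted counters (`previous_ui_reflesh_time` and `ui_reflesh_min_interval`, spelling as in the source) throttled UI refreshes to a minimum interval. The pattern on its own:

```python
import time

class UiThrottle:
    """Refresh at most once per min_interval seconds; a 0.0 interval disables throttling."""
    def __init__(self, min_interval: float = 0.1):
        self.min_interval = min_interval
        self.previous_refresh_time = 0.0

    def should_refresh(self) -> bool:
        if time.time() - self.previous_refresh_time > self.min_interval:
            self.previous_refresh_time = time.time()
            return True
        return False

throttle = UiThrottle(min_interval=0.1)
refreshed = sum(throttle.should_refresh() for _ in range(10_000))
print(f"{refreshed} refreshes out of 10000 iterations")   # typically 1 within a tight loop
```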
@@ -310,13 +297,13 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg)  # refresh the UI
             if retry > MAX_RETRY: raise TimeoutError


     if not stream:
         # This branch only applies to the o1 models that do not support streaming; it never applies to anything else
         yield from handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history)
         return

     if stream:
-        reach_termination = False  # handles some bizarre new-api anomalies
         gpt_replying_buffer = ""
         is_head_of_the_stream = True
         stream_response = response.iter_lines()
@@ -329,14 +316,11 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 error_msg = chunk_decoded
                 # First rule out a third-party bug where one-api sends no done packet
                 if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
-                    yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的接口,建议选择更稳定的接口。")
-                    if not reach_termination:
-                        reach_termination = True
-                        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
                     break
                 # Otherwise, return the error directly
                 chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-                yield from update_ui(chatbot=chatbot, history=history, msg="接口返回了错误:" + chunk.decode())  # refresh the UI
+                yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode())  # refresh the UI
                 return

             # Read a little information ahead of time (used to detect anomalies)
@@ -346,8 +330,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 # The first frame of the data stream carries no content
                 is_head_of_the_stream = False; continue

-            if "error" in chunk_decoded: logger.error(f"接口返回了未知错误: {chunk_decoded}")
-
             if chunk:
                 try:
                     if has_choices and not choice_valid:
@@ -356,25 +338,14 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                     if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
                         # Something strange was passed in
                         raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
-                    # The former is the termination condition of API2D & One-API; the latter is OpenAI's termination condition
-                    one_api_terminate = ('data: [DONE]' in chunk_decoded)
-                    openai_terminate = (has_choices) and (len(chunkjson['choices'][0]["delta"]) == 0)
-                    if one_api_terminate or openai_terminate:
-                        is_termination_certain = False
-                        if one_api_terminate: is_termination_certain = True  # catch the spec-conforming termination condition
-                        elif (has_choices) and (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True  # catch the spec-conforming termination condition
-                        if is_termination_certain:
-                            reach_termination = True
-                            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
-                            break  # spec-conforming endpoints can break here
-                        else:
-                            continue  # non-conforming endpoints must keep reading here
-                    # At this point we can assume a choice must be present
-                    try:
-                        status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
-                    except:
-                        logger.error(f"一些第三方接口出现这样的错误,兼容一下吧: {chunk_decoded}")
+                    # The former is API2D's termination condition; the latter is OpenAI's termination condition
+                    if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
+                        # Judged to be the end of the data stream; gpt_replying_buffer is complete
+                        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                        break
                     # Process the body of the data stream
+                    status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
+                    # If an exception is thrown here, it is usually because the text is too long; see the output of get_full_error for details
                     if has_content:
                         # Normal case
                         gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
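The left side of this hunk distinguishes a certain termination (a `data: [DONE]` packet, or `finish_reason == 'stop'`) from an empty delta that a non-conforming endpoint may emit mid-stream. The decision table as a small function mirroring the deleted logic:

```python
def classify_termination(chunk_decoded: str, chunkjson: dict) -> str:
    """Return 'break', 'continue', or 'body' for one streamed chunk.
    Mirrors the deleted one_api_terminate / openai_terminate logic."""
    has_choices = bool(chunkjson.get('choices'))
    one_api_terminate = 'data: [DONE]' in chunk_decoded
    openai_terminate = has_choices and len(chunkjson['choices'][0]["delta"]) == 0
    if one_api_terminate or openai_terminate:
        finish = chunkjson['choices'][0].get('finish_reason', 'null') if has_choices else 'null'
        if one_api_terminate or finish == 'stop':
            return 'break'       # termination is certain: log the chat and stop
        return 'continue'        # empty delta without finish_reason: keep reading
    return 'body'                # normal content chunk

assert classify_termination('data: [DONE]', {}) == 'break'
assert classify_termination('', {'choices': [{'delta': {}}]}) == 'continue'
assert classify_termination('', {'choices': [{'delta': {'content': 'x'}, 'finish_reason': None}]}) == 'body'
```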
@@ -382,27 +353,22 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                         # Some third-party endpoints produce this kind of error; tolerate it
                         continue
                     else:
-                        # Beyond this point we are outside what a normal endpoint should ever reach; some third-party endpoints produce this kind of error
-                        if chunkjson['choices'][0]["delta"].get("content", None) is None:
-                            logger.error(f"一些第三方接口出现这样的错误,兼容一下吧: {chunk_decoded}")
-                            continue
+                        # Beyond this point we are outside what a normal endpoint should ever reach; some garbage third-party endpoints produce this kind of error
+                        if chunkjson['choices'][0]["delta"]["content"] is None: continue  # some garbage third-party endpoints produce this error; tolerate it
                         gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]

                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
-                    if time.time() - previous_ui_reflesh_time > ui_reflesh_min_interval:
-                        yield from update_ui(chatbot=chatbot, history=history, msg=status_text)  # refresh the UI
-                        previous_ui_reflesh_time = time.time()
+                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text)  # refresh the UI
                 except Exception as e:
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规")  # refresh the UI
                     chunk = get_full_error(chunk, stream_response)
                     chunk_decoded = chunk.decode()
                     error_msg = chunk_decoded
                     chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-                    logger.error(error_msg)
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json解析异常" + error_msg)  # refresh the UI
+                    logger.error(error_msg)
                     return
-        yield from update_ui(chatbot=chatbot, history=history, msg="完成")  # refresh the UI
         return  # return from stream-branch

 def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
@@ -570,8 +536,6 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st
         "n": 1,
         "stream": stream,
     }
-    openai_force_temperature_one = model_info[llm_kwargs['llm_model']].get('openai_force_temperature_one', False)
-    if openai_force_temperature_one:
-        payload.pop('temperature')
     return headers,payload

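The deleted `openai_force_temperature_one` flag served models that reject a custom temperature (such as the o1 family, which only accepts the default): the key is dropped from the payload so the API default applies. In isolation:

```python
def finalize_payload(payload: dict, model_entry: dict) -> dict:
    # Models flagged with openai_force_temperature_one only accept the default
    # temperature, so the key is removed rather than overwritten.
    if model_entry.get('openai_force_temperature_one', False):
        payload.pop('temperature', None)
    return payload

payload = {"model": "demo", "temperature": 0.3, "n": 1, "stream": True}
print(finalize_payload(payload, {'openai_force_temperature_one': True}))
# -> {'model': 'demo', 'n': 1, 'stream': True}
```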
@@ -16,7 +16,7 @@ import base64
 import glob
 from loguru import logger
 from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \
-    update_ui_latest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat
+    update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat


 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
@@ -67,7 +67,7 @@ def verify_endpoint(endpoint):
     """
     return endpoint

-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_silence=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     raise NotImplementedError


@@ -183,7 +183,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                 # Judged to be the end of the data stream; gpt_replying_buffer is complete
                 lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
-                yield from update_ui_latest_msg(lastmsg, chatbot, history, delay=1)
+                yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
                 log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
                 break
             # Process the body of the data stream
@@ -69,7 +69,7 @@ def decode_chunk(chunk):
     return need_to_pass, chunkjson, is_last_chunk


-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_silence=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
     Send to chatGPT and wait for the full reply in one shot, without showing intermediate progress; internally it streams so the connection is not cut off midway.
     inputs:
@@ -151,7 +151,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         inputs is the input of this query
         top_p, temperature are internal tuning parameters of chatGPT
         history is the list of previous conversations (note that overly long inputs or history will trigger a token-overflow error)
-        chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the conversation UI directly
+        chatbot is the conversation list shown in the WebUI; modify it and then yeild it out to update the conversation UI directly
         additional_fn indicates which button was clicked; see functional.py for the buttons
     """
     if inputs == "": inputs = "空空如也的输入栏"
@@ -68,7 +68,7 @@ def verify_endpoint(endpoint):
         raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
     return endpoint

-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_silence:bool=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
     """
     Send, wait for the reply, and complete in one shot without showing intermediate progress; internally it streams so the connection is not cut off midway.
     inputs:
@@ -111,7 +111,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
         if chunkjson['event_type'] == 'stream-start': continue
         if chunkjson['event_type'] == 'text-generation':
             result += chunkjson["text"]
-            if not console_silence: print(chunkjson["text"], end='')
+            if not console_slience: print(chunkjson["text"], end='')
             if observe_window is not None:
                 # Observation window: push the data received so far out to the caller
                 if len(observe_window) >= 1:
@@ -132,7 +132,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         inputs is the input of this query
         top_p, temperature are internal tuning parameters of chatGPT
         history is the list of previous conversations (note that overly long inputs or history will trigger a token-overflow error)
-        chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the conversation UI directly
+        chatbot is the conversation list shown in the WebUI; modify it and then yeild it out to update the conversation UI directly
         additional_fn indicates which button was clicked; see functional.py for the buttons
     """
     # if is_any_api_key(inputs):
@@ -6,6 +6,7 @@ from toolbox import get_conf
 from request_llms.local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 from loguru import logger
+import torch
 import os

 def download_huggingface_model(model_name, max_retry, local_dir):
@@ -28,7 +29,6 @@ class GetCoderLMHandle(LocalLLMHandle):
         self.cmd_to_install = cmd_to_install

     def load_model_and_tokenizer(self):
-        import torch
         # 🏃‍♂️ runs in the child process
         with ProxyNetworkActivate('Download_LLM'):
             from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
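This file pairs `TextIteratorStreamer` with a background `Thread` (both imported above) to stream tokens out of a blocking `model.generate` call. The canonical shape of that pattern with the real transformers API; the model and tokenizer are assumed to be loaded already:

```python
from threading import Thread
from transformers import TextIteratorStreamer

def stream_generate(model, tokenizer, prompt: str, max_new_tokens: int = 128):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # skip_prompt=True: only newly generated text is yielded, not the prompt echo.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    kwargs = dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)
    Thread(target=model.generate, kwargs=kwargs).start()  # generate() blocks, so run it aside
    for text_fragment in streamer:                        # iterate as tokens arrive
        yield text_fragment
```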
@@ -8,7 +8,7 @@ import os
 import time
 from request_llms.com_google import GoogleChatInit
 from toolbox import ChatBotWithCookies
-from toolbox import get_conf, update_ui, update_ui_latest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat, encode_image
+from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat, encode_image

 proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
@@ -16,7 +16,7 @@ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check


 def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[],
-                                  console_silence:bool=False):
+                                  console_slience:bool=False):
     # Check the API_KEY
     if get_conf("GEMINI_API_KEY") == "":
         raise ValueError(f"请配置 GEMINI_API_KEY。")
@@ -60,7 +60,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith

     # Check the API_KEY
     if get_conf("GEMINI_API_KEY") == "":
-        yield from update_ui_latest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
+        yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
         return

     # Adapt to the polishing area
@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
             if self.jittorllms_model is None:
                 device = get_conf('LOCAL_MODEL_DEVICE')
                 from .jittorllms.models import get_model
-                # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
                 args_dict = {'model': 'llama'}
                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
                 self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global llama_glm_handle
 llama_glm_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-                                  observe_window:list=[], console_silence:bool=False):
+                                  observe_window:list=[], console_slience:bool=False):
     """
     Multi-threaded entry point
     See request_llms/bridge_all.py for the documentation of this function
@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
             if self.jittorllms_model is None:
                 device = get_conf('LOCAL_MODEL_DEVICE')
                 from .jittorllms.models import get_model
-                # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
                 args_dict = {'model': 'pangualpha'}
                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
                 self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global pangu_glm_handle
 pangu_glm_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-                                  observe_window:list=[], console_silence:bool=False):
+                                  observe_window:list=[], console_slience:bool=False):
     """
     Multi-threaded entry point
     See request_llms/bridge_all.py for the documentation of this function
@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
             if self.jittorllms_model is None:
                 device = get_conf('LOCAL_MODEL_DEVICE')
                 from .jittorllms.models import get_model
-                # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
                 args_dict = {'model': 'chatrwkv'}
                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
                 self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global rwkv_glm_handle
 rwkv_glm_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-                                  observe_window:list=[], console_silence:bool=False):
+                                  observe_window:list=[], console_slience:bool=False):
     """
     Multi-threaded entry point
     See request_llms/bridge_all.py for the documentation of this function
Some files were not shown because too many files have changed in this diff.