Compare commits
47 Commits
binary-husky … huggingface
| Author | SHA1 | Date |
|---|---|---|
|  | 68ff3660ae |  |
|  | d0703ef32d |  |
|  | 47289f863d |  |
|  | eaf27df32a |  |
|  | d245958dfa |  |
|  | 8dd4d48474 |  |
|  | 15f14f51ff |  |
|  | 1de63835fc |  |
|  | 17d0a32f36 |  |
|  | 971ac206f3 |  |
|  | c89c62b914 |  |
|  | f8946c13f2 |  |
|  | 098d8654b3 |  |
|  | ac8830e30e |  |
|  | 5c0a0882c8 |  |
|  | f5357f67ca |  |
|  | a1fe67d7f2 |  |
|  | cbee909bc8 |  |
|  | 8a5e8bc5c1 |  |
|  | 96c1852abc |  |
|  | cd145c0794 |  |
|  | 7a4d4ad956 |  |
|  | 9f9848c6e9 |  |
|  | 94425c49fd |  |
|  | e874a16050 |  |
|  | c28388c5fe |  |
|  | b4a56d391b |  |
|  | 7075092f86 |  |
|  | 1086ff8092 |  |
|  | 3a22446b47 |  |
|  | 7842cf03cc |  |
|  | 54f55c32f2 |  |
|  | 94318ff0a2 |  |
|  | 5be6b83762 |  |
|  | 6f18d1716e |  |
|  | 90944bd744 |  |
|  | 752937cb70 |  |
|  | c584cbac5b |  |
|  | 309d12b404 |  |
|  | 52ea0acd61 |  |
|  | 9f5e3e0fd5 |  |
|  | 315e78e5d9 |  |
|  | b6b4ba684a |  |
|  | 2281a5ca7f |  |
|  | 49558686f2 |  |
|  | b050ccedb5 |  |
|  | ae56cab6f4 |  |
16  .github/ISSUE_TEMPLATE/bug_report.yml (vendored)

```diff
@@ -11,8 +11,6 @@ body:
 - Please choose | 请选择
 - Pip Install (I ignored requirements.txt)
 - Pip Install (I used latest requirements.txt)
 - OneKeyInstall (一键安装脚本-windows)
 - OneKeyInstall (一键安装脚本-mac)
 - Anaconda (I ignored requirements.txt)
 - Anaconda (I used latest requirements.txt)
 - Docker(Windows/Mac)
@@ -34,7 +32,7 @@ body:
 - Others | 非最新版
 validations:
   required: true

 - type: dropdown
   id: os
   attributes:
@@ -47,7 +45,7 @@ body:
 - Docker
 validations:
   required: true

 - type: textarea
   id: describe
   attributes:
@@ -55,7 +53,7 @@ body:
 description: Describe the bug | 简述
 validations:
   required: true

 - type: textarea
   id: screenshot
   attributes:
@@ -63,9 +61,15 @@ body:
 description: Screen Shot | 有帮助的截图
 validations:
   required: true

 - type: textarea
   id: traceback
   attributes:
     label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
     description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
```
5  .github/ISSUE_TEMPLATE/feature_request.yml (vendored)

```diff
@@ -21,3 +21,8 @@ body:
 attributes:
   label: Feature Request | 功能请求
   description: Feature Request | 功能请求
```
.github/workflows/build-with-all-capacity-beta.yml (vendored), deleted (@@ -1,44 +0,0 @@):

```yaml
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity-beta

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacityBeta
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
44  .github/workflows/build-with-all-capacity.yml (vendored), deleted (@@ -1,44 +0,0 @@):

```yaml
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacity
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
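A quick note on the templating in the workflow above: `${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}` expands to a GHCR path derived from the repository slug. A toy rendering in Python, where the repository name is taken from URLs elsewhere in this compare and the expansion rule is standard GitHub Actions behavior:

```python
REGISTRY = "ghcr.io"
github_repository = "binary-husky/gpt_academic"        # value of ${{ github.repository }}
IMAGE_NAME = github_repository + "_with_all_capacity"  # env block in the deleted workflow
print(f"{REGISTRY}/{IMAGE_NAME}")
# -> ghcr.io/binary-husky/gpt_academic_with_all_capacity
```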
44  .github/workflows/build-with-audio-assistant.yml (vendored), deleted (@@ -1,44 +0,0 @@):

```yaml
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-audio-assistant

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_audio_assistant

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+NoLocal+AudioAssistant
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
2  .github/workflows/build-with-chatglm.yml (vendored)

```diff
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: build-with-chatglm
+name: Create and publish a Docker image for ChatGLM support

 on:
   push:
```
2  .github/workflows/build-with-jittorllms.yml (vendored)

```diff
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: build-with-jittorllms
+name: Create and publish a Docker image for ChatGLM support

 on:
   push:
```
2  .github/workflows/build-with-latex.yml (vendored)

```diff
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: build-with-latex
+name: Create and publish a Docker image for Latex support

 on:
   push:
```
.github/workflows/build-without-local-llms.yml (vendored)

```diff
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: build-without-local-llms
+name: Create and publish a Docker image

 on:
   push:
```
25  .github/workflows/stale.yml (vendored), deleted (@@ -1,25 +0,0 @@):

```yaml
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale

name: 'Close stale issues and PRs'
on:
  schedule:
    - cron: '*/5 * * * *'

jobs:
  stale:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: read

    steps:
      - uses: actions/stale@v8
        with:
          stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 1 days.'
          days-before-stale: 100
          days-before-close: 1
          debug-only: true
```
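To make the deleted policy concrete: with `days-before-stale: 100` and `days-before-close: 1`, an issue untouched for 100 days is marked stale and is closed one day later unless activity resumes; note also that `debug-only: true` puts actions/stale in dry-run mode, so this workflow only logged what it would have done. A minimal sketch of that timeline, with the helper written purely for illustration:

```python
from datetime import datetime, timedelta

DAYS_BEFORE_STALE = 100  # values from the deleted workflow above
DAYS_BEFORE_CLOSE = 1

def issue_state(last_activity: datetime, now: datetime) -> str:
    idle = now - last_activity
    if idle >= timedelta(days=DAYS_BEFORE_STALE + DAYS_BEFORE_CLOSE):
        return "closed"  # stale long enough to be auto-closed
    if idle >= timedelta(days=DAYS_BEFORE_STALE):
        return "stale"   # warned, one day of grace remains
    return "open"
```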
7  .gitignore (vendored)

```diff
@@ -146,10 +146,7 @@ debug*
 private*
 crazy_functions/test_project/pdf_and_word
 crazy_functions/test_samples
-request_llms/jittorllms
+request_llm/jittorllms
 multi-language
-request_llms/moss
+request_llm/moss
-media
 flagged
-request_llms/ChatGLM-6b-onnx-u8s8
-.pre-commit-config.yaml
```
32  .pre-commit-config.yaml (new file, @@ -0,0 +1,32 @@):

```yaml
default_language_version:
  python: python3
exclude: 'dotnet'
ci:
  autofix_prs: true
  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
  autoupdate_schedule: 'quarterly'

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-ast
      # - id: check-yaml
      - id: check-toml
      - id: check-json
      - id: check-byte-order-marker
        exclude: .gitignore
      - id: check-merge-conflict
      - id: detect-private-key
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: no-commit-to-branch
  - repo: https://github.com/psf/black
    rev: 23.3.0
    hooks:
      - id: black
  # - repo: https://github.com/charliermarsh/ruff-pre-commit
  #   rev: v0.0.261
  #   hooks:
  #     - id: ruff
  #       args: ["--fix"]
```
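As a usage note for the new config (standard pre-commit CLI, nothing project-specific assumed): the hooks are registered once per clone and can then be run over the whole tree. A minimal sketch driving it from Python, for instance in a CI step:

```python
import subprocess

# Register the git hook defined by .pre-commit-config.yaml, then run every
# hook (check-ast, trailing-whitespace, black, ...) against all files.
subprocess.check_call(["pre-commit", "install"])
subprocess.check_call(["pre-commit", "run", "--all-files"])
```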
22  README.md

```diff
@@ -1,8 +1,20 @@
-> [!IMPORTANT]
-> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
-> 2024.1.17: 恭迎GLM4,全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
-> 2024.1.17: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
-> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
+---
+title: GPT-Academic
+emoji: 😻
+colorFrom: blue
+colorTo: blue
+sdk: gradio
+sdk_version: 3.32.0
+app_file: app.py
+pinned: false
+---

 # ChatGPT 学术优化
+> **Note**
+>
+> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
+>
+> 2023.12.26: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。

 <br>
```
112  main.py → app.py (renamed)

```diff
@@ -14,23 +14,25 @@ help_menu_description = \
 </br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""

 def main():
+    import subprocess, sys
+    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://public.agent-matrix.com/publish/gradio-3.32.8-py3-none-any.whl'])
     import gradio as gr
-    if gr.__version__ not in ['3.32.6', '3.32.7', '3.32.8']:
+    if gr.__version__ not in ['3.32.8']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
     from request_llms.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
-    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
+    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
     DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
     INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')

     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     from check_proxy import get_current_version
-    from themes.theme import adjust_theme, advanced_css, theme_declaration
-    from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
+    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
+    from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
     from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
@@ -65,7 +67,7 @@ def main():
     proxy_info = check_proxy(proxies)

     gr_L1 = lambda: gr.Row().style()
-    gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
+    gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
     if LAYOUT == "TOP-DOWN":
         gr_L1 = lambda: DummyWith()
         gr_L2 = lambda scale, elem_id: gr.Row()
@@ -76,7 +78,8 @@ def main():
     predefined_btns = {}
     with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
-        secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
+        gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
+        secret_css, dark_mode, py_pickle_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
         cookies = gr.State(load_chat_cookies())
         with gr_L1():
             with gr_L2(scale=2, elem_id="gpt-chat"):
@@ -86,7 +89,7 @@ def main():
             with gr_L2(scale=1, elem_id="gpt-panel"):
                 with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
                     with gr.Row():
-                        txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
+                        txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持多个OpenAI密钥共存。").style(container=False)
                     with gr.Row():
                         submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
                     with gr.Row():
@@ -98,6 +101,7 @@ def main():
                 audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
             with gr.Row():
                 status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
+
             with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
                 with gr.Row():
                     for k in range(NUM_CUSTOM_BASIC_BTN):
@@ -142,7 +146,6 @@ def main():
                 with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
                     file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
-
         with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
             with gr.Row():
                 with gr.Tab("上传文件", elem_id="interact-panel"):
@@ -158,10 +161,11 @@ def main():

                 with gr.Tab("界面外观", elem_id="interact-panel"):
                     theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
-                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
-                                                  value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
-                    checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
-                                                    value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
+                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
+                    opt = ["自定义菜单"]
+                    value=[]
+                    if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"]
+                    checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
                     dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
                     dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
                 with gr.Tab("帮助", elem_id="interact-panel"):
@@ -178,7 +182,7 @@ def main():
                 submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
                 resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
                 stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
-                clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
+                clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")

         with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
@@ -192,10 +196,12 @@ def main():
                     basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
                 with gr.Column(scale=1, min_width=70):
                     basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
                     basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm")
-            def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
+                    basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
+            def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
                 ret = {}
                 # 读取之前的自定义按钮
                 customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
                 # 更新新的自定义按钮
                 customize_fn_overwrite_.update({
                     basic_btn_dropdown_:
                     {
@@ -205,20 +211,34 @@ def main():
                     }
                 }
                 )
                 cookies_.update(customize_fn_overwrite_)
+                if clean_up:
+                    customize_fn_overwrite_ = {}
+                    cookies_.update(customize_fn_overwrite_) # 更新cookie
+                visible = (not clean_up) and (basic_fn_title != "")
                 if basic_btn_dropdown_ in customize_btns:
-                    ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    # 是自定义按钮,不是预定义按钮
+                    ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
                 else:
-                    ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    # 是预定义按钮
+                    ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
                 ret.update({cookies: cookies_})
                 try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
                 except: persistent_cookie_ = {}
                 persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
                 persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
-                ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
+                ret.update({py_pickle_cookie: persistent_cookie_}) # write persistent cookie
                 return ret

             def reflesh_btn(persistent_cookie_, cookies_):
+            # update btn
+            h = basic_fn_confirm.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
+                                       [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
+            h.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")
+            # clean up btn
+            h2 = basic_fn_clean.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
+                                      [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
+            h2.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")

             def persistent_cookie_reload(persistent_cookie_, cookies_):
                 ret = {}
                 for k in customize_btns:
                     ret.update({customize_btns[k]: gr.update(visible=False, value="")})
@@ -236,25 +256,16 @@ def main():
                 else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
                 return ret

-            basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
-            h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
-                                       [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
-            # save persistent cookie
-            h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")

         # 功能区显示开关与功能区的互动
         def fn_area_visibility(a):
             ret = {}
             ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
             ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
             ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
             ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
-            ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
-            ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
             ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
-            if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
             return ret
-        checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
+        checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] )
+        checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide)

         # 功能区显示开关与功能区的互动
         def fn_area_visibility_2(a):
@@ -262,6 +273,7 @@ def main():
             ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
             return ret
         checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
+        checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)

         # 整理反复出现的控件句柄组合
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
@@ -272,15 +284,17 @@ def main():
         cancel_handles.append(txt2.submit(**predict_args))
         cancel_handles.append(submitBtn.click(**predict_args))
         cancel_handles.append(submitBtn2.click(**predict_args))
-        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
-        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
-        clearBtn.click(lambda: ("",""), None, [txt, txt2])
-        clearBtn2.click(lambda: ("",""), None, [txt, txt2])
+        resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset)   # 先在前端快速清除chatbot&status
+        resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset)  # 先在前端快速清除chatbot&status
+        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])  # 再在后端清除history
+        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
+        clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
+        clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
         if AUTO_CLEAR_TXT:
-            submitBtn.click(lambda: ("",""), None, [txt, txt2])
-            submitBtn2.click(lambda: ("",""), None, [txt, txt2])
-            txt.submit(lambda: ("",""), None, [txt, txt2])
-            txt2.submit(lambda: ("",""), None, [txt, txt2])
+            submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
+            submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
+            txt.submit(None, None, [txt, txt2], _js=js_code_clear)
+            txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
         # 基础功能区的回调函数注册
         for k in functional:
             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
@@ -360,10 +374,10 @@ def main():
             audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])

-        demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
-        darkmode_js = js_code_for_darkmode_init
-        demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
-        demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
+        demo.load(init_cookie, inputs=[cookies], outputs=[cookies])
+        demo.load(persistent_cookie_reload, inputs = [py_pickle_cookie, cookies],
+                  outputs = [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
+        demo.load(None, inputs=[dark_mode], outputs=None, _js="""(dark_mode)=>{apply_cookie_for_checkbox(dark_mode);}""") # 配置暗色主题或亮色主题
         demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')

         # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
@@ -382,16 +396,8 @@ def main():
     threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块

     run_delayed_tasks()
-    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
-        quiet=True,
-        server_name="0.0.0.0",
-        ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
-        ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
-        ssl_verify=False,
-        server_port=PORT,
-        favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
-        auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
+    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])

 # 如果需要在二级路径下运行
 # CUSTOM_PATH = get_conf('CUSTOM_PATH')
```
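One orientation note on the cookie plumbing above: assign_btn serializes the custom-button dict through to_cookie_str/from_cookie_str (imported from themes.theme; their implementation is not part of this diff), and the renamed cookie is py_pickle_cookie, which suggests a pickle-based encoding. A hypothetical sketch of such a round-trip, purely as an assumption for illustration:

```python
import base64, pickle

def to_cookie_str(d: dict) -> str:
    # hypothetical: pack a small settings dict into a cookie-safe ASCII string
    return base64.b64encode(pickle.dumps(d)).decode("ascii")

def from_cookie_str(s: str) -> dict:
    # hypothetical inverse of to_cookie_str
    return pickle.loads(base64.b64decode(s.encode("ascii")))

state = {"custom_bnt": {"自定义按钮1": {"Title": "翻译"}}}
assert from_cookie_str(to_cookie_str(state)) == state
```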
54  config.py

```diff
@@ -2,8 +2,8 @@
 以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。
 读取优先级:环境变量 > config_private.py > config.py
 --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
-All the following configurations also support using environment variables to override,
-and the environment variable configuration format can be seen in docker-compose.yml.
+All the following configurations also support using environment variables to override,
+and the environment variable configuration format can be seen in docker-compose.yml.
 Configuration reading priority: environment variable > config_private.py > config.py
 """
@@ -11,6 +11,10 @@
-API_KEY = "此处填API密钥"    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
+# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
+API_KEY = "此处填API密钥"    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"

 # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改
 USE_PROXY = False
 if USE_PROXY:
@@ -33,7 +37,7 @@ else:
 # ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------

 # 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
-# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
+# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
 # 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
 API_URL_REDIRECT = {}
@@ -45,7 +49,7 @@ DEFAULT_WORKER_NUM = 3

 # 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
 # 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
-THEME = "Default"
+THEME = "Chuanhu-Small-and-Beautiful"
 AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
@@ -66,7 +70,7 @@ LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)

 # 暗色模式 / 亮色模式
-DARK_MODE = True
+DARK_MODE = False

 # 发送请求到OpenAI后,等待多久判定为超时
@@ -80,6 +84,9 @@ WEB_PORT = -1
 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
 MAX_RETRY = 2

+# OpenAI模型选择是(gpt4现在只对申请成功的人开放)
+LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo", "spark", "azure-gpt-3.5"]

 # 插件分类默认选项
 DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
@@ -89,11 +96,11 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
                     "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "gemini-pro", "chatglm3", "claude-2", "zhipuai"]
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
+                    "gemini-pro", "chatglm3", "claude-2"]
 # P.S. 其他可用的模型还包括 [
 # "moss", "qwen-turbo", "qwen-plus", "qwen-max"
-# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
+# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
 # "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
 # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
 # ]
@@ -136,7 +143,7 @@ AUTO_CLEAR_TXT = False

 # 加一个live2d装饰
-ADD_WAIFU = False
+ADD_WAIFU = True

 # 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
@@ -158,7 +165,7 @@ API_ORG = ""

 # 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md
-SLACK_CLAUDE_BOT_ID = ''
+SLACK_CLAUDE_BOT_ID = ''
 SLACK_CLAUDE_USER_TOKEN = ''
@@ -195,7 +202,7 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

 # 接入智谱大模型
 ZHIPUAI_API_KEY = ""
-ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
+ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写

 # # 火山引擎YUNQUE大模型
@@ -208,6 +215,11 @@ ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
 ANTHROPIC_API_KEY = ""

+# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
+MATHPIX_APPID = ""
+MATHPIX_APPKEY = ""

 # 自定义API KEY格式
 CUSTOM_API_KEY_PATTERN = ""
@@ -217,15 +229,15 @@ GEMINI_API_KEY = ''

 # HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
-HUGGINGFACE_ACCESS_TOKEN = "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"
+HUGGINGFACE_ACCESS_TOKEN = ""

 # GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档
 # 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
 GROBID_URLS = [
     "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
-    "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
-    "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
+    "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
+    "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
 ]
@@ -246,7 +258,7 @@ PATH_LOGGING = "gpt_log"

 # 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
-WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
                      "Warmup_Modules", "Nougat_Download", "AutoGen"]
@@ -297,9 +309,8 @@ NUM_CUSTOM_BASIC_BTN = 4
 │   ├── BAIDU_CLOUD_API_KEY
 │   └── BAIDU_CLOUD_SECRET_KEY
 │
-├── "zhipuai" 智谱AI大模型chatglm_turbo
-│   ├── ZHIPUAI_API_KEY
-│   └── ZHIPUAI_MODEL
+├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
+│   └── ZHIPUAI_API_KEY
 │
 ├── "qwen-turbo" 等通义千问大模型
 │   └── DASHSCOPE_API_KEY
@@ -311,7 +322,7 @@ NUM_CUSTOM_BASIC_BTN = 4
 ├── NEWBING_STYLE
 └── NEWBING_COOKIES

 本地大模型示意图
 │
 ├── "chatglm3"
@@ -351,6 +362,9 @@ NUM_CUSTOM_BASIC_BTN = 4
 │   └── ALIYUN_SECRET
 │
 └── PDF文档精准解析
-    └── GROBID_URLS
+    ├── GROBID_URLS
+    ├── MATHPIX_APPID
+    └── MATHPIX_APPKEY

 """
```
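The docstring above pins the read priority: environment variable > config_private.py > config.py. A minimal sketch of that lookup order, assuming a hypothetical helper name (the project's real reader lives in toolbox.get_conf and is not shown in this diff):

```python
import importlib, os

def read_single_conf(key, default=None):
    # 1) environment variables override everything
    if key in os.environ:
        return os.environ[key]
    # 2) then a user's private copy, if present
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, key):
            return getattr(private, key)
    except ImportError:
        pass
    # 3) finally the repository defaults
    return getattr(importlib.import_module("config"), key, default)
```

A real implementation would also coerce environment strings to the declared types (booleans, lists), which this sketch omits.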
crazy_functional.py

```diff
@@ -70,11 +70,11 @@ def get_crazy_functions():
         "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
         "Function": HotReload(清除缓存),
     },
-    "生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": {
+    "生成多种Mermaid图表(从当前对话或路径(.pdf/.md/.docx)中生产图表)": {
         "Group": "对话",
         "Color": "stop",
         "AsButton": False,
-        "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断",
+        "Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断",
         "Function": HotReload(生成多种Mermaid图表),
         "AdvancedArgs": True,
         "ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
@@ -532,8 +532,9 @@ def get_crazy_functions():
         print("Load function plugin failed")

     try:
-        from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
-        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
+        from crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比
+        from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF
+        from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF

         function_plugins.update(
             {
@@ -550,9 +551,9 @@ def get_crazy_functions():
                     "Color": "stop",
                     "AsButton": False,
                     "AdvancedArgs": True,
-                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
-                                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
-                                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                    "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+                                    r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+                                    r'If the term "agent" is used in this section, it should be translated to "智能体". ',
                     "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
                     "Function": HotReload(Latex翻译中文并重新编译PDF),
                 },
@@ -561,11 +562,22 @@ def get_crazy_functions():
                     "Color": "stop",
                     "AsButton": False,
                     "AdvancedArgs": True,
-                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
-                                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
-                                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                    "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+                                    r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+                                    r'If the term "agent" is used in this section, it should be translated to "智能体". ',
                     "Info": "本地Latex论文精细翻译 | 输入参数是路径",
                     "Function": HotReload(Latex翻译中文并重新编译PDF),
                 },
+                "PDF翻译中文并重新编译PDF(上传PDF)[需Latex]": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+                                    r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+                                    r'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                    "Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径",
+                    "Function": HotReload(PDF翻译中文并重新编译PDF)
+                }
             }
         )
```
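One detail worth flagging in the hunks above: the new ArgsReminder values drop the explicit `+` and rely on Python's implicit adjacent-literal concatenation, using `r"..."` raw strings so the translation commands survive verbatim. A tiny self-contained illustration:

```python
# Adjacent string literals inside parentheses are concatenated at compile time.
reminder = (r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
            r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
            r'If the term "agent" is used in this section, it should be translated to "智能体". ')
assert reminder.startswith("如果有必要") and "高级参数区" in reminder
```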
```diff
@@ -137,7 +137,7 @@ def get_recent_file_prompt_support(chatbot):
     return path

 @CatchException
-def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -145,7 +145,7 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    user_request    当前用户的请求信息(IP地址等)
+    web_port        当前软件运行的端口号
     """
     raise NotImplementedError
```
106  crazy_functions/Langchain知识库.py (new file, @@ -0,0 +1,106 @@):

```python
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything


@CatchException
def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    history = []    # 清空历史,以免输入溢出

    # < --------------------读取参数--------------- >
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    kai_id = plugin_kwargs.get("advanced_arg", 'default')

    chatbot.append((f"向`{kai_id}`知识库中添加文件。", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。"))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # resolve deps
    try:
        from zh_langchain import construct_vector_store
        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
        from .crazy_utils import knowledge_archive_interface
    except Exception as e:
        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        from .crazy_utils import try_install_deps
        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
        return

    # < --------------------读取文件--------------- >
    file_manifest = []
    spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"]
    for sp in spl:
        _, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}')
        file_manifest += file_manifest_tmp

    if len(file_manifest) == 0:
        chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # < -------------------预热文本向量化模组--------------- >
    chatbot.append(['<br/>'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    print('Checking Text2vec ...')
    from langchain.embeddings.huggingface import HuggingFaceEmbeddings
    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
        HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

    # < -------------------构建知识库--------------- >
    chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    print('Establishing knowledge archive ...')
    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
        kai = knowledge_archive_interface()
        kai.feed_archive(file_manifest=file_manifest, id=kai_id)
    kai_files = kai.get_loaded_file()
    kai_files = '<br/>'.join(kai_files)
    # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
    # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
    # chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
    # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
    chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新


@CatchException
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
    # resolve deps
    try:
        from zh_langchain import construct_vector_store
        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
        from .crazy_utils import knowledge_archive_interface
    except Exception as e:
        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        from .crazy_utils import try_install_deps
        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
        return

    # < ------------------- --------------- >
    kai = knowledge_archive_interface()

    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    kai_id = plugin_kwargs.get("advanced_arg", 'default')
    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)

    chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=prompt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=system_prompt
    )
    history.extend((prompt, gpt_say))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
```
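Condensing the two plugins above: all knowledge-base work goes through knowledge_archive_interface (imported from .crazy_utils; its implementation is not part of this diff). The calls below are exactly the ones the plugins make, with file names invented for illustration:

```python
from crazy_functions.crazy_utils import knowledge_archive_interface

kai = knowledge_archive_interface()
# build or extend the archive named "default" from a list of readable files
kai.feed_archive(file_manifest=["notes.md", "paper.pdf"], id="default")
print(kai.get_loaded_file())                    # files currently in the archive
# retrieval: returns a raw response plus an augmented prompt for the LLM
resp, prompt = kai.answer_with_archive_by_id("什么是知识库?", "default")
```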
484
crazy_functions/Latex输出PDF.py
Normal file
484
crazy_functions/Latex输出PDF.py
Normal file
@@ -0,0 +1,484 @@
|
||||
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
|
||||
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
|
||||
from functools import partial
|
||||
import glob, os, requests, time, json, tarfile
|
||||
|
||||
pj = os.path.join
|
||||
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
|
||||
|
||||
|
||||
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
|
||||
def switch_prompt(pfg, mode, more_requirement):
|
||||
"""
|
||||
Generate prompts and system prompts based on the mode for proofreading or translating.
|
||||
Args:
|
||||
- pfg: Proofreader or Translator instance.
|
||||
- mode: A string specifying the mode, either 'proofread' or 'translate_zh'.
|
||||
|
||||
Returns:
|
||||
- inputs_array: A list of strings containing prompts for users to respond to.
|
||||
- sys_prompt_array: A list of strings containing prompts for system prompts.
|
||||
"""
|
||||
n_split = len(pfg.sp_file_contents)
|
||||
if mode == 'proofread_en':
|
||||
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
||||
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
|
||||
r"Answer me only with the revised text:" +
|
||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
||||
elif mode == 'translate_zh':
|
||||
inputs_array = [
|
||||
r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
|
||||
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
|
||||
r"Answer me only with the translated text:" +
|
||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||
sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
|
||||
else:
|
||||
assert False, "未知指令"
|
||||
return inputs_array, sys_prompt_array
|
||||
|
||||
|
||||
def desend_to_extracted_folder_if_exist(project_folder):
|
||||
"""
|
||||
Descend into the extracted folder if it exists, otherwise return the original folder.
|
||||
|
||||
Args:
|
||||
- project_folder: A string specifying the folder path.
|
||||
|
||||
Returns:
|
||||
- A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
|
||||
"""
|
||||
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
|
||||
if len(maybe_dir) == 0: return project_folder
|
||||
if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
|
||||
return project_folder
|
||||
|
||||
|
||||
def move_project(project_folder, arxiv_id=None):
|
||||
"""
|
||||
Create a new work folder and copy the project folder to it.
|
||||
|
||||
Args:
|
||||
- project_folder: A string specifying the folder path of the project.
|
||||
|
||||
Returns:
|
||||
- A string specifying the path to the new work folder.
|
||||
"""
|
||||
import shutil, time
|
||||
time.sleep(2) # avoid time string conflict
|
||||
if arxiv_id is not None:
|
||||
new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
|
||||
else:
|
||||
new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
|
||||
try:
|
||||
shutil.rmtree(new_workfolder)
|
||||
except:
|
||||
pass
|
||||
|
||||
# align subfolder if there is a folder wrapper
|
||||
items = glob.glob(pj(project_folder, '*'))
|
||||
items = [item for item in items if os.path.basename(item) != '__MACOSX']
|
||||
if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1:
|
||||
if os.path.isdir(items[0]): project_folder = items[0]
|
||||
|
||||
shutil.copytree(src=project_folder, dst=new_workfolder)
|
||||
return new_workfolder
|
||||
|
||||
|
||||
def arxiv_download(chatbot, history, txt, allow_cache=True):
|
||||
def check_cached_translation_pdf(arxiv_id):
|
||||
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
|
||||
if not os.path.exists(translation_dir):
|
||||
os.makedirs(translation_dir)
|
||||
target_file = pj(translation_dir, 'translate_zh.pdf')
|
||||
if os.path.exists(target_file):
|
||||
promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
|
||||
target_file_compare = pj(translation_dir, 'comparison.pdf')
|
||||
if os.path.exists(target_file_compare):
|
||||
promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
|
||||
return target_file
|
||||
return False
|
||||
|
||||
def is_float(s):
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
|
||||
txt = 'https://arxiv.org/abs/' + txt.strip()
|
||||
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
|
||||
txt = 'https://arxiv.org/abs/' + txt[:10]
|
||||
|
||||
if not txt.startswith('https://arxiv.org'):
|
||||
return txt, None # 是本地文件,跳过下载
|
||||
|
||||
# <-------------- inspect format ------------->
|
||||
chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
|
||||
yield from update_ui(chatbot=chatbot, history=history)
|
||||
time.sleep(1) # 刷新界面
|
||||
|
||||
url_ = txt # https://arxiv.org/abs/1707.06690
|
||||
if not txt.startswith('https://arxiv.org/abs/'):
|
||||
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
|
||||
yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
|
||||
return msg, None
|
||||
# <-------------- set format ------------->
|
||||
arxiv_id = url_.split('/abs/')[-1]
|
||||
if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
|
||||
cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
|
||||
if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id
|
||||
|
||||
url_tar = url_.replace('/abs/', '/e-print/')
|
||||
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
|
||||
extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
|
||||
os.makedirs(translation_dir, exist_ok=True)
|
||||
|
||||
# <-------------- download arxiv source file ------------->
|
||||
dst = pj(translation_dir, arxiv_id + '.tar')
|
||||
if os.path.exists(dst):
|
||||
yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
|
||||
else:
|
||||
yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
|
||||
proxies = get_conf('proxies')
|
||||
r = requests.get(url_tar, proxies=proxies)
|
||||
with open(dst, 'wb+') as f:
|
||||
f.write(r.content)
|
||||
# <-------------- extract file ------------->
|
||||
yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面
|
||||
from toolbox import extract_archive
|
||||
extract_archive(file_path=dst, dest_dir=extract_dst)
|
||||
return extract_dst, arxiv_id
|
||||
|
||||
|
||||
def pdf2tex_project(pdf_file_path):
|
||||
# Mathpix API credentials
|
||||
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
|
||||
headers = {"app_id": app_id, "app_key": app_key}
|
||||
|
||||
# Step 1: Send PDF file for processing
|
||||
options = {
|
||||
"conversion_formats": {"tex.zip": True},
|
||||
"math_inline_delimiters": ["$", "$"],
|
||||
"rm_spaces": True
|
||||
}
|
||||
|
||||
response = requests.post(url="https://api.mathpix.com/v3/pdf",
|
||||
headers=headers,
|
||||
data={"options_json": json.dumps(options)},
|
||||
files={"file": open(pdf_file_path, "rb")})
|
||||
|
||||
if response.ok:
|
||||
pdf_id = response.json()["pdf_id"]
|
||||
print(f"PDF processing initiated. PDF ID: {pdf_id}")
|
||||
|
||||
# Step 2: Check processing status
|
||||
while True:
|
||||
conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers)
|
||||
conversion_data = conversion_response.json()
|
||||
|
||||
if conversion_data["status"] == "completed":
|
||||
print("PDF processing completed.")
|
||||
break
|
||||
elif conversion_data["status"] == "error":
|
||||
print("Error occurred during processing.")
|
||||
else:
|
||||
print(f"Processing status: {conversion_data['status']}")
|
||||
time.sleep(5) # wait for a few seconds before checking again
|
||||
|
||||
# Step 3: Save results to local files
|
||||
output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output')
|
||||
if not os.path.exists(output_dir):
|
||||
os.makedirs(output_dir)
|
||||
|
||||
url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex"
|
||||
response = requests.get(url, headers=headers)
|
||||
file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1])
|
||||
output_name = f"{file_name_wo_dot}.tex.zip"
|
||||
output_path = os.path.join(output_dir, output_name)
|
||||
with open(output_path, "wb") as output_file:
|
||||
output_file.write(response.content)
|
||||
print(f"tex.zip file saved at: {output_path}")
|
||||
|
||||
import zipfile
|
||||
unzip_dir = os.path.join(output_dir, file_name_wo_dot)
|
||||
with zipfile.ZipFile(output_path, 'r') as zip_ref:
|
||||
zip_ref.extractall(unzip_dir)
|
||||
|
||||
return unzip_dir
|
||||
|
||||
else:
|
||||
print(f"Error sending PDF for processing. Status code: {response.status_code}")
|
||||
return None
|
||||
|
||||
|
||||
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    # <-------------- information about this plugin ------------->
    chatbot.append(["函数插件功能?",
                    "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- clear history and read input ------------->
    history = []
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id=None)

    # <-------------- if merge_proofread_en is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                       chatbot, history, system_prompt, mode='proofread_en',
                                       switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                   main_file_modified='merge_proofread_en',
                                   work_folder_original=project_folder, work_folder_modified=project_folder,
                                   work_folder=project_folder)

    # <-------------- zip PDF ------------->
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
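# ---- Illustrative note (sketch, not part of the original file) ----
# _switch_prompt_ freezes the user's extra requirement into switch_prompt;
# Latex精细分解与转化 later calls it once per run to build the per-fragment
# GPT requests, roughly:
#   inputs_array, sys_prompt_array = _switch_prompt_(pfg, mode)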
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    if no_cache: more_req = more_req[len("--no-cache"):].lstrip()  # drop the flag so it does not leak into the prompt
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- clear history and read input ------------->
    history = []
    try:
        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    except tarfile.ReadError as e:
        yield from update_ui_lastest_msg(
            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
            chatbot=chatbot, history=history)
        return

    if txt.endswith('.pdf'):
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                       chatbot, history, system_prompt, mode='translate_zh',
                                       switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                   main_file_modified='merge_translate_zh', mode='translate_zh',
                                   work_folder_original=project_folder, work_folder_modified=project_folder,
                                   work_folder=project_folder)

    # <-------------- zip PDF ------------->
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
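# ---- Illustrative note (sketch, not part of the original file) ----
# The advanced_arg convention handled above:
#   plugin_kwargs = {"advanced_arg": "--no-cache translate informally"}
#   -> allow_cache == False and more_req == "translate informally"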
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    if no_cache: more_req = more_req[len("--no-cache"):].lstrip()  # drop the flag so it does not leak into the prompt
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- clear history and read input ------------->
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    if len(file_manifest) != 1:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
    if len(app_id) == 0 or len(app_key) == 0:
        report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- convert pdf into tex ------------->
    project_folder = pdf2tex_project(file_manifest[0])

    # Translate English Latex to Chinese Latex, and compile it
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                       chatbot, history, system_prompt, mode='translate_zh',
                                       switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                   main_file_modified='merge_translate_zh', mode='translate_zh',
                                   work_folder_original=project_folder, work_folder_modified=project_folder,
                                   work_folder=project_folder)

    # <-------------- zip PDF ------------->
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
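# ---- Illustrative configuration (sketch, not part of the original file) ----
# get_conf above expects Mathpix credentials to be configured, e.g. in a
# private config file (key names come from the check above, values are
# placeholders):
#   MATHPIX_APPID  = "your-mathpix-app-id"
#   MATHPIX_APPKEY = "your-mathpix-app-key"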
@@ -1,7 +1,7 @@
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
from functools import partial
-import glob, os, requests, time, tarfile
+import glob, os, requests, time
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

@@ -104,7 +104,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
    if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
        txt = 'https://arxiv.org/abs/' + txt[:10]
    if not txt.startswith('https://arxiv.org'):
-       return txt, None # 是本地文件,跳过下载
+       return txt, None

    # <-------------- inspect format ------------->
    chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
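(Worked example for the arxiv-ID branch above, illustrative input: txt =
"2303.12712" contains '.', has no '/', and float("2303.12712") parses, so it
is rewritten to 'https://arxiv.org/abs/2303.12712'; a local folder path fails
the 'https://arxiv.org' prefix check and is returned unchanged as (txt, None).)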
@@ -146,7 +146,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):


@CatchException
-def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # <-------------- information about this plugin ------------->
    chatbot.append([ "函数插件功能?",
        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])

@@ -221,7 +221,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
-def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",

@@ -250,14 +250,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,

    # <-------------- clear history and read input ------------->
    history = []
-   try:
-       txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
-   except tarfile.ReadError as e:
-       yield from update_ui_lastest_msg(
-           "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
-           chatbot=chatbot, history=history)
-       return
+   txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    if txt.endswith('.pdf'):
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
231 crazy_functions/crazy_functions_test.py Normal file
@@ -0,0 +1,231 @@
"""
这是什么?
这个文件用于函数插件的单元测试
运行方法 python crazy_functions/crazy_functions_test.py
"""

# ==============================================================================================================================

def validate_path():
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory

# ==============================================================================================================================

from colorful import *
from toolbox import get_conf, ChatBotWithCookies
import contextlib
import os
import sys
from functools import wraps
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')

llm_kwargs = {
    'api_key': API_KEY,
    'llm_model': LLM_MODEL,
    'top_p': 1.0,
    'max_length': None,
    'temperature': 1.0,
}
plugin_kwargs = { }
chatbot = ChatBotWithCookies(llm_kwargs)
history = []
system_prompt = "Serve me as a writing and programming assistant."
web_port = 1024

# ==============================================================================================================================

def silence_stdout(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        _original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        for q in func(*args, **kwargs):
            sys.stdout = _original_stdout
            yield q
            sys.stdout = open(os.devnull, 'w')
        sys.stdout.close()
        sys.stdout = _original_stdout
    return wrapper
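# ---- Illustrative usage (sketch, not part of the original file) ----
# silence_stdout mutes a generator-style plugin's console noise while the
# yielded UI updates still get through, e.g. (as the tests below do):
#   for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, ...):
#       cli_printer.print(cb)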
class CLI_Printer():
    def __init__(self) -> None:
        self.pre_buf = ""

    def print(self, buf):
        bufp = ""
        for index, chat in enumerate(buf):
            a, b = chat
            bufp += sprint亮靛('[Me]:' + a) + '\n'
            bufp += '[GPT]:' + b
            if index < len(buf)-1:
                bufp += '\n'

        if self.pre_buf != "" and bufp.startswith(self.pre_buf):
            print(bufp[len(self.pre_buf):], end='')
        else:
            print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'+bufp, end='')
        self.pre_buf = bufp
        return

cli_printer = CLI_Printer()
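# ---- Illustrative behaviour (sketch, not part of the original file) ----
# CLI_Printer streams incrementally: it prints only the suffix appended
# since the previous call, so a growing chat transcript is not reprinted.
#   cli_printer.print([("hi", "hel")])    # first call prints the whole transcript
#   cli_printer.print([("hi", "hello")])  # second call prints just "lo"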
# ==============================================================================================================================
def test_解析一个Python项目():
    from crazy_functions.解析项目源代码 import 解析一个Python项目
    txt = "crazy_functions/test_project/python/dqn"
    for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_解析一个Cpp项目():
    from crazy_functions.解析项目源代码 import 解析一个C项目
    txt = "crazy_functions/test_project/cpp/cppipc"
    for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Latex英文润色():
    from crazy_functions.Latex全文润色 import Latex英文润色
    txt = "crazy_functions/test_project/latex/attention"
    for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Markdown中译英():
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    txt = "README.md"
    for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_批量翻译PDF文档():
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_谷歌检索小助手():
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG="
    for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_总结word文档():
    from crazy_functions.总结word文档 import 总结word文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_下载arxiv论文并翻译摘要():
    from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
    txt = "1812.10695"
    for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_联网回答问题():
    from crazy_functions.联网的ChatGPT import 连接网络回答问题
    # txt = "谁是应急食品?"
    # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。'
    # txt = "道路千万条,安全第一条。后面两句是?"
    # >> '行车不规范,亲人两行泪。'
    # txt = "You should have gone for the head. What does that mean?"
    # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. It was spoken by the character Thanos in Infinity War and by Thor in Endgame.
    txt = "AutoGPT是什么?"
    for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print("当前问答:", cb[-1][-1].replace("\n"," "))
    for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])

def test_解析ipynb文件():
    from crazy_functions.解析JupyterNotebook import 解析ipynb文件
    txt = "crazy_functions/test_samples"
    for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)


def test_数学动画生成manim():
    from crazy_functions.数学动画生成manim import 动画生成
    txt = "A ball split into 2, and then split into 4, and finally split into 8."
    for cookies, cb, hist, msg in 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)


def test_Markdown多语言():
    from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
    txt = "README.md"
    history = []
    for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
        plugin_kwargs = {"advanced_arg": lang}
        for cookies, cb, hist, msg in Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
            print(cb)

def test_Langchain知识库():
    from crazy_functions.Langchain知识库 import 知识库问答
    txt = "./"
    chatbot = ChatBotWithCookies(llm_kwargs)
    for cookies, cb, hist, msg in silence_stdout(知识库问答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

    chatbot = ChatBotWithCookies(cookies)
    from crazy_functions.Langchain知识库 import 读取知识库作答
    txt = "What is the installation method?"
    for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

def test_Langchain知识库读取():
    from crazy_functions.Langchain知识库 import 读取知识库作答
    txt = "远程云服务器部署?"
    for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

def test_Latex():
    from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比, Latex翻译中文并重新编译PDF

    # txt = r"https://arxiv.org/abs/1706.03762"
    # txt = r"https://arxiv.org/abs/1902.03185"
    # txt = r"https://arxiv.org/abs/2305.18290"
    # txt = r"https://arxiv.org/abs/2305.17608"
    # txt = r"https://arxiv.org/abs/2211.16068" # ACE
    # txt = r"C:\Users\x\arxiv_cache\2211.16068\workfolder" # ACE
    # txt = r"https://arxiv.org/abs/2002.09253"
    # txt = r"https://arxiv.org/abs/2306.07831"
    # txt = r"https://arxiv.org/abs/2212.10156"
    # txt = r"https://arxiv.org/abs/2211.11559"
    # txt = r"https://arxiv.org/abs/2303.08774"
    txt = r"https://arxiv.org/abs/2303.12712"
    # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"

    for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

    # txt = "2302.02948.tar"
    # print(txt)
    # main_tex, work_folder = Latex预处理(txt)
    # print('main tex:', main_tex)
    # res = 编译Latex(main_tex, work_folder)
    # # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    #     cli_printer.print(cb) # print(cb)


# test_解析一个Python项目()
# test_Latex英文润色()
# test_Markdown中译英()
# test_批量翻译PDF文档()
# test_谷歌检索小助手()
# test_总结word文档()
# test_下载arxiv论文并翻译摘要()
# test_解析一个Cpp项目()
# test_联网回答问题()
# test_解析ipynb文件()
# test_数学动画生成manim()
# test_Langchain知识库()
# test_Langchain知识库读取()
if __name__ == "__main__":
    test_Latex()
    input("程序完成,回车退出。")
    print("退出。")
@@ -12,7 +12,7 @@ def input_clipping(inputs, history, max_token_limit):
    mode = 'input-and-history'
    # 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
    input_token_num = get_token_num(inputs)
    if input_token_num < max_token_limit//2:
        mode = 'only-history'
        max_token_limit = max_token_limit - input_token_num
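(Worked example for the branch above, illustrative numbers: with
max_token_limit=4096 and a 1000-token input, 1000 < 2048 holds, so mode
becomes 'only-history' and only the history is clipped, down to the
remaining 4096-1000 = 3096 tokens, while the input is kept intact.)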
@@ -21,7 +21,7 @@ def input_clipping(inputs, history, max_token_limit):
    n_token = get_token_num('\n'.join(everything))
    everything_token = [get_token_num(e) for e in everything]
    delta = max(everything_token) // 16 # 截断时的颗粒度

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = enc.encode(everything[where], disallowed_special=())

@@ -38,9 +38,9 @@ def input_clipping(inputs, history, max_token_limit):
    return inputs, history

def request_gpt_model_in_new_thread_with_ui_alive(
        inputs, inputs_show_user, llm_kwargs,
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
        ):
    """

@@ -77,7 +77,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
    exceeded_cnt = 0
    while True:
        # watchdog error
        if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
            raise RuntimeError("检测到程序终止。")
        try:
            # 【第一种情况】:顺利完成
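(Illustrative gloss, not from the diff: mutable[1] appears to hold a
heartbeat timestamp that the front-end keeps refreshing; once it goes stale
for longer than watch_dog_patience seconds, the worker assumes the session
was abandoned and aborts with the RuntimeError above.)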
@@ -140,12 +140,12 @@ def can_multi_process(llm):
    if llm.startswith('api2d-'): return True
    if llm.startswith('azure-'): return True
    if llm.startswith('spark'): return True
-   if llm.startswith('zhipuai'): return True
+   if llm.startswith('zhipuai') or llm.startswith('glm-'): return True
    return False

def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array, inputs_show_user_array, llm_kwargs,
        chatbot, history_array, sys_prompt_array,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
@@ -189,7 +189,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
    # 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
    if not can_multi_process(llm_kwargs['llm_model']):
        max_workers = 1

    executor = ThreadPoolExecutor(max_workers=max_workers)
    n_frag = len(inputs_array)
    # 用户反馈
@@ -214,7 +214,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        try:
            # 【第一种情况】:顺利完成
            gpt_say = predict_no_ui_long_connection(
                inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
            )
            mutable[index][2] = "已成功"
@@ -246,7 +246,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            print(tb_str)
            gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
            if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
            if retry_op > 0:
                retry_op -= 1
                wait = random.randint(5, 20)
                if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
@@ -287,8 +287,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
        observe_win.append(print_something_really_funny)
        # 在前端打印些好玩的东西
        stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
                            if not done else f'`{mutable[thread_index][2]}`\n\n'
                            for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
        chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
@@ -302,7 +302,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
    for inputs_show_user, f in zip(inputs_show_user_array, futures):
        gpt_res = f.result()
        gpt_response_collection.extend([inputs_show_user, gpt_res])

    # 是否在结束时,在界面上显示结果
    if show_user_at_complete:
        for inputs_show_user, f in zip(inputs_show_user_array, futures):
@@ -352,7 +352,7 @@ def read_and_clean_pdf_text(fp):
            if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
            fsize_statiscs[wtf['size']] += len(wtf['text'])
        return max(fsize_statiscs, key=fsize_statiscs.get)
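(Worked example, illustrative numbers: if a PDF's 10pt body spans carry
8000 characters and its 12pt headings only 600, then fsize_statiscs ==
{10.0: 8000, 12.0: 600} and max(..., key=...) returns 10.0 as the main
body font size.)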
    def ffsize_same(a,b):
        """
        提取字体大小是否近似相等
        """

@@ -388,7 +388,7 @@ def read_and_clean_pdf_text(fp):
        if index == 0:
            page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                '- ', '') for t in text_areas['blocks'] if 'lines' in t]

    ############################## <第 2 步,获取正文主字体> ##################################
    try:
        fsize_statiscs = {}

@@ -404,7 +404,7 @@ def read_and_clean_pdf_text(fp):
    mega_sec = []
    sec = []
    for index, line in enumerate(meta_line):
        if index == 0:
            sec.append(line[fc])
            continue
        if REMOVE_FOOT_NOTE:

@@ -501,12 +501,12 @@ def get_files_from_everything(txt, type): # type='.md'
    """
    这个函数是用来获取指定目录下所有指定类型(如.md)的文件,并且对于网络上的文件,也可以获取它。
    下面是对每个参数和返回值的说明:
    参数
    - txt: 路径或网址,表示要搜索的文件或者文件夹路径或网络上的文件。
    - type: 字符串,表示要搜索的文件类型。默认是.md。
    返回值
    - success: 布尔值,表示函数是否成功执行。
    - file_manifest: 文件路径列表,里面包含以指定类型为后缀名的所有文件的绝对路径。
    - project_folder: 字符串,表示文件所在的文件夹路径。如果是网络上的文件,就是临时文件夹的路径。
    该函数详细注释已添加,请确认是否满足您的需要。
    """

@@ -570,7 +570,7 @@ class nougat_interface():
    def NOUGAT_parse_pdf(self, fp, chatbot, history):
        from toolbox import update_ui_lastest_msg

        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
                                         chatbot=chatbot, history=history, delay=0)
        self.threadLock.acquire()
        import glob, threading, os

@@ -578,7 +578,7 @@ class nougat_interface():
        dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
        os.makedirs(dst)

        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
                                         chatbot=chatbot, history=history, delay=0)
        self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
        res = glob.glob(os.path.join(dst,'*.mmd'))
788 crazy_functions/latex_utils.py Normal file
@@ -0,0 +1,788 @@
from toolbox import update_ui, update_ui_lastest_msg    # 刷新Gradio前端界面
from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
import os, shutil
import re
import numpy as np
pj = os.path.join

"""
========================================================================
Part One
Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1)
========================================================================
"""
PRESERVE = 0
TRANSFORM = 1

def set_forbidden_text(text, mask, pattern, flags=0):
    """
    Add a preserve text area in this paper
    e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
    you can mask out (mask = PRESERVE so that text become untouchable for GPT)
    everything between "\begin{equation}" and "\end{equation}"
    """
    if isinstance(pattern, list): pattern = '|'.join(pattern)
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        mask[res.span()[0]:res.span()[1]] = PRESERVE
    return text, mask
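# ---- Illustrative usage (sketch, not part of the original file) ----
# The masking convention in one small example:
#   text = r"A \begin{equation} x=1 \end{equation} B"
#   mask = np.zeros(len(text), dtype=np.uint8) + TRANSFORM
#   text, mask = set_forbidden_text(text, mask,
#       r"\\begin\{equation\}(.*?)\\end\{equation\}", re.DOTALL)
#   # every character of the equation block is now PRESERVE,
#   # i.e. kept verbatim instead of being sent to GPT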
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
    """
    Move area out of preserve area (make text editable for GPT)
    count the number of the braces so as to catch complete text area.
    e.g.
    \begin{abstract} blablablablablabla. \end{abstract}
    """
    if isinstance(pattern, list): pattern = '|'.join(pattern)
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        if not forbid_wrapper:
            mask[res.span()[0]:res.span()[1]] = TRANSFORM
        else:
            mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE     # '\\begin{abstract}'
            mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM    # abstract body
            mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE     # '\\end{abstract}'
    return text, mask

def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
    """
    Add a preserve text area in this paper (text become untouchable for GPT).
    count the number of the braces so as to catch complete text area.
    e.g.
    \caption{blablablablabla\texbf{blablabla}blablabla.}
    """
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        brace_level = -1
        p = begin = end = res.regs[0][0]
        for _ in range(1024*16):
            if text[p] == '}' and brace_level == 0: break
            elif text[p] == '}': brace_level -= 1
            elif text[p] == '{': brace_level += 1
            p += 1
        end = p+1
        mask[begin:end] = PRESERVE
    return text, mask

def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
    """
    Move area out of preserve area (make text editable for GPT)
    count the number of the braces so as to catch complete text area.
    e.g.
    \caption{blablablablabla\texbf{blablabla}blablabla.}
    """
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        brace_level = 0
        p = begin = end = res.regs[1][0]
        for _ in range(1024*16):
            if text[p] == '}' and brace_level == 0: break
            elif text[p] == '}': brace_level -= 1
            elif text[p] == '{': brace_level += 1
            p += 1
        end = p
        mask[begin:end] = TRANSFORM
        if forbid_wrapper:
            mask[res.regs[0][0]:begin] = PRESERVE
            mask[end:res.regs[0][1]] = PRESERVE
    return text, mask

def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
    """
    Find all \begin{} ... \end{} text blocks with fewer than limit_n_lines lines.
    Add them to the preserve area
    """
    pattern_compile = re.compile(pattern, flags)
    def search_with_line_limit(text, mask):
        for res in pattern_compile.finditer(text):
            cmd = res.group(1)  # begin{what}
            this = res.group(2) # content between begin and end
            this_mask = mask[res.regs[2][0]:res.regs[2][1]]
            white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
                          'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
            if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
                this, this_mask = search_with_line_limit(this, this_mask)
                mask[res.regs[2][0]:res.regs[2][1]] = this_mask
            else:
                mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
        return text, mask
    return search_with_line_limit(text, mask)

class LinkedListNode():
    """
    Linked List Node
    """
    def __init__(self, string, preserve=True) -> None:
        self.string = string
        self.preserve = preserve
        self.next = None
        # self.begin_line = 0
        # self.begin_char = 0

def convert_to_linklist(text, mask):
    root = LinkedListNode("", preserve=True)
    current_node = root
    for c, m, i in zip(text, mask, range(len(text))):
        if (m==PRESERVE and current_node.preserve) \
            or (m==TRANSFORM and not current_node.preserve):
            # add
            current_node.string += c
        else:
            current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
            current_node = current_node.next
    return root
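# ---- Illustrative behaviour (sketch, not part of the original file) ----
# convert_to_linklist merges consecutive characters sharing a flag:
#   text = "ab", mask = [PRESERVE, TRANSFORM]
#   -> root("a", preserve=True) -> node("b", preserve=False)
# so each node is a maximal run of kept or GPT-editable text.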
"""
|
||||
========================================================================
|
||||
Latex Merge File
|
||||
========================================================================
|
||||
"""
|
||||
|
||||
def 寻找Latex主文件(file_manifest, mode):
|
||||
"""
|
||||
在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
|
||||
P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码)
|
||||
"""
|
||||
canidates = []
|
||||
for texf in file_manifest:
|
||||
if os.path.basename(texf).startswith('merge'):
|
||||
continue
|
||||
with open(texf, 'r', encoding='utf8') as f:
|
||||
file_content = f.read()
|
||||
if r'\documentclass' in file_content:
|
||||
canidates.append(texf)
|
||||
else:
|
||||
continue
|
||||
|
||||
if len(canidates) == 0:
|
||||
raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
|
||||
elif len(canidates) == 1:
|
||||
return canidates[0]
|
||||
else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
|
||||
canidates_score = []
|
||||
# 给出一些判定模板文档的词作为扣分项
|
||||
unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
|
||||
expected_words = ['\input', '\ref', '\cite']
|
||||
for texf in canidates:
|
||||
canidates_score.append(0)
|
||||
with open(texf, 'r', encoding='utf8') as f:
|
||||
file_content = f.read()
|
||||
for uw in unexpected_words:
|
||||
if uw in file_content:
|
||||
canidates_score[-1] -= 1
|
||||
for uw in expected_words:
|
||||
if uw in file_content:
|
||||
canidates_score[-1] += 1
|
||||
select = np.argmax(canidates_score) # 取评分最高者返回
|
||||
return canidates[select]
|
||||
|
||||
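# ---- Illustrative scoring (sketch, not part of the original file) ----
# With two candidates, a conference template containing "Guidelines",
# "font" and "blind review" scores -3, while the real manuscript that
# uses \input and \cite scores +2, so np.argmax picks the manuscript.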
def rm_comments(main_file):
    new_file_remove_comment_lines = []
    for l in main_file.splitlines():
        # 删除整行的空注释
        if l.lstrip().startswith("%"):
            pass
        else:
            new_file_remove_comment_lines.append(l)
    main_file = '\n'.join(new_file_remove_comment_lines)
    # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file)  # 将 \include 命令转换为 \input 命令
    main_file = re.sub(r'(?<!\\)%.*', '', main_file)  # 使用正则表达式查找半行注释, 并替换为空字符串
    return main_file
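# ---- Illustrative examples (sketch, not part of the original file) ----
#   "foo % trailing note"  ->  "foo "      (half-line comment stripped)
#   "a 50\% rate"          ->  unchanged   (escaped \% survives the (?<!\\) lookbehind)
#   "% whole-line comment" ->  dropped entirely by the first pass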
def merge_tex_files_(project_foler, main_file, mode):
    """
    Merge Tex project recursively
    """
    main_file = rm_comments(main_file)
    for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
        f = s.group(1)
        fp = os.path.join(project_foler, f)
        if os.path.exists(fp):
            # e.g., \input{srcs/07_appendix.tex}
            with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
                c = fx.read()
        else:
            # e.g., \input{srcs/07_appendix}
            with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
                c = fx.read()
        c = merge_tex_files_(project_foler, c, mode)
        main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
    return main_file

def merge_tex_files(project_foler, main_file, mode):
    """
    Merge Tex project recursively
    P.S. 顺便把CTEX塞进去以支持中文
    P.S. 顺便把Latex的注释去除
    """
    main_file = merge_tex_files_(project_foler, main_file, mode)
    main_file = rm_comments(main_file)

    if mode == 'translate_zh':
        # find paper documentclass
        pattern = re.compile(r'\\documentclass.*\n')
        match = pattern.search(main_file)
        assert match is not None, "Cannot find documentclass statement!"
        position = match.end()
        add_ctex = '\\usepackage{ctex}\n'
        add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
        main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
        # fontset=windows
        import platform
        main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}", main_file)
        main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}", main_file)
        # find paper abstract
        pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
        pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
        assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
    return main_file
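# ---- Illustrative transformation (sketch, not part of the original file) ----
# In translate_zh mode a header like
#   \documentclass{article}
# becomes
#   \documentclass[fontset=windows,UTF8]{article}
#   \usepackage{ctex}
# so the merged project can typeset Chinese output.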
"""
|
||||
========================================================================
|
||||
Post process
|
||||
========================================================================
|
||||
"""
|
||||
def mod_inbraket(match):
|
||||
"""
|
||||
为啥chatgpt会把cite里面的逗号换成中文逗号呀
|
||||
"""
|
||||
# get the matched string
|
||||
cmd = match.group(1)
|
||||
str_to_modify = match.group(2)
|
||||
# modify the matched string
|
||||
str_to_modify = str_to_modify.replace(':', ':') # 前面是中文冒号,后面是英文冒号
|
||||
str_to_modify = str_to_modify.replace(',', ',') # 前面是中文逗号,后面是英文逗号
|
||||
# str_to_modify = 'BOOM'
|
||||
return "\\" + cmd + "{" + str_to_modify + "}"
|
||||
|
||||
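# ---- Illustrative example (sketch, not part of the original file) ----
# GPT sometimes returns full-width punctuation inside commands, e.g.
#   \cite{luo2021,zhang2022}   (full-width comma)
# and mod_inbraket rewrites it back to
#   \cite{luo2021,zhang2022}   (ASCII comma)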
def fix_content(final_tex, node_string):
    """
    Fix common GPT errors to increase success rate
    """
    final_tex = re.sub(r"(?<!\\)%", "\\%", final_tex)
    final_tex = re.sub(r"\\([a-z]{2,10})\ \{", r"\\\1{", string=final_tex)
    final_tex = re.sub(r"\\\ ([a-z]{2,10})\{", r"\\\1{", string=final_tex)
    final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)

    if "Traceback" in final_tex and "[Local Message]" in final_tex:
        final_tex = node_string # 出问题了,还原原文
    if node_string.count('\\begin') != final_tex.count('\\begin'):
        final_tex = node_string # 出问题了,还原原文
    if node_string.count('\_') > 0 and node_string.count('\_') > final_tex.count('\_'):
        # walk and replace any _ without \
        final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)

    def compute_brace_level(string):
        # this function counts the number of { and }
        brace_level = 0
        for c in string:
            if c == "{": brace_level += 1
            elif c == "}": brace_level -= 1
        return brace_level
    def join_most(tex_t, tex_o):
        # this function joins the translated string and the original string when something goes wrong
        p_t = 0
        p_o = 0
        def find_next(string, chars, begin):
            p = begin
            while p < len(string):
                if string[p] in chars: return p, string[p]
                p += 1
            return None, None
        while True:
            res1, char = find_next(tex_o, ['{','}'], p_o)
            if res1 is None: break
            res2, char = find_next(tex_t, [char], p_t)
            if res2 is None: break
            p_o = res1 + 1
            p_t = res2 + 1
        return tex_t[:p_t] + tex_o[p_o:]

    if compute_brace_level(final_tex) != compute_brace_level(node_string):
        # 出问题了,还原部分原文,保证括号正确
        final_tex = join_most(final_tex, node_string)
    return final_tex
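# ---- Illustrative repair (sketch, not part of the original file) ----
# If a translation drops a closing brace, e.g. the original "\textbf{bold}."
# comes back as "\textbf{加粗.", the two brace levels differ (0 vs +1), so
# join_most walks both strings brace-by-brace and splices the original tail
# back on, keeping the fragment compilable.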
def split_subprocess(txt, project_folder, return_dict, opts):
    """
    break down latex file to a linked list,
    each node use a preserve flag to indicate whether it should
    be processed by GPT.
    """
    text = txt
    mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM

    # 吸收title与作者以上的部分
    text, mask = set_forbidden_text(text, mask, r"(.*?)\\maketitle", re.DOTALL)
    # 吸收iffalse注释
    text, mask = set_forbidden_text(text, mask, r"\\iffalse(.*?)\\fi", re.DOTALL)
    # 吸收在42行以内的begin-end组合
    text, mask = set_forbidden_text_begin_end(text, mask, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=42)
    # 吸收匿名公式
    text, mask = set_forbidden_text(text, mask, [ r"\$\$(.*?)\$\$", r"\\\[.*?\\\]" ], re.DOTALL)
    # 吸收其他杂项
    text, mask = set_forbidden_text(text, mask, [ r"\\section\{(.*?)\}", r"\\section\*\{(.*?)\}", r"\\subsection\{(.*?)\}", r"\\subsubsection\{(.*?)\}" ])
    text, mask = set_forbidden_text(text, mask, [ r"\\bibliography\{(.*?)\}", r"\\bibliographystyle\{(.*?)\}" ])
    text, mask = set_forbidden_text(text, mask, r"\\begin\{thebibliography\}.*?\\end\{thebibliography\}", re.DOTALL)
    text, mask = set_forbidden_text(text, mask, r"\\begin\{lstlisting\}(.*?)\\end\{lstlisting\}", re.DOTALL)
    text, mask = set_forbidden_text(text, mask, r"\\begin\{wraptable\}(.*?)\\end\{wraptable\}", re.DOTALL)
    text, mask = set_forbidden_text(text, mask, r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}", re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{wrapfigure\}(.*?)\\end\{wrapfigure\}", r"\\begin\{wrapfigure\*\}(.*?)\\end\{wrapfigure\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{figure\}(.*?)\\end\{figure\}", r"\\begin\{figure\*\}(.*?)\\end\{figure\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{multline\}(.*?)\\end\{multline\}", r"\\begin\{multline\*\}(.*?)\\end\{multline\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{table\}(.*?)\\end\{table\}", r"\\begin\{table\*\}(.*?)\\end\{table\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{minipage\}(.*?)\\end\{minipage\}", r"\\begin\{minipage\*\}(.*?)\\end\{minipage\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{align\*\}(.*?)\\end\{align\*\}", r"\\begin\{align\}(.*?)\\end\{align\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\begin\{equation\}(.*?)\\end\{equation\}", r"\\begin\{equation\*\}(.*?)\\end\{equation\*\}"], re.DOTALL)
    text, mask = set_forbidden_text(text, mask, [r"\\includepdf\[(.*?)\]\{(.*?)\}", r"\\clearpage", r"\\newpage", r"\\appendix", r"\\tableofcontents", r"\\include\{(.*?)\}"])
    text, mask = set_forbidden_text(text, mask, [r"\\vspace\{(.*?)\}", r"\\hspace\{(.*?)\}", r"\\label\{(.*?)\}", r"\\begin\{(.*?)\}", r"\\end\{(.*?)\}", r"\\item "])
    text, mask = set_forbidden_text_careful_brace(text, mask, r"\\hl\{(.*?)\}", re.DOTALL)
    # reverse 操作必须放在最后
    text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\caption\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
    text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\abstract\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
    text, mask = reverse_forbidden_text(text, mask, r"\\begin\{abstract\}(.*?)\\end\{abstract\}", re.DOTALL, forbid_wrapper=True)
    root = convert_to_linklist(text, mask)

    # 修复括号
    node = root
    while True:
        string = node.string
        if node.preserve:
            node = node.next
            if node is None: break
            continue
        def break_check(string):
            str_stack = [""] # (lv, index)
            for i, c in enumerate(string):
                if c == '{':
                    str_stack.append('{')
                elif c == '}':
                    if len(str_stack) == 1:
                        print('stack fix')
                        return i
                    str_stack.pop(-1)
                else:
                    str_stack[-1] += c
            return -1
        bp = break_check(string)

        if bp == -1:
            pass
        elif bp == 0:
            node.string = string[:1]
            q = LinkedListNode(string[1:], False)
            q.next = node.next
            node.next = q
        else:
            node.string = string[:bp]
            q = LinkedListNode(string[bp:], False)
            q.next = node.next
            node.next = q

        node = node.next
        if node is None: break

    # 屏蔽空行和太短的句子
    node = root
    while True:
        if len(node.string.strip('\n').strip(''))==0: node.preserve = True
        if len(node.string.strip('\n').strip(''))<42: node.preserve = True
        node = node.next
        if node is None: break
    node = root
    while True:
        if node.next and node.preserve and node.next.preserve:
            node.string += node.next.string
            node.next = node.next.next
        node = node.next
        if node is None: break

    # 将前后断行符脱离
    node = root
    prev_node = None
    while True:
        if not node.preserve:
            lstriped_ = node.string.lstrip().lstrip('\n')
            if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
                prev_node.string += node.string[:-len(lstriped_)]
                node.string = lstriped_
            rstriped_ = node.string.rstrip().rstrip('\n')
            if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
                node.next.string = node.string[len(rstriped_):] + node.next.string
                node.string = rstriped_
        # =====
        prev_node = node
        node = node.next
        if node is None: break

    # 输出html调试文件,用红色标注处保留区(PRESERVE),用黑色标注转换区(TRANSFORM)
    with open(pj(project_folder, 'debug_log.html'), 'w', encoding='utf8') as f:
        segment_parts_for_gpt = []
        nodes = []
        node = root
        while True:
            nodes.append(node)
            show_html = node.string.replace('\n','<br/>')
            if not node.preserve:
                segment_parts_for_gpt.append(node.string)
                f.write(f'<p style="color:black;">#{show_html}#</p>')
            else:
                f.write(f'<p style="color:red;">{show_html}</p>')
            node = node.next
            if node is None: break

    for n in nodes: n.next = None # break
    return_dict['nodes'] = nodes
    return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt
    return return_dict


class LatexPaperSplit():
    """
    break down latex file to a linked list,
    each node use a preserve flag to indicate whether it should
    be processed by GPT.
    """
    def __init__(self) -> None:
        self.nodes = None
        self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
                   "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
                   "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
        # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
        self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"

    def merge_result(self, arr, mode, msg):
        """
        Merge the result after the GPT process completed
        """
        result_string = ""
        p = 0
        for node in self.nodes:
            if node.preserve:
                result_string += node.string
            else:
                result_string += fix_content(arr[p], node.string)
                p += 1
        if mode == 'translate_zh':
            pattern = re.compile(r'\\begin\{abstract\}.*\n')
            match = pattern.search(result_string)
            if not match:
                # match \abstract{xxxx}
                pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
                match = pattern_compile.search(result_string)
                position = match.regs[1][0]
            else:
                # match \begin{abstract}xxxx\end{abstract}
                position = match.end()
            result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:]
        return result_string

    def split(self, txt, project_folder, opts):
        """
        break down latex file to a linked list,
        each node use a preserve flag to indicate whether it should
        be processed by GPT.
        P.S. use multiprocessing to avoid timeout error
        """
        import multiprocessing
        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        p = multiprocessing.Process(
            target=split_subprocess,
            args=(txt, project_folder, return_dict, opts))
        p.start()
        p.join()
        p.close()
        self.nodes = return_dict['nodes']
        self.sp = return_dict['segment_parts_for_gpt']
        return self.sp
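# ---- Design note (sketch, not part of the original file) ----
# split_subprocess runs in a separate process (the docstring's "avoid
# timeout error"): the worker hands its output back through a
# multiprocessing.Manager dict, and the linked list is flattened first
# (n.next = None in split_subprocess) so the node list can be pickled
# back to the parent.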
class LatexPaperFileGroup():
    """
    use tokenizer to break down text according to max_token_limit
    """
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        use tokenizer to break down text according to max_token_limit
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
        print('Segmentation: done')
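    # ---- Illustrative split (sketch, not part of the original file) ----
    # With max_token_limit=1024 (as Latex精细分解与转化 uses below), a fragment
    # registered as 'segment-3' that measures ~2500 tokens is broken into
    # pieces tagged 'segment-3.part-0.tex', 'segment-3.part-1.tex', ...
    # while fragments under the limit keep their original tag.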
def merge_result(self):
|
||||
self.file_result = ["" for _ in range(len(self.file_paths))]
|
||||
for r, k in zip(self.sp_file_result, self.sp_file_index):
|
||||
self.file_result[k] += r
|
||||
|
||||
def write_result(self):
|
||||
manifest = []
|
||||
for path, res in zip(self.file_paths, self.file_result):
|
||||
with open(path + '.polish.tex', 'w', encoding='utf8') as f:
|
||||
manifest.append(path + '.polish.tex')
|
||||
f.write(res)
|
||||
return manifest
|
||||
|
||||
def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
|
||||
|
||||
# write html
|
||||
try:
|
||||
import shutil
|
||||
from .crazy_utils import construct_html
|
||||
from toolbox import gen_time_str
|
||||
ch = construct_html()
|
||||
orig = ""
|
||||
trans = ""
|
||||
final = []
|
||||
for c,r in zip(sp_file_contents, sp_file_result):
|
||||
final.append(c)
|
||||
final.append(r)
|
||||
for i, k in enumerate(final):
|
||||
if i%2==0:
|
||||
orig = k
|
||||
if i%2==1:
|
||||
trans = k
|
||||
ch.add_row(a=orig, b=trans)
|
||||
create_report_file_name = f"{gen_time_str()}.trans.html"
|
||||
ch.save_file(create_report_file_name)
|
||||
shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name))
|
||||
promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot)
|
||||
except:
|
||||
from toolbox import trimmed_format_exc
|
||||
print('writing html result failed:', trimmed_format_exc())
|
||||
|
def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
    from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件

    # <-------- locate the main .tex file ---------->
    maintex = 寻找Latex主文件(file_manifest, mode)
    chatbot.append(("定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。'))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    time.sleep(3)

    # <-------- read the LaTeX sources, merging the multi-file tex project into one giant tex ---------->
    main_tex_basename = os.path.basename(maintex)
    assert main_tex_basename.endswith('.tex')
    main_tex_basename_bare = main_tex_basename[:-4]
    may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl')
    if os.path.exists(may_exist_bbl):
        shutil.copyfile(may_exist_bbl, pj(project_folder, 'merge.bbl'))
        shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl'))
        shutil.copyfile(may_exist_bbl, pj(project_folder, 'merge_diff.bbl'))

    with open(maintex, 'r', encoding='utf-8', errors='replace') as f:
        content = f.read()
        merged_content = merge_tex_files(project_folder, content, mode)

    with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f:
        f.write(merged_content)

    # <-------- fine-grained splitting of the LaTeX source ---------->
    chatbot.append(("Latex文件融合完成", '[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    lps = LatexPaperSplit()
    res = lps.split(merged_content, project_folder, opts)  # time-consuming call

    # <-------- split over-long LaTeX fragments ---------->
    pfg = LatexPaperFileGroup()
    for index, r in enumerate(res):
        pfg.file_paths.append('segment-' + str(index))
        pfg.file_contents.append(r)

    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # <-------- switch the prompt according to the mode ---------->
    inputs_array, sys_prompt_array = switch_prompt(pfg, mode)
    inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag]

    if os.path.exists(pj(project_folder, 'temp.pkl')):

        # <-------- [debug only] skip the GPT requests when a debug cache file exists ---------->
        pfg = objload(file=pj(project_folder, 'temp.pkl'))

    else:
        # <-------- multi-threaded GPT requests ---------->
        gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            inputs_array=inputs_array,
            inputs_show_user_array=inputs_show_user_array,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history_array=[[""] for _ in range(n_split)],
            sys_prompt_array=sys_prompt_array,
            # max_workers=5,  # cap on parallel tasks: at most 5 run at once, the rest queue up
            scroller_max_len=40
        )

        # <-------- reassemble the text fragments into complete .tex pieces ---------->
        pfg.sp_file_result = []
        for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents):
            pfg.sp_file_result.append(gpt_say)
        pfg.merge_result()

        # <-------- temporary dump for debugging ---------->
        pfg.get_token_num = None
        objdump(pfg, file=pj(project_folder, 'temp.pkl'))

    write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)

    # <-------- write out the result file ---------->
    msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。"
    final_tex = lps.merge_result(pfg.file_result, mode, msg)
    with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f:
        if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex)

    # <-------- tidy up and exit ---------->
    chatbot.append(("完成了吗?", 'GPT结果已输出, 正在编译PDF'))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # <-------- return ---------->
    return project_folder + f'/merge_{mode}.tex'
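switch_prompt is supplied by the calling plugin; the function above only assumes it returns one user prompt and one system prompt per split fragment. A hypothetical minimal implementation of that contract (the prompt wording here is made up for illustration):

def switch_prompt(pfg, mode):
    # one (user prompt, system prompt) pair per fragment in pfg.sp_file_contents
    if mode == 'proofread':
        inputs_array = ["Polish this LaTeX fragment, keeping every command intact:\n\n" + frag
                        for frag in pfg.sp_file_contents]
        sys_prompt_array = ["You are a professional academic LaTeX editor."] * len(inputs_array)
        return inputs_array, sys_prompt_array
    raise NotImplementedError(mode)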
def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified):
    try:
        with open(log_path, 'r', encoding='utf-8', errors='replace') as f:
            log = f.read()
        with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
            file_lines = f.readlines()
        import re
        buggy_lines = re.findall(tex_name + ':([0-9]{1,5}):', log)
        buggy_lines = [int(l) for l in buggy_lines]
        buggy_lines = sorted(buggy_lines)
        print("removing lines that have errors", buggy_lines)
        file_lines.pop(buggy_lines[0] - 1)
        with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f:
            f.writelines(file_lines)
        return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines
    except:
        print("Fatal error occurred, but we cannot identify the error; please download the zip, read the latex log, and compile manually.")
        return False, -1, [-1]
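A small sketch of the regex above at work (the log text is made up, but in the format pdflatex emits under -file-line-error):

import re
tex_name = 'merge_polish.tex'
log = ("./merge_polish.tex:92: Undefined control sequence.\n"
       "./merge_polish.tex:17: Missing $ inserted.\n")
buggy_lines = sorted(int(l) for l in re.findall(tex_name + ':([0-9]{1,5}):', log))
assert buggy_lines == [17, 92]  # remove_buggy_lines() then drops line 17 and retries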
def compile_latex_with_timeout(command, cwd, timeout=60):
    import subprocess
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()
        stdout, stderr = process.communicate()
        print("Process timed out!")
        return False
    return True
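Usage sketch: every compile step gets its own time budget, so a hung pdflatex run cannot stall the whole pipeline (the working directory below is hypothetical):

ok = compile_latex_with_timeout(
    'pdflatex -interaction=batchmode -file-line-error merge.tex',
    cwd='/tmp/latex_project', timeout=60)
if not ok:
    print('compile step timed out; the caller will patch the tex source and retry')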
def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'):
    import os, time
    current_dir = os.getcwd()
    n_fix = 1
    max_try = 32
    chatbot.append(["正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
    chatbot.append(["正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1])  # refresh the UI
    yield from update_ui_lastest_msg('编译已经开始...', chatbot, history)  # refresh the Gradio front-end

    while True:
        # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
        yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history)  # refresh the Gradio front-end
        ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)

        yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history)  # refresh the Gradio front-end
        ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)

        if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
            # the steps below can only proceed once the second compile step succeeds
            yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history)  # refresh the Gradio front-end
            if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
                ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
            if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
                ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)

            yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history)  # refresh the Gradio front-end
            ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
            ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
            ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
            ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)

            if mode != 'translate_zh':
                yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history)  # refresh the Gradio front-end
                print(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
                ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', current_dir)

                yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history)  # refresh the Gradio front-end
                ok = compile_latex_with_timeout('pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
                ok = compile_latex_with_timeout('bibtex merge_diff.aux', work_folder)
                ok = compile_latex_with_timeout('pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
                ok = compile_latex_with_timeout('pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)

        # <---------- check the results ----------->
        results_ = ""
        original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf'))
        modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf'))
        diff_pdf_success = os.path.exists(pj(work_folder, 'merge_diff.pdf'))
        results_ += f"原始PDF编译是否成功: {original_pdf_success};"
        results_ += f"转化PDF编译是否成功: {modified_pdf_success};"
        results_ += f"对比PDF编译是否成功: {diff_pdf_success};"
        yield from update_ui_lastest_msg(f'第{n_fix}次编译结束:<br/>{results_}...', chatbot, history)  # refresh the Gradio front-end

        if diff_pdf_success:
            result_pdf = pj(work_folder, 'merge_diff.pdf')  # get pdf path (the diff pdf is produced in work_folder)
            promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
        if modified_pdf_success:
            yield from update_ui_lastest_msg('转化PDF编译已经成功, 即将退出 ...', chatbot, history)  # refresh the Gradio front-end
            result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf')  # get pdf path
            if os.path.exists(pj(work_folder, '..', 'translation')):
                shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf'))
            promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
            return True  # success
        else:
            if n_fix >= max_try: break
            n_fix += 1
            can_retry, main_file_modified, buggy_lines = remove_buggy_lines(
                file_path=pj(work_folder_modified, f'{main_file_modified}.tex'),
                log_path=pj(work_folder_modified, f'{main_file_modified}.log'),
                tex_name=f'{main_file_modified}.tex',
                tex_name_pure=f'{main_file_modified}',
                n_fix=n_fix,
                work_folder_modified=work_folder_modified,
            )
            yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history)  # refresh the Gradio front-end
            if not can_retry: break

    return False  # failure
85
crazy_functions/pdf_fns/parse_word.py
Normal file
@@ -0,0 +1,85 @@
from crazy_functions.crazy_utils import read_and_clean_pdf_text, get_files_from_everything
import os
import re

def extract_text_from_files(txt, chatbot, history):
    """
    Find pdf/md/word files referenced by the input, extract their text, and return a status plus the contents.

    Args:
        chatbot: chatbot inputs and outputs (handle of the UI dialog window, used for data-flow visualization)
        history (list): list of the chat history

    Returns:
        whether files were found (bool)
        final_result (list): extracted text contents
        page_one (list): first-page content / summary per file
        file_manifest (list): file paths
        exception (string): information the user must handle manually; stays empty when nothing went wrong
    """

    final_result = []
    page_one = []
    file_manifest = []
    exception = ""

    if txt == "":
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, exception  # the input box names no file; return its content directly

    # look for files referenced in the input box
    file_pdf, pdf_manifest, folder_pdf = get_files_from_everything(txt, '.pdf')
    file_md, md_manifest, folder_md = get_files_from_everything(txt, '.md')
    file_word, word_manifest, folder_word = get_files_from_everything(txt, '.docx')
    file_doc, doc_manifest, folder_doc = get_files_from_everything(txt, '.doc')

    if file_doc:
        exception = "word"
        return False, final_result, page_one, file_manifest, exception

    file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest)
    if file_num == 0:
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, exception  # the input box names no file; return its content directly

    if file_pdf:
        try:  # try importing the dependency; if it is missing, suggest how to install it
            import fitz
        except:
            exception = "pdf"
            return False, final_result, page_one, file_manifest, exception
        for index, fp in enumerate(pdf_manifest):
            file_content, pdf_one = read_and_clean_pdf_text(fp)  # (try to) split the PDF by section
            file_content = file_content.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            pdf_one = str(pdf_one).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            final_result.append(file_content)
            page_one.append(pdf_one)
            file_manifest.append(os.path.relpath(fp, folder_pdf))

    if file_md:
        for index, fp in enumerate(md_manifest):
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
            file_content = file_content.encode('utf-8', 'ignore').decode()
            headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE)  # extract the top-level markdown headings as a summary
            if len(headers) > 0:
                page_one.append("\n".join(headers))  # join all headings, separated by newlines
            else:
                page_one.append("")
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_md))

    if file_word:
        try:  # try importing the dependency; if it is missing, suggest how to install it
            from docx import Document
        except:
            exception = "word_pip"
            return False, final_result, page_one, file_manifest, exception
        for index, fp in enumerate(word_manifest):
            doc = Document(fp)
            file_content = '\n'.join([p.text for p in doc.paragraphs])
            file_content = file_content.encode('utf-8', 'ignore').decode()
            page_one.append(file_content[:200])
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_word))

    return True, final_result, page_one, file_manifest, exception
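Usage sketch (paths are hypothetical): the caller unpacks a five-tuple, and a non-empty exception string names the problem the user has to resolve by hand:

found, texts, summaries, manifest, exception = extract_text_from_files(
    '/tmp/my_docs', chatbot=None, history=[])  # chatbot/history are unused in the body above
if exception == 'word':
    print('.doc is not supported, please convert it to .docx first')
elif exception in ('pdf', 'word_pip'):
    print('missing dependency, e.g. pip install pymupdf / python-docx')
elif found:
    for path, text in zip(manifest, texts):
        print(path, len(text))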
87
crazy_functions/test_project/cpp/cppipc/buffer.cpp
Normal file
@@ -0,0 +1,87 @@
#include "libipc/buffer.h"
#include "libipc/utility/pimpl.h"

#include <cstring>

namespace ipc {

bool operator==(buffer const & b1, buffer const & b2) {
    return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
}

bool operator!=(buffer const & b1, buffer const & b2) {
    return !(b1 == b2);
}

class buffer::buffer_ : public pimpl<buffer_> {
public:
    void* p_;
    std::size_t s_;
    void* a_;
    buffer::destructor_t d_;

    buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
        : p_(p), s_(s), a_(a), d_(d) {
    }

    ~buffer_() {
        if (d_ == nullptr) return;
        d_((a_ == nullptr) ? p_ : a_, s_);
    }
};

buffer::buffer()
    : buffer(nullptr, 0, nullptr, nullptr) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d)
    : p_(p_->make(p, s, d, nullptr)) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
    : p_(p_->make(p, s, d, additional)) {
}

buffer::buffer(void* p, std::size_t s)
    : buffer(p, s, nullptr) {
}

buffer::buffer(char const & c)
    : buffer(const_cast<char*>(&c), 1) {
}

buffer::buffer(buffer&& rhs)
    : buffer() {
    swap(rhs);
}

buffer::~buffer() {
    p_->clear();
}

void buffer::swap(buffer& rhs) {
    std::swap(p_, rhs.p_);
}

buffer& buffer::operator=(buffer rhs) {
    swap(rhs);
    return *this;
}

bool buffer::empty() const noexcept {
    return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
}

void* buffer::data() noexcept {
    return impl(p_)->p_;
}

void const * buffer::data() const noexcept {
    return impl(p_)->p_;
}

std::size_t buffer::size() const noexcept {
    return impl(p_)->s_;
}

} // namespace ipc
701
crazy_functions/test_project/cpp/cppipc/ipc.cpp
Normal file
@@ -0,0 +1,701 @@
#include <type_traits>
#include <cstring>
#include <algorithm>
#include <utility>      // std::pair, std::move, std::forward
#include <atomic>
#include <type_traits>  // aligned_storage_t
#include <string>
#include <vector>
#include <array>
#include <cassert>

#include "libipc/ipc.h"
#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/pool_alloc.h"
#include "libipc/queue.h"
#include "libipc/policy.h"
#include "libipc/rw_lock.h"
#include "libipc/waiter.h"

#include "libipc/utility/log.h"
#include "libipc/utility/id_pool.h"
#include "libipc/utility/scope_guard.h"
#include "libipc/utility/utility.h"

#include "libipc/memory/resource.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_array.h"

namespace {

using msg_id_t = std::uint32_t;
using acc_t = std::atomic<msg_id_t>;

template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t;

template <std::size_t AlignSize>
struct msg_t<0, AlignSize> {
    msg_id_t cc_id_;
    msg_id_t id_;
    std::int32_t remain_;
    bool storage_;
};

template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t : msg_t<0, AlignSize> {
    std::aligned_storage_t<DataSize, AlignSize> data_ {};

    msg_t() = default;
    msg_t(msg_id_t cc_id, msg_id_t id, std::int32_t remain, void const * data, std::size_t size)
        : msg_t<0, AlignSize> {cc_id, id, remain, (data == nullptr) || (size == 0)} {
        if (this->storage_) {
            if (data != nullptr) {
                // copy storage-id
                *reinterpret_cast<ipc::storage_id_t*>(&data_) =
                    *static_cast<ipc::storage_id_t const *>(data);
            }
        }
        else std::memcpy(&data_, data, size);
    }
};

template <typename T>
ipc::buff_t make_cache(T& data, std::size_t size) {
    auto ptr = ipc::mem::alloc(size);
    std::memcpy(ptr, &data, (ipc::detail::min)(sizeof(data), size));
    return { ptr, size, ipc::mem::free };
}

struct cache_t {
    std::size_t fill_;
    ipc::buff_t buff_;

    cache_t(std::size_t f, ipc::buff_t && b)
        : fill_(f), buff_(std::move(b))
    {}

    void append(void const * data, std::size_t size) {
        if (fill_ >= buff_.size() || data == nullptr || size == 0) return;
        auto new_fill = (ipc::detail::min)(fill_ + size, buff_.size());
        std::memcpy(static_cast<ipc::byte_t*>(buff_.data()) + fill_, data, new_fill - fill_);
        fill_ = new_fill;
    }
};

auto cc_acc() {
    static ipc::shm::handle acc_h("__CA_CONN__", sizeof(acc_t));
    return static_cast<acc_t*>(acc_h.get());
}

IPC_CONSTEXPR_ std::size_t align_chunk_size(std::size_t size) noexcept {
    return (((size - 1) / ipc::large_msg_align) + 1) * ipc::large_msg_align;
}

IPC_CONSTEXPR_ std::size_t calc_chunk_size(std::size_t size) noexcept {
    return ipc::make_align(alignof(std::max_align_t), align_chunk_size(
           ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>)) + size));
}

struct chunk_t {
    std::atomic<ipc::circ::cc_t> &conns() noexcept {
        return *reinterpret_cast<std::atomic<ipc::circ::cc_t> *>(this);
    }

    void *data() noexcept {
        return reinterpret_cast<ipc::byte_t *>(this)
             + ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>));
    }
};

struct chunk_info_t {
    ipc::id_pool<> pool_;
    ipc::spin_lock lock_;

    IPC_CONSTEXPR_ static std::size_t chunks_mem_size(std::size_t chunk_size) noexcept {
        return ipc::id_pool<>::max_count * chunk_size;
    }

    ipc::byte_t *chunks_mem() noexcept {
        return reinterpret_cast<ipc::byte_t *>(this + 1);
    }

    chunk_t *at(std::size_t chunk_size, ipc::storage_id_t id) noexcept {
        if (id < 0) return nullptr;
        return reinterpret_cast<chunk_t *>(chunks_mem() + (chunk_size * id));
    }
};

auto& chunk_storages() {
    class chunk_handle_t {
        ipc::shm::handle handle_;

    public:
        chunk_info_t *get_info(std::size_t chunk_size) {
            if (!handle_.valid() &&
                !handle_.acquire( ("__CHUNK_INFO__" + ipc::to_string(chunk_size)).c_str(),
                                  sizeof(chunk_info_t) + chunk_info_t::chunks_mem_size(chunk_size) )) {
                ipc::error("[chunk_storages] chunk_shm.id_info_.acquire failed: chunk_size = %zd\n", chunk_size);
                return nullptr;
            }
            auto info = static_cast<chunk_info_t*>(handle_.get());
            if (info == nullptr) {
                ipc::error("[chunk_storages] chunk_shm.id_info_.get failed: chunk_size = %zd\n", chunk_size);
                return nullptr;
            }
            return info;
        }
    };
    static ipc::map<std::size_t, chunk_handle_t> chunk_hs;
    return chunk_hs;
}

chunk_info_t *chunk_storage_info(std::size_t chunk_size) {
    auto &storages = chunk_storages();
    std::decay_t<decltype(storages)>::iterator it;
    {
        static ipc::rw_lock lock;
        IPC_UNUSED_ std::shared_lock<ipc::rw_lock> guard {lock};
        if ((it = storages.find(chunk_size)) == storages.end()) {
            using chunk_handle_t = std::decay_t<decltype(storages)>::value_type::second_type;
            guard.unlock();
            IPC_UNUSED_ std::lock_guard<ipc::rw_lock> guard {lock};
            it = storages.emplace(chunk_size, chunk_handle_t{}).first;
        }
    }
    return it->second.get_info(chunk_size);
}

std::pair<ipc::storage_id_t, void*> acquire_storage(std::size_t size, ipc::circ::cc_t conns) {
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return {};

    info->lock_.lock();
    info->pool_.prepare();
    // got an unique id
    auto id = info->pool_.acquire();
    info->lock_.unlock();

    auto chunk = info->at(chunk_size, id);
    if (chunk == nullptr) return {};
    chunk->conns().store(conns, std::memory_order_relaxed);
    return { id, chunk->data() };
}

void *find_storage(ipc::storage_id_t id, std::size_t size) {
    if (id < 0) {
        ipc::error("[find_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return nullptr;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return nullptr;
    return info->at(chunk_size, id)->data();
}

void release_storage(ipc::storage_id_t id, std::size_t size) {
    if (id < 0) {
        ipc::error("[release_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return;
    info->lock_.lock();
    info->pool_.release(id);
    info->lock_.unlock();
}

template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::unicast>,
            std::atomic<ipc::circ::cc_t> &/*conns*/, ipc::circ::cc_t /*curr_conns*/, ipc::circ::cc_t /*conn_id*/) noexcept {
    return true;
}

template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::broadcast>,
            std::atomic<ipc::circ::cc_t> &conns, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) noexcept {
    auto last_conns = curr_conns & ~conn_id;
    for (unsigned k = 0;;) {
        auto chunk_conns = conns.load(std::memory_order_acquire);
        if (conns.compare_exchange_weak(chunk_conns, chunk_conns & last_conns, std::memory_order_release)) {
            return (chunk_conns & last_conns) == 0;
        }
        ipc::yield(k);
    }
}

template <typename Flag>
void recycle_storage(ipc::storage_id_t id, std::size_t size, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) {
    if (id < 0) {
        ipc::error("[recycle_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return;

    auto chunk = info->at(chunk_size, id);
    if (chunk == nullptr) return;

    if (!sub_rc(Flag{}, chunk->conns(), curr_conns, conn_id)) {
        return;
    }
    info->lock_.lock();
    info->pool_.release(id);
    info->lock_.unlock();
}

template <typename MsgT>
bool clear_message(void* p) {
    auto msg = static_cast<MsgT*>(p);
    if (msg->storage_) {
        std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg->remain_;
        if (r_size <= 0) {
            ipc::error("[clear_message] invalid msg size: %d\n", (int)r_size);
            return true;
        }
        release_storage(
            *reinterpret_cast<ipc::storage_id_t*>(&msg->data_),
            static_cast<std::size_t>(r_size));
    }
    return true;
}

struct conn_info_head {

    ipc::string name_;
    msg_id_t cc_id_; // connection-info id
    ipc::detail::waiter cc_waiter_, wt_waiter_, rd_waiter_;
    ipc::shm::handle acc_h_;

    conn_info_head(char const * name)
        : name_     {name}
        , cc_id_    {(cc_acc() == nullptr) ? 0 : cc_acc()->fetch_add(1, std::memory_order_relaxed)}
        , cc_waiter_{("__CC_CONN__" + name_).c_str()}
        , wt_waiter_{("__WT_CONN__" + name_).c_str()}
        , rd_waiter_{("__RD_CONN__" + name_).c_str()}
        , acc_h_    {("__AC_CONN__" + name_).c_str(), sizeof(acc_t)} {
    }

    void quit_waiting() {
        cc_waiter_.quit_waiting();
        wt_waiter_.quit_waiting();
        rd_waiter_.quit_waiting();
    }

    auto acc() {
        return static_cast<acc_t*>(acc_h_.get());
    }

    auto& recv_cache() {
        thread_local ipc::unordered_map<msg_id_t, cache_t> tls;
        return tls;
    }
};

template <typename W, typename F>
bool wait_for(W& waiter, F&& pred, std::uint64_t tm) {
    if (tm == 0) return !pred();
    for (unsigned k = 0; pred();) {
        bool ret = true;
        ipc::sleep(k, [&k, &ret, &waiter, &pred, tm] {
            ret = waiter.wait_if(std::forward<F>(pred), tm);
            k = 0;
        });
        if (!ret) return false; // timeout or fail
        if (k == 0) break;      // k has been reset
    }
    return true;
}

template <typename Policy,
          std::size_t DataSize = ipc::data_length,
          std::size_t AlignSize = (ipc::detail::min)(DataSize, alignof(std::max_align_t))>
struct queue_generator {

    using queue_t = ipc::queue<msg_t<DataSize, AlignSize>, Policy>;

    struct conn_info_t : conn_info_head {
        queue_t que_;

        conn_info_t(char const * name)
            : conn_info_head{name}
            , que_{("__QU_CONN__" +
                    ipc::to_string(DataSize) + "__" +
                    ipc::to_string(AlignSize) + "__" + name).c_str()} {
        }

        void disconnect_receiver() {
            bool dis = que_.disconnect();
            this->quit_waiting();
            if (dis) {
                this->recv_cache().clear();
            }
        }
    };
};

template <typename Policy>
struct detail_impl {

    using policy_t = Policy;
    using flag_t = typename policy_t::flag_t;
    using queue_t = typename queue_generator<policy_t>::queue_t;
    using conn_info_t = typename queue_generator<policy_t>::conn_info_t;

    constexpr static conn_info_t* info_of(ipc::handle_t h) noexcept {
        return static_cast<conn_info_t*>(h);
    }

    constexpr static queue_t* queue_of(ipc::handle_t h) noexcept {
        return (info_of(h) == nullptr) ? nullptr : &(info_of(h)->que_);
    }

    /* API implementations */

    static void disconnect(ipc::handle_t h) {
        auto que = queue_of(h);
        if (que == nullptr) {
            return;
        }
        que->shut_sending();
        assert(info_of(h) != nullptr);
        info_of(h)->disconnect_receiver();
    }

    static bool reconnect(ipc::handle_t * ph, bool start_to_recv) {
        assert(ph != nullptr);
        assert(*ph != nullptr);
        auto que = queue_of(*ph);
        if (que == nullptr) {
            return false;
        }
        if (start_to_recv) {
            que->shut_sending();
            if (que->connect()) { // wouldn't connect twice
                info_of(*ph)->cc_waiter_.broadcast();
                return true;
            }
            return false;
        }
        // start_to_recv == false
        if (que->connected()) {
            info_of(*ph)->disconnect_receiver();
        }
        return que->ready_sending();
    }

    static bool connect(ipc::handle_t * ph, char const * name, bool start_to_recv) {
        assert(ph != nullptr);
        if (*ph == nullptr) {
            *ph = ipc::mem::alloc<conn_info_t>(name);
        }
        return reconnect(ph, start_to_recv);
    }

    static void destroy(ipc::handle_t h) {
        disconnect(h);
        ipc::mem::free(info_of(h));
    }

    static std::size_t recv_count(ipc::handle_t h) noexcept {
        auto que = queue_of(h);
        if (que == nullptr) {
            return ipc::invalid_value;
        }
        return que->conn_count();
    }

    static bool wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
        auto que = queue_of(h);
        if (que == nullptr) {
            return false;
        }
        return wait_for(info_of(h)->cc_waiter_, [que, r_count] {
            return que->conn_count() < r_count;
        }, tm);
    }

    template <typename F>
    static bool send(F&& gen_push, ipc::handle_t h, void const * data, std::size_t size) {
        if (data == nullptr || size == 0) {
            ipc::error("fail: send(%p, %zd)\n", data, size);
            return false;
        }
        auto que = queue_of(h);
        if (que == nullptr) {
            ipc::error("fail: send, queue_of(h) == nullptr\n");
            return false;
        }
        if (que->elems() == nullptr) {
            ipc::error("fail: send, queue_of(h)->elems() == nullptr\n");
            return false;
        }
        if (!que->ready_sending()) {
            ipc::error("fail: send, que->ready_sending() == false\n");
            return false;
        }
        ipc::circ::cc_t conns = que->elems()->connections(std::memory_order_relaxed);
        if (conns == 0) {
            ipc::error("fail: send, there is no receiver on this connection.\n");
            return false;
        }
        // calc a new message id
        auto acc = info_of(h)->acc();
        if (acc == nullptr) {
            ipc::error("fail: send, info_of(h)->acc() == nullptr\n");
            return false;
        }
        auto msg_id = acc->fetch_add(1, std::memory_order_relaxed);
        auto try_push = std::forward<F>(gen_push)(info_of(h), que, msg_id);
        if (size > ipc::large_msg_limit) {
            auto dat = acquire_storage(size, conns);
            void * buf = dat.second;
            if (buf != nullptr) {
                std::memcpy(buf, data, size);
                return try_push(static_cast<std::int32_t>(size) -
                                static_cast<std::int32_t>(ipc::data_length), &(dat.first), 0);
            }
            // try using message fragments instead
            //ipc::log("fail: shm::handle for big message. msg_id: %zd, size: %zd\n", msg_id, size);
        }
        // push message fragments
        std::int32_t offset = 0;
        for (std::int32_t i = 0; i < static_cast<std::int32_t>(size / ipc::data_length); ++i, offset += ipc::data_length) {
            if (!try_push(static_cast<std::int32_t>(size) - offset - static_cast<std::int32_t>(ipc::data_length),
                          static_cast<ipc::byte_t const *>(data) + offset, ipc::data_length)) {
                return false;
            }
        }
        // if remain > 0, this is the last message fragment
        std::int32_t remain = static_cast<std::int32_t>(size) - offset;
        if (remain > 0) {
            if (!try_push(remain - static_cast<std::int32_t>(ipc::data_length),
                          static_cast<ipc::byte_t const *>(data) + offset,
                          static_cast<std::size_t>(remain))) {
                return false;
            }
        }
        return true;
    }

    static bool send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
        return send([tm](auto info, auto que, auto msg_id) {
            return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
                if (!wait_for(info->wt_waiter_, [&] {
                        return !que->push(
                            [](void*) { return true; },
                            info->cc_id_, msg_id, remain, data, size);
                    }, tm)) {
                    ipc::log("force_push: msg_id = %zd, remain = %d, size = %zd\n", msg_id, remain, size);
                    if (!que->force_push(
                            clear_message<typename queue_t::value_t>,
                            info->cc_id_, msg_id, remain, data, size)) {
                        return false;
                    }
                }
                info->rd_waiter_.broadcast();
                return true;
            };
        }, h, data, size);
    }

    static bool try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
        return send([tm](auto info, auto que, auto msg_id) {
            return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
                if (!wait_for(info->wt_waiter_, [&] {
                        return !que->push(
                            [](void*) { return true; },
                            info->cc_id_, msg_id, remain, data, size);
                    }, tm)) {
                    return false;
                }
                info->rd_waiter_.broadcast();
                return true;
            };
        }, h, data, size);
    }

    static ipc::buff_t recv(ipc::handle_t h, std::uint64_t tm) {
        auto que = queue_of(h);
        if (que == nullptr) {
            ipc::error("fail: recv, queue_of(h) == nullptr\n");
            return {};
        }
        if (!que->connected()) {
            // hasn't connected yet, just return.
            return {};
        }
        auto& rc = info_of(h)->recv_cache();
        for (;;) {
            // pop a new message
            typename queue_t::value_t msg;
            if (!wait_for(info_of(h)->rd_waiter_, [que, &msg] {
                    return !que->pop(msg);
                }, tm)) {
                // pop failed, just return.
                return {};
            }
            info_of(h)->wt_waiter_.broadcast();
            if ((info_of(h)->acc() != nullptr) && (msg.cc_id_ == info_of(h)->cc_id_)) {
                continue; // ignore messages to self
            }
            // msg.remain_ may be negative, with abs(msg.remain_) < data_length
            std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg.remain_;
            if (r_size <= 0) {
                ipc::error("fail: recv, r_size = %d\n", (int)r_size);
                return {};
            }
            std::size_t msg_size = static_cast<std::size_t>(r_size);
            // large message
            if (msg.storage_) {
                ipc::storage_id_t buf_id = *reinterpret_cast<ipc::storage_id_t*>(&msg.data_);
                void* buf = find_storage(buf_id, msg_size);
                if (buf != nullptr) {
                    struct recycle_t {
                        ipc::storage_id_t storage_id;
                        ipc::circ::cc_t   curr_conns;
                        ipc::circ::cc_t   conn_id;
                    } *r_info = ipc::mem::alloc<recycle_t>(recycle_t{
                        buf_id, que->elems()->connections(std::memory_order_relaxed), que->connected_id()
                    });
                    if (r_info == nullptr) {
                        ipc::log("fail: ipc::mem::alloc<recycle_t>.\n");
                        return ipc::buff_t{buf, msg_size}; // no recycle
                    } else {
                        return ipc::buff_t{buf, msg_size, [](void* p_info, std::size_t size) {
                            auto r_info = static_cast<recycle_t *>(p_info);
                            IPC_UNUSED_ auto finally = ipc::guard([r_info] {
                                ipc::mem::free(r_info);
                            });
                            recycle_storage<flag_t>(r_info->storage_id, size, r_info->curr_conns, r_info->conn_id);
                        }, r_info};
                    }
                } else {
                    ipc::log("fail: shm::handle for large message. msg_id: %zd, buf_id: %zd, size: %zd\n", msg.id_, buf_id, msg_size);
                    continue;
                }
            }
            // find cache with msg.id_
            auto cac_it = rc.find(msg.id_);
            if (cac_it == rc.end()) {
                if (msg_size <= ipc::data_length) {
                    return make_cache(msg.data_, msg_size);
                }
                // gc
                if (rc.size() > 1024) {
                    std::vector<msg_id_t> need_del;
                    for (auto const & pair : rc) {
                        auto cmp = std::minmax(msg.id_, pair.first);
                        if (cmp.second - cmp.first > 8192) {
                            need_del.push_back(pair.first);
                        }
                    }
                    for (auto id : need_del) rc.erase(id);
                }
                // cache the first message fragment
                rc.emplace(msg.id_, cache_t { ipc::data_length, make_cache(msg.data_, msg_size) });
            }
            // this message id has been cached before
            else {
                auto& cac = cac_it->second;
                // this is the last message fragment
                if (msg.remain_ <= 0) {
                    cac.append(&(msg.data_), msg_size);
                    // finish this message, erase it from cache
                    auto buff = std::move(cac.buff_);
                    rc.erase(cac_it);
                    return buff;
                }
                // more data remains after this fragment
                cac.append(&(msg.data_), ipc::data_length);
            }
        }
    }

    static ipc::buff_t try_recv(ipc::handle_t h) {
        return recv(h, 0);
    }

}; // detail_impl<Policy>

template <typename Flag>
using policy_t = ipc::policy::choose<ipc::circ::elem_array, Flag>;

} // internal-linkage

namespace ipc {

template <typename Flag>
ipc::handle_t chan_impl<Flag>::inited() {
    ipc::detail::waiter::init();
    return nullptr;
}

template <typename Flag>
bool chan_impl<Flag>::connect(ipc::handle_t * ph, char const * name, unsigned mode) {
    return detail_impl<policy_t<Flag>>::connect(ph, name, mode & receiver);
}

template <typename Flag>
bool chan_impl<Flag>::reconnect(ipc::handle_t * ph, unsigned mode) {
    return detail_impl<policy_t<Flag>>::reconnect(ph, mode & receiver);
}

template <typename Flag>
void chan_impl<Flag>::disconnect(ipc::handle_t h) {
    detail_impl<policy_t<Flag>>::disconnect(h);
}

template <typename Flag>
void chan_impl<Flag>::destroy(ipc::handle_t h) {
    detail_impl<policy_t<Flag>>::destroy(h);
}

template <typename Flag>
char const * chan_impl<Flag>::name(ipc::handle_t h) {
    auto info = detail_impl<policy_t<Flag>>::info_of(h);
    return (info == nullptr) ? nullptr : info->name_.c_str();
}

template <typename Flag>
std::size_t chan_impl<Flag>::recv_count(ipc::handle_t h) {
    return detail_impl<policy_t<Flag>>::recv_count(h);
}

template <typename Flag>
bool chan_impl<Flag>::wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
    return detail_impl<policy_t<Flag>>::wait_for_recv(h, r_count, tm);
}

template <typename Flag>
bool chan_impl<Flag>::send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
    return detail_impl<policy_t<Flag>>::send(h, data, size, tm);
}

template <typename Flag>
buff_t chan_impl<Flag>::recv(ipc::handle_t h, std::uint64_t tm) {
    return detail_impl<policy_t<Flag>>::recv(h, tm);
}

template <typename Flag>
bool chan_impl<Flag>::try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
    return detail_impl<policy_t<Flag>>::try_send(h, data, size, tm);
}

template <typename Flag>
buff_t chan_impl<Flag>::try_recv(ipc::handle_t h) {
    return detail_impl<policy_t<Flag>>::try_recv(h);
}

template struct chan_impl<ipc::wr<relat::single, relat::single, trans::unicast  >>;
// template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::unicast  >>; // TBD
// template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::unicast  >>; // TBD
template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::broadcast>>;
template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::broadcast>>;

} // namespace ipc
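A language-agnostic sketch (in Python, not part of the repo) of the remain-field arithmetic that send()/recv() above use to fragment a message into data_length-sized queue elements: each fragment carries remain = total - consumed - data_length, so remain <= 0 marks the last fragment and data_length + remain recovers its true byte count.

data_length = 64   # assumed per-element payload capacity
size = 150         # total message size

fragments = []
offset = 0
for _ in range(size // data_length):
    fragments.append(size - offset - data_length)  # remain after this full piece
    offset += data_length
if size - offset > 0:                              # trailing short fragment
    fragments.append((size - offset) - data_length)

assert fragments == [86, 22, -42]
assert data_length + fragments[-1] == 22 == size % data_length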
25
crazy_functions/test_project/cpp/cppipc/policy.h
Normal file
@@ -0,0 +1,25 @@
#pragma once

#include <type_traits>

#include "libipc/def.h"
#include "libipc/prod_cons.h"

#include "libipc/circ/elem_array.h"

namespace ipc {
namespace policy {

template <template <typename, std::size_t...> class Elems, typename Flag>
struct choose;

template <typename Flag>
struct choose<circ::elem_array, Flag> {
    using flag_t = Flag;

    template <std::size_t DataSize, std::size_t AlignSize>
    using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
};

} // namespace policy
} // namespace ipc
17
crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
Normal file
@@ -0,0 +1,17 @@
#include "libipc/pool_alloc.h"

#include "libipc/memory/resource.h"

namespace ipc {
namespace mem {

void* pool_alloc::alloc(std::size_t size) {
    return async_pool_alloc::alloc(size);
}

void pool_alloc::free(void* p, std::size_t size) {
    async_pool_alloc::free(p, size);
}

} // namespace mem
} // namespace ipc
433
crazy_functions/test_project/cpp/cppipc/prod_cons.h
Normal file
@@ -0,0 +1,433 @@
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <utility>
|
||||
#include <cstring>
|
||||
#include <type_traits>
|
||||
#include <cstdint>
|
||||
|
||||
#include "libipc/def.h"
|
||||
|
||||
#include "libipc/platform/detail.h"
|
||||
#include "libipc/circ/elem_def.h"
|
||||
#include "libipc/utility/log.h"
|
||||
#include "libipc/utility/utility.h"
|
||||
|
||||
namespace ipc {
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
/// producer-consumer implementation
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
template <typename Flag>
|
||||
struct prod_cons_impl;
|
||||
|
||||
template <>
|
||||
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||
|
||||
template <std::size_t DataSize, std::size_t AlignSize>
|
||||
struct elem_t {
|
||||
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||
};
|
||||
|
||||
alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
|
||||
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||
|
||||
constexpr circ::u2_t cursor() const noexcept {
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||
auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||
if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
|
||||
return false; // full
|
||||
}
|
||||
std::forward<F>(f)(&(elems[cur_wt].data_));
|
||||
wt_.fetch_add(1, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
|
||||
* So we could just disconnect all connections of receiver, and return false.
|
||||
*/
|
||||
template <typename W, typename F, typename E>
|
||||
bool force_push(W* wrapper, F&&, E*) {
|
||||
wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename R, typename E>
|
||||
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
|
||||
auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
|
||||
if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||
return false; // empty
|
||||
}
|
||||
std::forward<F>(f)(&(elems[cur_rd].data_));
|
||||
std::forward<R>(out)(true);
|
||||
rd_.fetch_add(1, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
|
||||
: prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool force_push(W* wrapper, F&&, E*) {
|
||||
wrapper->elems()->disconnect_receiver(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename R,
|
||||
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||
byte_t buff[DS];
|
||||
for (unsigned k = 0;;) {
|
||||
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||
if (circ::index_of(cur_rd) ==
|
||||
circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||
return false; // empty
|
||||
}
|
||||
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||
std::forward<F>(f)(buff);
|
||||
std::forward<R>(out)(true);
|
||||
return true;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
|
||||
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
|
||||
|
||||
using flag_t = std::uint64_t;
|
||||
|
||||
template <std::size_t DataSize, std::size_t AlignSize>
|
||||
struct elem_t {
|
||||
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||
};
|
||||
|
||||
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||
circ::u2_t cur_ct, nxt_ct;
|
||||
for (unsigned k = 0;;) {
|
||||
cur_ct = ct_.load(std::memory_order_relaxed);
|
||||
if (circ::index_of(nxt_ct = cur_ct + 1) ==
|
||||
circ::index_of(rd_.load(std::memory_order_acquire))) {
|
||||
return false; // full
|
||||
}
|
||||
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
|
||||
break;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
auto* el = elems + circ::index_of(cur_ct);
|
||||
std::forward<F>(f)(&(el->data_));
|
||||
// set flag & try update wt
|
||||
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||
while (1) {
|
||||
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
|
||||
return true;
|
||||
}
|
||||
if ((~cac_ct) != cur_ct) {
|
||||
return true;
|
||||
}
|
||||
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
|
||||
return true;
|
||||
}
|
||||
wt_.store(nxt_ct, std::memory_order_release);
|
||||
cur_ct = nxt_ct;
|
||||
nxt_ct = cur_ct + 1;
|
||||
el = elems + circ::index_of(cur_ct);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool force_push(W* wrapper, F&&, E*) {
|
||||
wrapper->elems()->disconnect_receiver(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename R,
|
||||
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||
byte_t buff[DS];
|
||||
for (unsigned k = 0;;) {
|
||||
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||
auto cur_wt = wt_.load(std::memory_order_acquire);
|
||||
auto id_rd = circ::index_of(cur_rd);
|
||||
auto id_wt = circ::index_of(cur_wt);
|
||||
if (id_rd == id_wt) {
|
||||
auto* el = elems + id_wt;
|
||||
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||
if ((~cac_ct) != cur_wt) {
|
||||
return false; // empty
|
||||
}
|
||||
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
|
||||
wt_.store(cur_wt + 1, std::memory_order_release);
|
||||
}
|
||||
k = 0;
|
||||
}
|
||||
else {
|
||||
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||
std::forward<F>(f)(buff);
|
||||
std::forward<R>(out)(true);
|
||||
return true;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
|
||||
|
||||
using rc_t = std::uint64_t;
|
||||
|
||||
enum : rc_t {
|
||||
ep_mask = 0x00000000ffffffffull,
|
||||
ep_incr = 0x0000000100000000ull
|
||||
};
|
||||
|
||||
template <std::size_t DataSize, std::size_t AlignSize>
|
||||
struct elem_t {
|
||||
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||
std::atomic<rc_t> rc_ { 0 }; // read-counter
|
||||
};
|
||||
|
||||
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
|
||||
|
||||
circ::u2_t cursor() const noexcept {
|
||||
return wt_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool push(W* wrapper, F&& f, E* elems) {
|
||||
E* el;
|
||||
for (unsigned k = 0;;) {
|
||||
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||
if (cc == 0) return false; // no reader
|
||||
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||
// check all consumers have finished reading this element
|
||||
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
|
||||
return false; // has not finished yet
|
||||
}
|
||||
// consider rem_cc to be 0 here
|
||||
if (el->rc_.compare_exchange_weak(
|
||||
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||
break;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
std::forward<F>(f)(&(el->data_));
|
||||
wt_.fetch_add(1, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename E>
|
||||
bool force_push(W* wrapper, F&& f, E* elems) {
|
||||
E* el;
|
||||
epoch_ += ep_incr;
|
||||
for (unsigned k = 0;;) {
|
||||
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||
if (cc == 0) return false; // no reader
|
||||
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||
// check all consumers have finished reading this element
|
||||
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||
if (cc & rem_cc) {
|
||||
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
||||
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
||||
if (cc == 0) return false; // no reader
|
||||
}
|
||||
// just compare & exchange
|
||||
if (el->rc_.compare_exchange_weak(
|
||||
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||
break;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
std::forward<F>(f)(&(el->data_));
|
||||
wt_.fetch_add(1, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename W, typename F, typename R, typename E>
|
||||
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
|
||||
if (cur == cursor()) return false; // acquire
|
||||
auto* el = elems + circ::index_of(cur++);
|
||||
std::forward<F>(f)(&(el->data_));
|
||||
for (unsigned k = 0;;) {
|
||||
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||
if ((cur_rc & ep_mask) == 0) {
|
||||
std::forward<R>(out)(true);
|
||||
return true;
|
||||
}
|
||||
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
|
||||
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
||||
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
|
||||
return true;
|
||||
}
|
||||
ipc::yield(k);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
|
||||
|
||||
using rc_t = std::uint64_t;
|
||||
using flag_t = std::uint64_t;
|
||||
|
||||
enum : rc_t {
|
||||
rc_mask = 0x00000000ffffffffull,
|
||||
ep_mask = 0x00ffffffffffffffull,
|
||||
ep_incr = 0x0100000000000000ull,
|
||||
ic_mask = 0xff000000ffffffffull,
|
||||
ic_incr = 0x0000000100000000ull
|
||||
};
|
||||
|
||||
template <std::size_t DataSize, std::size_t AlignSize>
|
||||
struct elem_t {
|
||||
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||
std::atomic<rc_t > rc_ { 0 }; // read-counter
|
||||
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||
};
|
||||
|
||||
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
|
||||
|
||||
circ::u2_t cursor() const noexcept {
|
||||
return ct_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
    constexpr static rc_t inc_rc(rc_t rc) noexcept {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    }

    constexpr static rc_t inc_mask(rc_t rc) noexcept {
        return inc_rc(rc) & ~rc_mask;
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.load(std::memory_order_acquire);
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_relaxed);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
                return false; // has not finished yet
            }
            else if (!rem_cc) {
                auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
                if ((cur_fl != cur_ct) && cur_fl) {
                    return false; // full
                }
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
                epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
                if (epoch == epoch_.load(std::memory_order_acquire)) {
                    break;
                }
                else if (push(wrapper, std::forward<F>(f), elems)) {
                    return true;
                }
                epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E, std::size_t N>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
        auto* el = elems + circ::index_of(cur);
        auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
        if (cur_fl != ~static_cast<flag_t>(cur)) {
            return false; // empty
        }
        ++cur;
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & rc_mask) == 0) {
                std::forward<R>(out)(true);
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
                return true;
            }
            auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
            bool last_one = false;
            if ((last_one = (nxt_rc & rc_mask) == 0)) {
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
            }
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)(last_one);
                return true;
            }
            ipc::yield(k);
        }
    }
};

} // namespace ipc
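A quick sanity check of the bit-field arithmetic above: inc_rc advances only the 24-bit index counter in bits 32..55 (the ~ic_mask region), leaving the low 32 reader-flag bits and the top 8 epoch bits untouched. A minimal, self-contained sketch; the mask values are copied from this specialization's enum, and the sample value is made up:

#include <cassert>
#include <cstdint>

int main() {
    using rc_t = std::uint64_t;
    constexpr rc_t rc_mask = 0x00000000ffffffffull;  // low 32 bits: per-reader flags
    constexpr rc_t ic_mask = 0xff000000ffffffffull;  // bits inc_rc leaves untouched
    constexpr rc_t ic_incr = 0x0000000100000000ull;  // +1 in the index field (bits 32..55)

    auto inc_rc = [](rc_t rc) {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    };

    rc_t rc = 0xAB00000500000003ull;  // epoch 0xAB, index 5, readers 0 and 1 pending (made up)
    rc_t r  = inc_rc(rc);
    assert((r & ~ic_mask) == 0x0000000600000000ull); // index advanced: 5 -> 6
    assert((r & rc_mask)  == 0x3ull);                // reader flags untouched
    assert((r >> 56)      == 0xABull);               // epoch untouched
    return 0;
}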
216  crazy_functions/test_project/cpp/cppipc/queue.h  (Normal file)
@@ -0,0 +1,216 @@
#pragma once

#include <type_traits>
#include <new>
#include <utility>   // [[since C++14]]: std::exchange
#include <algorithm>
#include <atomic>
#include <tuple>
#include <thread>
#include <chrono>
#include <string>
#include <cassert>   // assert

#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/rw_lock.h"

#include "libipc/utility/log.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"

namespace ipc {
namespace detail {

class queue_conn {
protected:
    circ::cc_t connected_ = 0;
    shm::handle elems_h_;

    template <typename Elems>
    Elems* open(char const * name) {
        if (name == nullptr || name[0] == '\0') {
            ipc::error("fail open waiter: name is empty!\n");
            return nullptr;
        }
        if (!elems_h_.acquire(name, sizeof(Elems))) {
            return nullptr;
        }
        auto elems = static_cast<Elems*>(elems_h_.get());
        if (elems == nullptr) {
            ipc::error("fail acquire elems: %s\n", name);
            return nullptr;
        }
        elems->init();
        return elems;
    }

    void close() {
        elems_h_.release();
    }

public:
    queue_conn() = default;
    queue_conn(const queue_conn&) = delete;
    queue_conn& operator=(const queue_conn&) = delete;

    bool connected() const noexcept {
        return connected_ != 0;
    }

    circ::cc_t connected_id() const noexcept {
        return connected_;
    }

    template <typename Elems>
    auto connect(Elems* elems) noexcept
        /*needs 'optional' here*/
        -> std::tuple<bool, bool, decltype(std::declval<Elems>().cursor())> {
        if (elems == nullptr) return {};
        // if it's already connected, just return
        if (connected()) return {connected(), false, 0};
        connected_ = elems->connect_receiver();
        return {connected(), true, elems->cursor()};
    }

    template <typename Elems>
    bool disconnect(Elems* elems) noexcept {
        if (elems == nullptr) return false;
        // if it's already disconnected, just return false
        if (!connected()) return false;
        elems->disconnect_receiver(std::exchange(connected_, 0));
        return true;
    }
};

template <typename Elems>
class queue_base : public queue_conn {
    using base_t = queue_conn;

public:
    using elems_t  = Elems;
    using policy_t = typename elems_t::policy_t;

protected:
    elems_t * elems_ = nullptr;
    decltype(std::declval<elems_t>().cursor()) cursor_ = 0;
    bool sender_flag_ = false;

public:
    using base_t::base_t;

    queue_base() = default;

    explicit queue_base(char const * name)
        : queue_base{} {
        elems_ = open<elems_t>(name);
    }

    explicit queue_base(elems_t * elems) noexcept
        : queue_base{} {
        assert(elems != nullptr);
        elems_ = elems;
    }

    /* not virtual */ ~queue_base() {
        base_t::close();
    }

    elems_t *       elems()       noexcept { return elems_; }
    elems_t const * elems() const noexcept { return elems_; }

    bool ready_sending() noexcept {
        if (elems_ == nullptr) return false;
        return sender_flag_ || (sender_flag_ = elems_->connect_sender());
    }

    void shut_sending() noexcept {
        if (elems_ == nullptr) return;
        if (!sender_flag_) return;
        elems_->disconnect_sender();
    }

    bool connect() noexcept {
        auto tp = base_t::connect(elems_);
        if (std::get<0>(tp) && std::get<1>(tp)) {
            cursor_ = std::get<2>(tp);
            return true;
        }
        return std::get<0>(tp);
    }

    bool disconnect() noexcept {
        return base_t::disconnect(elems_);
    }

    std::size_t conn_count() const noexcept {
        return (elems_ == nullptr) ? static_cast<std::size_t>(invalid_value) : elems_->conn_count();
    }

    bool valid() const noexcept {
        return elems_ != nullptr;
    }

    bool empty() const noexcept {
        return !valid() || (cursor_ == elems_->cursor());
    }

    template <typename T, typename F, typename... P>
    bool push(F&& prep, P&&... params) {
        if (elems_ == nullptr) return false;
        return elems_->push(this, [&](void* p) {
            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
        });
    }

    template <typename T, typename F, typename... P>
    bool force_push(F&& prep, P&&... params) {
        if (elems_ == nullptr) return false;
        return elems_->force_push(this, [&](void* p) {
            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
        });
    }

    template <typename T, typename F>
    bool pop(T& item, F&& out) {
        if (elems_ == nullptr) {
            return false;
        }
        return elems_->pop(this, &(this->cursor_), [&item](void* p) {
            ::new (&item) T(std::move(*static_cast<T*>(p)));
        }, std::forward<F>(out));
    }
};

} // namespace detail

template <typename T, typename Policy>
class queue final : public detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>> {
    using base_t = detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>>;

public:
    using value_t = T;

    using base_t::base_t;

    template <typename... P>
    bool push(P&&... params) {
        return base_t::template push<T>(std::forward<P>(params)...);
    }

    template <typename... P>
    bool force_push(P&&... params) {
        return base_t::template force_push<T>(std::forward<P>(params)...);
    }

    bool pop(T& item) {
        return base_t::pop(item, [](bool) {});
    }

    template <typename F>
    bool pop(T& item, F&& out) {
        return base_t::pop(item, std::forward<F>(out));
    }
};

} // namespace ipc
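A hedged usage sketch of the queue facade above. "MyPolicy" stands in for any policy type providing the elems_t<DataSize, AlignSize> template that queue_base expects (libipc supplies these via its prod_cons_impl specializations), and the queue name is made up; note that queue::push forwards its first argument as the prep callable which queue_base invokes before placement-new'ing the value:

// Sketch only: needs a concrete Policy from libipc to compile.
ipc::queue<int, MyPolicy> q{"demo-q"};

// receiver side
if (q.connect()) {
    int v;
    if (q.pop(v)) { /* consume v */ }
    q.disconnect();
}

// sender side: first argument is the prep callable, the rest construct the T
if (q.ready_sending()) {
    q.push([](void*) { return true; }, 42);
    q.shut_sending();
}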
103  crazy_functions/test_project/cpp/cppipc/shm.cpp  (Normal file)
@@ -0,0 +1,103 @@

#include <string>
#include <utility>

#include "libipc/shm.h"

#include "libipc/utility/pimpl.h"
#include "libipc/memory/resource.h"

namespace ipc {
namespace shm {

class handle::handle_ : public pimpl<handle_> {
public:
    shm::id_t id_ = nullptr;
    void*     m_  = nullptr;

    ipc::string n_;
    std::size_t s_ = 0;
};

handle::handle()
    : p_(p_->make()) {
}

handle::handle(char const * name, std::size_t size, unsigned mode)
    : handle() {
    acquire(name, size, mode);
}

handle::handle(handle&& rhs)
    : handle() {
    swap(rhs);
}

handle::~handle() {
    release();
    p_->clear();
}

void handle::swap(handle& rhs) {
    std::swap(p_, rhs.p_);
}

handle& handle::operator=(handle rhs) {
    swap(rhs);
    return *this;
}

bool handle::valid() const noexcept {
    return impl(p_)->m_ != nullptr;
}

std::size_t handle::size() const noexcept {
    return impl(p_)->s_;
}

char const * handle::name() const noexcept {
    return impl(p_)->n_.c_str();
}

std::int32_t handle::ref() const noexcept {
    return shm::get_ref(impl(p_)->id_);
}

void handle::sub_ref() noexcept {
    shm::sub_ref(impl(p_)->id_);
}

bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
    release();
    impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
    impl(p_)->m_  = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
    return valid();
}

std::int32_t handle::release() {
    if (impl(p_)->id_ == nullptr) return -1;
    return shm::release(detach());
}

void* handle::get() const {
    return impl(p_)->m_;
}

void handle::attach(id_t id) {
    if (id == nullptr) return;
    release();
    impl(p_)->id_ = id;
    impl(p_)->m_  = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
}

id_t handle::detach() {
    auto old = impl(p_)->id_;
    impl(p_)->id_ = nullptr;
    impl(p_)->m_  = nullptr;
    impl(p_)->s_  = 0;
    impl(p_)->n_.clear();
    return old;
}

} // namespace shm
} // namespace ipc
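A hedged sketch of the RAII flow this file implements. It assumes the acquire() mode parameter has a default declared in libipc/shm.h (not shown here), and the segment name is made up:

ipc::shm::handle h;
if (h.acquire("demo-shm", 4096)) {   // named segment, at least 4096 bytes
    void* mem = h.get();             // mapped memory; h.size() bytes, h.ref() owners
    // ... read/write mem from any process that acquires the same name ...
}                                    // ~handle() calls release() automatically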
83  crazy_functions/test_project/cpp/cppipc/waiter.h  (Normal file)
@@ -0,0 +1,83 @@
#pragma once

#include <utility>
#include <string>
#include <mutex>
#include <atomic>

#include "libipc/def.h"
#include "libipc/mutex.h"
#include "libipc/condition.h"
#include "libipc/platform/detail.h"

namespace ipc {
namespace detail {

class waiter {
    ipc::sync::condition cond_;
    ipc::sync::mutex     lock_;
    std::atomic<bool>    quit_ {false};

public:
    static void init();

    waiter() = default;
    waiter(char const *name) {
        open(name);
    }

    ~waiter() {
        close();
    }

    bool valid() const noexcept {
        return cond_.valid() && lock_.valid();
    }

    bool open(char const *name) noexcept {
        quit_.store(false, std::memory_order_relaxed);
        if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
            return false;
        }
        if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
            cond_.close();
            return false;
        }
        return valid();
    }

    void close() noexcept {
        cond_.close();
        lock_.close();
    }

    template <typename F>
    bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
        IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
        while ([this, &pred] {
                    return !quit_.load(std::memory_order_relaxed)
                        && std::forward<F>(pred)();
                }()) {
            if (!cond_.wait(lock_, tm)) return false;
        }
        return true;
    }

    bool notify() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.notify(lock_);
    }

    bool broadcast() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.broadcast(lock_);
    }

    bool quit_waiting() {
        quit_.store(true, std::memory_order_release);
        return broadcast();
    }
};

} // namespace detail
} // namespace ipc
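An illustrative sketch of the waiter above shared by two processes opening the same name; data_is_ready() is a hypothetical predicate over shared state:

ipc::detail::waiter w{"demo-waiter"};  // opens "_waiter_cond_demo-waiter" and "_waiter_lock_demo-waiter"

// waiting side: blocks while the predicate holds, or until quit_waiting()
w.wait_if([] { return !data_is_ready(); });

// notifying side: after publishing the shared data
w.notify();                            // or w.broadcast() to wake every waiter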
3  crazy_functions/test_project/cpp/cppipc/来源  (Normal file)
@@ -0,0 +1,3 @@
https://github.com/mutouyun/cpp-ipc

A high-performance inter-process communication library using shared memory on Linux/Windows.

3276  crazy_functions/test_project/cpp/libJPG/jpgd.cpp  (Normal file)
File diff suppressed because it is too large
316  crazy_functions/test_project/cpp/libJPG/jpgd.h  (Normal file)
@@ -0,0 +1,316 @@
// jpgd.h - C++ class for JPEG decompression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
#ifndef JPEG_DECODER_H
#define JPEG_DECODER_H

#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>

namespace jpgd
{
    typedef unsigned char  uint8;
    typedef signed short   int16;
    typedef unsigned short uint16;
    typedef unsigned int   uint;
    typedef signed int     int32;

    // Loads a JPEG image from a memory buffer or a file.
    // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
    // On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
    // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
    // Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
    // BEGIN EPIC MOD
    //unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
    unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
    // END EPIC MOD
    unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);

    // Success/failure error codes.
    enum jpgd_status
    {
        JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
        JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
        JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
        JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
        JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
        JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
        JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
        JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
        JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
    };

    // Input stream interface.
    // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
    // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
    // If the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer.
    // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
    class jpeg_decoder_stream
    {
    public:
        jpeg_decoder_stream() { }
        virtual ~jpeg_decoder_stream() { }

        // The read() method is called when the internal input buffer is empty.
        // Parameters:
        // pBuf - input buffer
        // max_bytes_to_read - maximum bytes that can be written to pBuf
        // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
        // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
        // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
        virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
    };

    // stdio FILE stream class.
    class jpeg_decoder_file_stream : public jpeg_decoder_stream
    {
        jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
        jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);

        FILE *m_pFile;
        bool m_eof_flag, m_error_flag;

    public:
        jpeg_decoder_file_stream();
        virtual ~jpeg_decoder_file_stream();

        bool open(const char *Pfilename);
        void close();

        virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
    };

    // Memory stream class.
    class jpeg_decoder_mem_stream : public jpeg_decoder_stream
    {
        const uint8 *m_pSrc_data;
        uint m_ofs, m_size;

    public:
        jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
        jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }

        virtual ~jpeg_decoder_mem_stream() { }

        bool open(const uint8 *pSrc_data, uint size);
        void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }

        virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
    };

    // Loads JPEG file from a jpeg_decoder_stream.
    unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);

    enum
    {
        JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
        JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
    };

    typedef int16 jpgd_quant_t;
    typedef int16 jpgd_block_t;

    class jpeg_decoder
    {
    public:
        // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
        // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
        jpeg_decoder(jpeg_decoder_stream *pStream);

        ~jpeg_decoder();

        // Call this method after constructing the object to begin decompression.
        // If JPGD_SUCCESS is returned you may then call decode() on each scanline.
        int begin_decoding();

        // Returns the next scan line.
        // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
        // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
        // Returns JPGD_SUCCESS if a scan line has been returned.
        // Returns JPGD_DONE if all scan lines have been returned.
        // Returns JPGD_FAILED if an error occurred. Call get_error_code() for more info.
        int decode(const void** pScan_line, uint* pScan_line_len);

        inline jpgd_status get_error_code() const { return m_error_code; }

        inline int get_width() const { return m_image_x_size; }
        inline int get_height() const { return m_image_y_size; }

        inline int get_num_components() const { return m_comps_in_frame; }

        inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
        inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }

        // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
        inline int get_total_bytes_read() const { return m_total_bytes_read; }

    private:
        jpeg_decoder(const jpeg_decoder &);
        jpeg_decoder &operator =(const jpeg_decoder &);

        typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);

        struct huff_tables
        {
            bool ac_table;
            uint look_up[256];
            uint look_up2[256];
            uint8 code_size[256];
            uint tree[512];
        };

        struct coeff_buf
        {
            uint8 *pData;
            int block_num_x, block_num_y;
            int block_len_x, block_len_y;
            int block_size;
        };

        struct mem_block
        {
            mem_block *m_pNext;
            size_t m_used_count;
            size_t m_size;
            char m_data[1];
        };

        jmp_buf m_jmp_state;
        mem_block *m_pMem_blocks;
        int m_image_x_size;
        int m_image_y_size;
        jpeg_decoder_stream *m_pStream;
        int m_progressive_flag;
        uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
        uint8* m_huff_num[JPGD_MAX_HUFF_TABLES];      // pointer to number of Huffman codes per bit size
        uint8* m_huff_val[JPGD_MAX_HUFF_TABLES];      // pointer to Huffman codes per bit size
        jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
        int m_scan_type;                              // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
        int m_comps_in_frame;                         // # of components in frame
        int m_comp_h_samp[JPGD_MAX_COMPONENTS];       // component's horizontal sampling factor
        int m_comp_v_samp[JPGD_MAX_COMPONENTS];       // component's vertical sampling factor
        int m_comp_quant[JPGD_MAX_COMPONENTS];        // component's quantization table selector
        int m_comp_ident[JPGD_MAX_COMPONENTS];        // component's ID
        int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
        int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
        int m_comps_in_scan;                          // # of components in scan
        int m_comp_list[JPGD_MAX_COMPS_IN_SCAN];      // components in this scan
        int m_comp_dc_tab[JPGD_MAX_COMPONENTS];       // component's DC Huffman coding table selector
        int m_comp_ac_tab[JPGD_MAX_COMPONENTS];       // component's AC Huffman coding table selector
        int m_spectral_start;                         // spectral selection start
        int m_spectral_end;                           // spectral selection end
        int m_successive_low;                         // successive approximation low
        int m_successive_high;                        // successive approximation high
        int m_max_mcu_x_size;                         // MCU's max. X size in pixels
        int m_max_mcu_y_size;                         // MCU's max. Y size in pixels
        int m_blocks_per_mcu;
        int m_max_blocks_per_row;
        int m_mcus_per_row, m_mcus_per_col;
        int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
        int m_total_lines_left;                       // total # lines left in image
        int m_mcu_lines_left;                         // total # lines left in this MCU
        int m_real_dest_bytes_per_scan_line;
        int m_dest_bytes_per_scan_line;               // rounded up
        int m_dest_bytes_per_pixel;                   // 4 (RGB) or 1 (Y)
        huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
        coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
        coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
        int m_eob_run;
        int m_block_y_mcu[JPGD_MAX_COMPONENTS];
        uint8* m_pIn_buf_ofs;
        int m_in_buf_left;
        int m_tem_flag;
        bool m_eof_flag;
        uint8 m_in_buf_pad_start[128];
        uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
        uint8 m_in_buf_pad_end[128];
        int m_bits_left;
        uint m_bit_buf;
        int m_restart_interval;
        int m_restarts_left;
        int m_next_restart_num;
        int m_max_mcus_per_row;
        int m_max_blocks_per_mcu;
        int m_expanded_blocks_per_mcu;
        int m_expanded_blocks_per_row;
        int m_expanded_blocks_per_component;
        bool m_freq_domain_chroma_upsample;
        int m_max_mcus_per_col;
        uint m_last_dc_val[JPGD_MAX_COMPONENTS];
        jpgd_block_t* m_pMCU_coefficients;
        int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
        uint8* m_pSample_buf;
        int m_crr[256];
        int m_cbb[256];
        int m_crg[256];
        int m_cbg[256];
        uint8* m_pScan_line_0;
        uint8* m_pScan_line_1;
        jpgd_status m_error_code;
        bool m_ready_flag;
        int m_total_bytes_read;

        void free_all_blocks();
        // BEGIN EPIC MOD
        UE_NORETURN void stop_decoding(jpgd_status status);
        // END EPIC MOD
        void *alloc(size_t n, bool zero = false);
        void word_clear(void *p, uint16 c, uint n);
        void prep_in_buffer();
        void read_dht_marker();
        void read_dqt_marker();
        void read_sof_marker();
        void skip_variable_marker();
        void read_dri_marker();
        void read_sos_marker();
        int next_marker();
        int process_markers();
        void locate_soi_marker();
        void locate_sof_marker();
        int locate_sos_marker();
        void init(jpeg_decoder_stream * pStream);
        void create_look_ups();
        void fix_in_buffer();
        void transform_mcu(int mcu_row);
        void transform_mcu_expand(int mcu_row);
        coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
        inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
        void load_next_row();
        void decode_next_row();
        void make_huff_table(int index, huff_tables *pH);
        void check_quant_tables();
        void check_huff_tables();
        void calc_mcu_block_order();
        int init_scan();
        void init_frame();
        void process_restart();
        void decode_scan(pDecode_block_func decode_block_func);
        void init_progressive();
        void init_sequential();
        void decode_start();
        void decode_init(jpeg_decoder_stream * pStream);
        void H2V2Convert();
        void H2V1Convert();
        void H1V2Convert();
        void H1V1Convert();
        void gray_convert();
        void expanded_convert();
        void find_eoi();
        inline uint get_char();
        inline uint get_char(bool *pPadding_flag);
        inline void stuff_char(uint8 q);
        inline uint8 get_octet();
        inline uint get_bits(int num_bits);
        inline uint get_bits_no_markers(int numbits);
        inline int huff_decode(huff_tables *pH);
        inline int huff_decode(huff_tables *pH, int& extrabits);
        static inline uint8 clamp(int i);
        static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
        static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
        static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
        static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    };

} // namespace jpgd

#endif // JPEG_DECODER_H
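A hedged usage sketch of the simplest decoder entry point declared above. It assumes the returned buffer comes from malloc in the reference jpgd.cpp, so free() is the matching deallocation; verify against the jpgd.cpp actually in this tree:

#include <stdio.h>
#include <stdlib.h>
#include "jpgd.h"

int main() {
    int width = 0, height = 0, actual_comps = 0;
    unsigned char* pixels = jpgd::decompress_jpeg_image_from_file(
        "in.jpg", &width, &height, &actual_comps, 3 /*req_comps: RGB*/);
    if (!pixels) { fprintf(stderr, "decode failed\n"); return 1; }
    // width * height * 3 bytes of RGB data are now available
    free(pixels);
    return 0;
}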
1049  crazy_functions/test_project/cpp/libJPG/jpge.cpp  (Normal file)
File diff suppressed because it is too large

172  crazy_functions/test_project/cpp/libJPG/jpge.h  (Normal file)
@@ -0,0 +1,172 @@

// jpge.h - C++ class for JPEG compression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
// Alex Evans: Added RGBA support, linear memory allocator.
#ifndef JPEG_ENCODER_H
#define JPEG_ENCODER_H

#include <stdint.h>

namespace jpge
{
    typedef unsigned char  uint8;
    typedef signed short   int16;
    typedef signed int     int32;
    typedef unsigned short uint16;
    typedef unsigned int   uint32;
    typedef unsigned int   uint;

    // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
    enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };

    // JPEG compression parameters structure.
    struct params
    {
        inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }

        inline bool check_valid() const
        {
            if ((m_quality < 1) || (m_quality > 100)) return false;
            if ((uint)m_subsampling > (uint)H2V2) return false;
            return true;
        }

        // Quality: 1-100, higher is better. Typical values are around 50-95.
        int m_quality;

        // m_subsampling:
        // 0 = Y (grayscale) only
        // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
        // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
        // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU; very common)
        subsampling_t m_subsampling;

        // Disables CbCr discrimination - only intended for testing.
        // If true, the Y quantization table is also used for the CbCr channels.
        bool m_no_chroma_discrim_flag;

        bool m_two_pass_flag;
    };

    // Writes JPEG image to a file.
    // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels.
    bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

    // Writes JPEG image to memory buffer.
    // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
    // If return value is true, buf_size will be set to the size of the compressed data.
    bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

    // Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
    // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
    class output_stream
    {
    public:
        virtual ~output_stream() { };
        virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
        template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
    };

    // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
    class jpeg_encoder
    {
    public:
        jpeg_encoder();
        ~jpeg_encoder();

        // Initializes the compressor.
        // pStream: The stream object to use for writing compressed data.
        // params - Compression parameters structure, defined above.
        // width, height - Image dimensions.
        // channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data.
        // Returns false on out of memory or if a stream write fails.
        bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());

        const params &get_params() const { return m_params; }

        // Deinitializes the compressor, freeing any allocated memory. May be called at any time.
        void deinit();

        uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
        inline uint get_cur_pass() { return m_pass_num; }

        // Call this method with each source scanline.
        // width * src_channels bytes per scanline is expected (RGB or Y format).
        // You must call with NULL after all scanlines are processed to finish compression.
        // Returns false on out of memory or if a stream write fails.
        bool process_scanline(const void* pScanline);

    private:
        jpeg_encoder(const jpeg_encoder &);
        jpeg_encoder &operator =(const jpeg_encoder &);

        typedef int32 sample_array_t;

        output_stream *m_pStream;
        params m_params;
        uint8 m_num_components;
        uint8 m_comp_h_samp[3], m_comp_v_samp[3];
        int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
        int m_image_x_mcu, m_image_y_mcu;
        int m_image_bpl_xlt, m_image_bpl_mcu;
        int m_mcus_per_row;
        int m_mcu_x, m_mcu_y;
        uint8 *m_mcu_lines[16];
        uint8 m_mcu_y_ofs;
        sample_array_t m_sample_array[64];
        int16 m_coefficient_array[64];
        int32 m_quantization_tables[2][64];
        uint m_huff_codes[4][256];
        uint8 m_huff_code_sizes[4][256];
        uint8 m_huff_bits[4][17];
        uint8 m_huff_val[4][256];
        uint32 m_huff_count[4][256];
        int m_last_dc_val[3];
        enum { JPGE_OUT_BUF_SIZE = 2048 };
        uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
        uint8 *m_pOut_buf;
        uint m_out_buf_left;
        uint32 m_bit_buffer;
        uint m_bits_in;
        uint8 m_pass_num;
        bool m_all_stream_writes_succeeded;

        void optimize_huffman_table(int table_num, int table_len);
        void emit_byte(uint8 i);
        void emit_word(uint i);
        void emit_marker(int marker);
        void emit_jfif_app0();
        void emit_dqt();
        void emit_sof();
        void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
        void emit_dhts();
        void emit_sos();
        void emit_markers();
        void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
        void compute_quant_table(int32 *dst, int16 *src);
        void adjust_quant_table(int32 *dst, int32 *src);
        void first_pass_init();
        bool second_pass_init();
        bool jpg_open(int p_x_res, int p_y_res, int src_channels);
        void load_block_8_8_grey(int x);
        void load_block_8_8(int x, int y, int c);
        void load_block_16_8(int x, int c);
        void load_block_16_8_8(int x, int c);
        void load_quantized_coefficients(int component_num);
        void flush_output_buffer();
        void put_bits(uint bits, uint len);
        void code_coefficients_pass_one(int component_num);
        void code_coefficients_pass_two(int component_num);
        void code_block(int component_num);
        void process_mcu_row();
        bool terminate_pass_one();
        bool terminate_pass_two();
        bool process_end_of_image();
        void load_mcu(const void* src);
        void clear();
        void init();
    };

} // namespace jpge

#endif // JPEG_ENCODER_H
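A sketch of the simplest encoder entry point above. Per the comments, the source pitch must be width * num_channels and channels is 1 (Y) or 3 (RGB); the pixel buffer here is a made-up solid-gray image:

#include <vector>
#include "jpge.h"

int main() {
    const int64_t w = 64, h = 64, channels = 3;
    std::vector<jpge::uint8> rgb(w * h * channels, 128);  // flat gray RGB image
    jpge::params p;
    p.m_quality     = 90;           // 1-100, higher is better
    p.m_subsampling = jpge::H2V2;   // the common default for color images
    return jpge::compress_image_to_jpeg_file("out.jpg", w, h, channels,
                                             rgb.data(), p) ? 0 : 1;
}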
3  crazy_functions/test_project/cpp/libJPG/来源  (Normal file)
@@ -0,0 +1,3 @@
jpge.h - C++ class for JPEG compression.
Public domain, Rich Geldreich <richgel99@gmail.com>
Alex Evans: Added RGBA support, linear memory allocator.

3276  crazy_functions/test_project/cpp/longcode/jpgd.cpp  (Normal file)
File diff suppressed because it is too large

1049  crazy_functions/test_project/cpp/longcode/jpge.cpp  (Normal file)
File diff suppressed because it is too large
433  crazy_functions/test_project/cpp/longcode/prod_cons.h  (Normal file)
@@ -0,0 +1,433 @@
#pragma once

#include <atomic>
#include <utility>
#include <cstring>
#include <type_traits>
#include <cstdint>

#include "libipc/def.h"

#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
#include "libipc/utility/log.h"
#include "libipc/utility/utility.h"

namespace ipc {

////////////////////////////////////////////////////////////////
/// producer-consumer implementation
////////////////////////////////////////////////////////////////

template <typename Flag>
struct prod_cons_impl;

template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index

    constexpr circ::u2_t cursor() const noexcept {
        return 0;
    }

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
        if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
            return false; // full
        }
        std::forward<F>(f)(&(elems[cur_wt].data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    /**
     * In single-single-unicast, 'force_push' means 'no reader' or 'the only reader is dead'.
     * So we can simply disconnect all receiver connections and return false.
     */
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
        return false;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
        auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
        if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
            return false; // empty
        }
        std::forward<F>(f)(&(elems[cur_rd].data_));
        std::forward<R>(out)(true);
        rd_.fetch_add(1, std::memory_order_release);
        return true;
    }
};
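// Sketch: a toy check of the index arithmetic used by the push()/pop() above,
// assuming circ::index_of(x) == (x & (N - 1)) for a power-of-two ring of N
// slots; N and the index values below are made up.
//
//     constexpr unsigned N = 8;
//     auto index_of = [](unsigned x) { return x & (N - 1); };
//     unsigned rd = 0, wt = 0;
//     assert(index_of(rd) == index_of(wt));      // empty: pop() refuses
//     wt = 7;                                    // writer produced 7 items
//     assert(index_of(wt) == index_of(rd - 1));  // full: push() refuses,
//                                                // so usable capacity is N - 1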

template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            if (circ::index_of(cur_rd) ==
                circ::index_of(wt_.load(std::memory_order_acquire))) {
                return false; // empty
            }
            std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
            if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                std::forward<F>(f)(buff);
                std::forward<R>(out)(true);
                return true;
            }
            ipc::yield(k);
        }
    }
};

template <>
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {

    using flag_t = std::uint64_t;

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        circ::u2_t cur_ct, nxt_ct;
        for (unsigned k = 0;;) {
            cur_ct = ct_.load(std::memory_order_relaxed);
            if (circ::index_of(nxt_ct = cur_ct + 1) ==
                circ::index_of(rd_.load(std::memory_order_acquire))) {
                return false; // full
            }
            if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        auto* el = elems + circ::index_of(cur_ct);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        while (1) {
            auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
            if (cur_ct != wt_.load(std::memory_order_relaxed)) {
                return true;
            }
            if ((~cac_ct) != cur_ct) {
                return true;
            }
            if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
                return true;
            }
            wt_.store(nxt_ct, std::memory_order_release);
            cur_ct = nxt_ct;
            nxt_ct = cur_ct + 1;
            el = elems + circ::index_of(cur_ct);
        }
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            auto cur_wt = wt_.load(std::memory_order_acquire);
            auto id_rd  = circ::index_of(cur_rd);
            auto id_wt  = circ::index_of(cur_wt);
            if (id_rd == id_wt) {
                auto* el = elems + id_wt;
                auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
                if ((~cac_ct) != cur_wt) {
                    return false; // empty
                }
                if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
                    wt_.store(cur_wt + 1, std::memory_order_release);
                }
                k = 0;
            }
            else {
                std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
                if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                    std::forward<F>(f)(buff);
                    std::forward<R>(out)(true);
                    return true;
                }
                ipc::yield(k);
            }
        }
    }
};
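// Sketch: the "commit flag" encoding used by the multi-multi unicast push()
// above. A slot claimed with ticket cur_ct stores ~cur_ct in f_ct_, so a
// thread holding ticket t tests commitment with (~flag) == t, and 0 is
// reserved for "not committed yet". The ticket value below is made up.
//
//     using flag_t = std::uint64_t;
//     std::uint32_t cur_ct = 41;
//     flag_t flag = ~static_cast<flag_t>(cur_ct);  // producer commits the slot
//     assert((~flag) == cur_ct);                   // consumer's check passes
//     assert(flag != 0);                           // distinguishable from empty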

template <>
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {

    using rc_t = std::uint64_t;

    enum : rc_t {
        ep_mask = 0x00000000ffffffffull,
        ep_incr = 0x0000000100000000ull
    };

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t> rc_ { 0 }; // read-counter
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
    alignas(cache_line_size) rc_t epoch_ { 0 };           // only one writer

    circ::u2_t cursor() const noexcept {
        return wt_.load(std::memory_order_acquire);
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
                return false; // has not finished yet
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        epoch_ += ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
        if (cur == cursor()) return false; // acquire
        auto* el = elems + circ::index_of(cur++);
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & ep_mask) == 0) {
                std::forward<R>(out)(true);
                return true;
            }
            auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)((nxt_rc & ep_mask) == 0);
                return true;
            }
            ipc::yield(k);
        }
    }
};
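// Sketch: the per-reader bookkeeping used by the broadcast pop() above. rc_
// keeps the epoch in the high bits and one bit per pending reader in the low
// 32 bits (ep_mask); each reader clears its own connected_id() bit, and the
// reader that leaves the low bits at zero is the last one. Values are made up.
//
//     constexpr std::uint64_t ep_mask = 0x00000000ffffffffull;
//     std::uint64_t rc    = (1ull << 32) | 0b0110;  // epoch 1, readers 2 and 3 pending
//     std::uint64_t my_id = 0b0010;                 // this reader's connection bit
//     rc &= ~my_id;                                 // nxt_rc = cur_rc & ~connected_id()
//     assert((rc & ep_mask) == 0b0100);             // reader 3 still pending
//     rc &= ~std::uint64_t(0b0100);                 // reader 3 finishes
//     assert((rc & ep_mask) == 0);                  // last_one -> out(true)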

template <>
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {

    using rc_t   = std::uint64_t;
    using flag_t = std::uint64_t;

    enum : rc_t {
        rc_mask = 0x00000000ffffffffull,
        ep_mask = 0x00ffffffffffffffull,
        ep_incr = 0x0100000000000000ull,
        ic_mask = 0xff000000ffffffffull,
        ic_incr = 0x0000000100000000ull
    };

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t  > rc_   { 0 }; // read-counter
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
    alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };

    circ::u2_t cursor() const noexcept {
        return ct_.load(std::memory_order_acquire);
    }

    constexpr static rc_t inc_rc(rc_t rc) noexcept {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    }

    constexpr static rc_t inc_mask(rc_t rc) noexcept {
        return inc_rc(rc) & ~rc_mask;
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.load(std::memory_order_acquire);
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_relaxed);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
                return false; // has not finished yet
            }
            else if (!rem_cc) {
                auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
                if ((cur_fl != cur_ct) && cur_fl) {
                    return false; // full
                }
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
                epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
                if (epoch == epoch_.load(std::memory_order_acquire)) {
                    break;
                }
                else if (push(wrapper, std::forward<F>(f), elems)) {
                    return true;
                }
                epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E, std::size_t N>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
        auto* el = elems + circ::index_of(cur);
        auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
        if (cur_fl != ~static_cast<flag_t>(cur)) {
            return false; // empty
        }
        ++cur;
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & rc_mask) == 0) {
                std::forward<R>(out)(true);
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
                return true;
            }
            auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
            bool last_one = false;
            if ((last_one = (nxt_rc & rc_mask) == 0)) {
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
            }
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)(last_one);
                return true;
            }
            ipc::yield(k);
        }
    }
};

} // namespace ipc
58
crazy_functions/test_project/latex/attention/background.tex
Normal file
58
crazy_functions/test_project/latex/attention/background.tex
Normal file
@@ -0,0 +1,58 @@
|
||||
The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows in the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}.
|
||||
|
||||
Self-attention, sometimes called intra-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}.
|
||||
|
||||
End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}.
|
||||
|
||||
To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.
|
||||
In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}.
|
||||
|
||||
|
||||
%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.
|
||||
|
||||
%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation.
|
||||
|
||||
%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
|
||||
|
||||
%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost.
|
||||
|
||||
%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length.
|
||||
|
||||
%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), English-to-French (EnFr) and English-to-Romanian language pairs.
|
||||
|
||||
%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
|
||||
|
||||
|
||||
|
||||
%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)?
|
||||
|
||||
%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence.
|
||||
|
||||
%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model.
|
||||
|
||||
%\begin{table}[h!]
|
||||
%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.}
|
||||
%\label{tab:op_complexities}
|
||||
%\begin{center}
|
||||
%\vspace{-5pt}
|
||||
%\scalebox{0.75}{
|
||||
|
||||
%\begin{tabular}{l|c|c|c}
|
||||
%\hline \hline
|
||||
%Layer Type & Receptive & Complexity & Sequential \\
|
||||
% & Field & & Operations \\
|
||||
%\hline
|
||||
%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
|
||||
%\hline
|
||||
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
|
||||
%\hline
|
||||
%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\
|
||||
%\hline
|
||||
%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\
|
||||
%\hline
|
||||
%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
|
||||
%\hline \hline
|
||||
%\end{tabular}
|
||||
%}
|
||||
%\end{center}
|
||||
%\end{table}
|
||||
@@ -0,0 +1,18 @@
|
||||
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
|
||||
|
||||
Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
|
||||
%\marginpar{not sure if the memory constraints are understandable here}
|
||||
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in the case of the latter. The fundamental constraint of sequential computation, however, remains.
|
||||
|
||||
%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
|
||||
|
||||
Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
|
||||
|
||||
%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
|
||||
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
|
||||
|
||||
In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
|
||||
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
|
||||
|
||||
% Just a standard paragraph with citations, rewrite.
|
||||
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
|
||||
@@ -0,0 +1,155 @@
|
||||
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[scale=0.6]{Figures/ModalNet-21}
|
||||
\caption{The Transformer - model architecture.}
|
||||
\label{fig:model-arch}
|
||||
\end{figure}
|
||||
|
||||
% Although the primary workhorse of our model is attention,
|
||||
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.
|
||||
|
||||
Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.
|
||||
|
||||
The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.
|
||||
|
||||
\subsection{Encoder and Decoder Stacks}
|
||||
|
||||
\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.
|
||||
|
||||
\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with the fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.
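As a minimal illustration of this sub-layer pattern, the following PyTorch sketch (our own, not the authors' code; the module name and callable interface are assumptions) wraps an arbitrary sub-layer with the residual connection and layer normalization:

```
import torch.nn as nn

class ResidualSublayer(nn.Module):
    """Computes LayerNorm(x + Sublayer(x)) for an arbitrary sub-layer."""
    def __init__(self, d_model=512):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x, sublayer):
        # sublayer: any callable mapping (batch, seq, d_model) -> same shape,
        # e.g. self-attention or the position-wise feed-forward network
        return self.norm(x + sublayer(x))
```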
|
||||
|
||||
% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.
|
||||
|
||||
\subsection{Attention} \label{sec:attention}
|
||||
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.
|
||||
|
||||
\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}
|
||||
|
||||
% \begin{figure}
|
||||
% \centering
|
||||
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
|
||||
% \caption{Scaled Dot-Product Attention.}
|
||||
% \label{fig:multi-head-att}
|
||||
% \end{figure}
|
||||
|
||||
We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.
|
||||
|
||||
In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:
|
||||
|
||||
\begin{equation}
|
||||
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
|
||||
\end{equation}
|
||||
|
||||
The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
|
||||
|
||||
%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.
|
||||
|
||||
% Already described in the subsequent section
|
||||
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.
|
||||
|
||||
%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.
|
||||
|
||||
While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
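A minimal PyTorch sketch of this computation (our own illustration, not the authors' code; the optional mask argument anticipates the decoder masking described in the following sections):

```
import math
import torch

def scaled_dot_product_attention(Q, K, V, mask=None):
    # Q: (..., seq_q, d_k), K: (..., seq_k, d_k), V: (..., seq_k, d_v)
    d_k = Q.size(-1)
    scores = Q @ K.transpose(-2, -1) / math.sqrt(d_k)   # (..., seq_q, seq_k)
    if mask is not None:
        # illegal connections (e.g. future positions) get -inf before the softmax
        scores = scores.masked_fill(mask == 0, float('-inf'))
    return torch.softmax(scores, dim=-1) @ V
```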
|
||||
|
||||
|
||||
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
|
||||
|
||||
|
||||
\subsubsection{Multi-Head Attention} \label{sec:multihead}
|
||||
|
||||
\begin{figure}
|
||||
\begin{minipage}[t]{0.5\textwidth}
|
||||
\centering
|
||||
Scaled Dot-Product Attention \\
|
||||
\vspace{0.5cm}
|
||||
\includegraphics[scale=0.6]{Figures/ModalNet-19}
|
||||
\end{minipage}
|
||||
\begin{minipage}[t]{0.5\textwidth}
|
||||
\centering
|
||||
Multi-Head Attention \\
|
||||
\vspace{0.1cm}
|
||||
\includegraphics[scale=0.6]{Figures/ModalNet-20}
|
||||
\end{minipage}
|
||||
|
||||
|
||||
% \centering
|
||||
|
||||
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
|
||||
\label{fig:multi-head-att}
|
||||
\end{figure}
|
||||
|
||||
Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
|
||||
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.
|
||||
|
||||
Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.
|
||||
|
||||
\begin{align*}
|
||||
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
|
||||
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
|
||||
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)
|
||||
\end{align*}
|
||||
|
||||
where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.
|
||||
|
||||
|
||||
%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.
|
||||
|
||||
In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
|
||||
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
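A hedged PyTorch sketch of multi-head attention with these settings, reusing the scaled_dot_product_attention function sketched above (our own illustration; fusing the $h$ per-head projections into one linear layer per input is an implementation assumption, not something the paper prescribes):

```
import torch.nn as nn

class MultiHeadAttention(nn.Module):
    def __init__(self, d_model=512, h=8):
        super().__init__()
        assert d_model % h == 0
        self.h, self.d_k = h, d_model // h        # d_k = d_v = d_model / h = 64
        self.W_q = nn.Linear(d_model, d_model)    # h projections W^Q_i, fused
        self.W_k = nn.Linear(d_model, d_model)    # h projections W^K_i, fused
        self.W_v = nn.Linear(d_model, d_model)    # h projections W^V_i, fused
        self.W_o = nn.Linear(d_model, d_model)    # output projection W^O

    def forward(self, query, key, value, mask=None):
        B = query.size(0)
        def split(x, W):  # (B, seq, d_model) -> (B, h, seq, d_k)
            return W(x).view(B, -1, self.h, self.d_k).transpose(1, 2)
        q, k, v = split(query, self.W_q), split(key, self.W_k), split(value, self.W_v)
        out = scaled_dot_product_attention(q, k, v, mask)  # (B, h, seq, d_k)
        out = out.transpose(1, 2).contiguous().view(B, -1, self.h * self.d_k)
        return self.W_o(out)                      # concatenate heads, then project
```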
|
||||
|
||||
\subsubsection{Applications of Attention in our Model}
|
||||
|
||||
The Transformer uses multi-head attention in three different ways:
|
||||
\begin{itemize}
|
||||
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.
|
||||
|
||||
\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.
|
||||
|
||||
\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.
|
||||
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}
|
||||
|
||||
In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.
|
||||
|
||||
\begin{equation}
|
||||
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
|
||||
\end{equation}
|
||||
|
||||
While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
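A minimal PyTorch sketch of this sub-layer under the stated dimensions (our own illustration, not the reference implementation):

```
import torch.nn as nn
from torch.nn import functional as F

class PositionwiseFFN(nn.Module):
    # FFN(x) = max(0, x W1 + b1) W2 + b2, applied identically at every position
    def __init__(self, d_model=512, d_ff=2048):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff)
        self.w2 = nn.Linear(d_ff, d_model)

    def forward(self, x):
        return self.w2(F.relu(self.w1(x)))
```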
|
||||
|
||||
|
||||
|
||||
%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.
|
||||
|
||||
%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.
|
||||
|
||||
|
||||
%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
|
||||
%\begin{equation*} \label{eq:attention}
|
||||
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
|
||||
%\end{equation*}
|
||||
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.
|
||||
|
||||
%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
|
||||
%\marginpar{}
|
||||
|
||||
\subsection{Embeddings and Softmax}
|
||||
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.
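A short PyTorch sketch of the weight sharing described above (our own illustration; the vocabulary size is an arbitrary placeholder):

```
import math
import torch.nn as nn

d_model, vocab_size = 512, 37000          # vocab_size is a placeholder
embedding = nn.Embedding(vocab_size, d_model)
pre_softmax = nn.Linear(d_model, vocab_size, bias=False)
pre_softmax.weight = embedding.weight     # one matrix shared by all three roles
# in the embedding layers, the shared weights are multiplied by sqrt(d_model):
# x = embedding(tokens) * math.sqrt(d_model)
```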
|
||||
|
||||
|
||||
\subsection{Positional Encoding}
|
||||
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.
|
||||
|
||||
In this work, we use sine and cosine functions of different frequencies:
|
||||
|
||||
\begin{align*}
|
||||
PE_{(pos,2i)} &= \sin(pos / 10000^{2i/\dmodel}) \\
|
||||
PE_{(pos,2i+1)} &= \cos(pos / 10000^{2i/\dmodel})
|
||||
\end{align*}
|
||||
|
||||
where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
|
||||
|
||||
We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
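A minimal PyTorch sketch that builds this sinusoidal table (our own illustration; the exp/log form is a standard, numerically convenient rewriting of 1/10000^{2i/d_model}):

```
import math
import torch

def sinusoidal_positional_encoding(max_len, d_model=512):
    pe = torch.zeros(max_len, d_model)
    pos = torch.arange(max_len, dtype=torch.float).unsqueeze(1)    # (max_len, 1)
    div = torch.exp(torch.arange(0, d_model, 2).float()
                    * (-math.log(10000.0) / d_model))              # 10000^{-2i/d_model}
    pe[:, 0::2] = torch.sin(pos * div)
    pe[:, 1::2] = torch.cos(pos * div)
    return pe  # added to the embeddings at the bottom of the encoder/decoder stacks
```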
|
||||
@@ -0,0 +1,45 @@
|
||||
\pagebreak
|
||||
\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}
|
||||
|
||||
In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):
|
||||
|
||||
\begin{align*}
|
||||
FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
|
||||
A(q, K, V) = Softmax(qK^T)V
|
||||
\end{align*}
|
||||
|
||||
Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.
|
||||
|
||||
%the compatablity function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$.
|
||||
|
||||
Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in Section~\ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.
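A single-head sketch of this attention-over-parameters idea (our own simplification for clarity; the version described above is multi-headed, and the exact placement of the sqrt(d_model) scaling is an assumption):

```
import torch
import torch.nn as nn

class ParameterAttentionHead(nn.Module):
    # keys and values are trainable parameters, not projections of a previous layer
    def __init__(self, d_model=512, d_pk=64, d_pv=64, n_p=1536):
        super().__init__()
        self.keys = nn.Parameter(torch.randn(n_p, d_pk))
        self.values = nn.Parameter(torch.randn(n_p, d_pv))
        self.q_proj = nn.Linear(d_model, d_pk)
        self.out_proj = nn.Linear(d_pv, d_model)
        self.scale = d_model ** 0.5      # parameters scaled up to resemble activations

    def forward(self, x):                # x: (..., d_model)
        q = self.q_proj(x)               # (..., d_pk)
        scores = q @ (self.keys * self.scale).t() / self.keys.size(-1) ** 0.5
        weights = torch.softmax(scores, dim=-1)          # (..., n_p)
        return self.out_proj(weights @ (self.values * self.scale))
```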
|
||||
|
||||
In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.
|
||||
|
||||
In our second experiment, we used $h_p=16$ heads and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.
|
||||
|
||||
Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.
|
||||
|
||||
\begin{table}[h]
|
||||
\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
|
||||
\label{tab:parameter_attention}
|
||||
\begin{center}
|
||||
\vspace{-2mm}
|
||||
%\scalebox{1.0}{
|
||||
\begin{tabular}{c|cccccc|cccc}
|
||||
\hline\rule{0pt}{2.0ex}
|
||||
& \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
|
||||
\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
|
||||
\multirow{2}{*}{$n_p$} &
|
||||
PPL & BLEU & params & training\\
|
||||
& & & & & & & (dev) & (dev) & $\times10^6$ & time \\
|
||||
\hline\rule{0pt}{2.0ex}
|
||||
base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
|
||||
\hline\rule{0pt}{2.0ex}
|
||||
AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92& 25.5 & 65 & 16 hours\\
|
||||
AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
%}
|
||||
\end{center}
|
||||
\end{table}
|
||||
8
crazy_functions/test_project/latex/attention/来源
Normal file
@@ -0,0 +1,8 @@
|
||||
The ancestor of ChatGPT: "Attention Is All You Need"
|
||||
|
||||
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin
|
||||
|
||||
The actual abstract is as follows:
|
||||
The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.
|
||||
|
||||
https://arxiv.org/abs/1706.03762
|
||||
2
crazy_functions/test_project/python/dqn/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
from stable_baselines3.dqn.dqn import DQN
|
||||
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy
|
||||
245
crazy_functions/test_project/python/dqn/dqn.py
Normal file
@@ -0,0 +1,245 @@
|
||||
from typing import Any, Dict, List, Optional, Tuple, Type, Union
|
||||
|
||||
import gym
|
||||
import numpy as np
|
||||
import torch as th
|
||||
from torch.nn import functional as F
|
||||
|
||||
from stable_baselines3.common import logger
|
||||
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
|
||||
from stable_baselines3.common.preprocessing import maybe_transpose
|
||||
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
|
||||
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
|
||||
from stable_baselines3.dqn.policies import DQNPolicy
|
||||
|
||||
|
||||
class DQN(OffPolicyAlgorithm):
|
||||
"""
|
||||
Deep Q-Network (DQN)
|
||||
|
||||
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
|
||||
Default hyperparameters are taken from the nature paper,
|
||||
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
|
||||
|
||||
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
|
||||
:param env: The environment to learn from (if registered in Gym, can be str)
|
||||
:param learning_rate: The learning rate, it can be a function
|
||||
of the current progress remaining (from 1 to 0)
|
||||
:param buffer_size: size of the replay buffer
|
||||
:param learning_starts: how many steps of the model to collect transitions for before learning starts
|
||||
:param batch_size: Minibatch size for each gradient update
|
||||
:param tau: the soft update coefficient ("Polyak update", between 0 and 1); defaults to 1 for a hard update
|
||||
:param gamma: the discount factor
|
||||
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
|
||||
like ``(5, "step")`` or ``(2, "episode")``.
|
||||
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
|
||||
Setting it to ``-1`` means doing as many gradient steps as steps done in the environment
|
||||
during the rollout.
|
||||
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
|
||||
at a cost of more complexity.
|
||||
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
|
||||
:param target_update_interval: update the target network every ``target_update_interval``
|
||||
environment steps.
|
||||
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
|
||||
:param exploration_initial_eps: initial value of random action probability
|
||||
:param exploration_final_eps: final value of random action probability
|
||||
:param max_grad_norm: The maximum value for the gradient clipping
|
||||
:param tensorboard_log: the log location for tensorboard (if None, no logging)
|
||||
:param create_eval_env: Whether to create a second environment that will be
|
||||
used for evaluating the agent periodically. (Only available when passing string for the environment)
|
||||
:param policy_kwargs: additional arguments to be passed to the policy on creation
|
||||
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
|
||||
:param seed: Seed for the pseudo random generators
|
||||
:param device: Device (cpu, cuda, ...) on which the code should be run.
|
||||
Setting it to auto, the code will be run on the GPU if possible.
|
||||
:param _init_setup_model: Whether or not to build the network at the creation of the instance
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
policy: Union[str, Type[DQNPolicy]],
|
||||
env: Union[GymEnv, str],
|
||||
learning_rate: Union[float, Schedule] = 1e-4,
|
||||
buffer_size: int = 1000000,
|
||||
learning_starts: int = 50000,
|
||||
batch_size: Optional[int] = 32,
|
||||
tau: float = 1.0,
|
||||
gamma: float = 0.99,
|
||||
train_freq: Union[int, Tuple[int, str]] = 4,
|
||||
gradient_steps: int = 1,
|
||||
optimize_memory_usage: bool = False,
|
||||
target_update_interval: int = 10000,
|
||||
exploration_fraction: float = 0.1,
|
||||
exploration_initial_eps: float = 1.0,
|
||||
exploration_final_eps: float = 0.05,
|
||||
max_grad_norm: float = 10,
|
||||
tensorboard_log: Optional[str] = None,
|
||||
create_eval_env: bool = False,
|
||||
policy_kwargs: Optional[Dict[str, Any]] = None,
|
||||
verbose: int = 0,
|
||||
seed: Optional[int] = None,
|
||||
device: Union[th.device, str] = "auto",
|
||||
_init_setup_model: bool = True,
|
||||
):
|
||||
|
||||
super(DQN, self).__init__(
|
||||
policy,
|
||||
env,
|
||||
DQNPolicy,
|
||||
learning_rate,
|
||||
buffer_size,
|
||||
learning_starts,
|
||||
batch_size,
|
||||
tau,
|
||||
gamma,
|
||||
train_freq,
|
||||
gradient_steps,
|
||||
action_noise=None, # No action noise
|
||||
policy_kwargs=policy_kwargs,
|
||||
tensorboard_log=tensorboard_log,
|
||||
verbose=verbose,
|
||||
device=device,
|
||||
create_eval_env=create_eval_env,
|
||||
seed=seed,
|
||||
sde_support=False,
|
||||
optimize_memory_usage=optimize_memory_usage,
|
||||
supported_action_spaces=(gym.spaces.Discrete,),
|
||||
)
|
||||
|
||||
self.exploration_initial_eps = exploration_initial_eps
|
||||
self.exploration_final_eps = exploration_final_eps
|
||||
self.exploration_fraction = exploration_fraction
|
||||
self.target_update_interval = target_update_interval
|
||||
self.max_grad_norm = max_grad_norm
|
||||
# "epsilon" for the epsilon-greedy exploration
|
||||
self.exploration_rate = 0.0
|
||||
# Linear schedule will be defined in `_setup_model()`
|
||||
self.exploration_schedule = None
|
||||
self.q_net, self.q_net_target = None, None
|
||||
|
||||
if _init_setup_model:
|
||||
self._setup_model()
|
||||
|
||||
def _setup_model(self) -> None:
|
||||
super(DQN, self)._setup_model()
|
||||
self._create_aliases()
|
||||
self.exploration_schedule = get_linear_fn(
|
||||
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
|
||||
)
|
||||
|
||||
def _create_aliases(self) -> None:
|
||||
self.q_net = self.policy.q_net
|
||||
self.q_net_target = self.policy.q_net_target
|
||||
|
||||
def _on_step(self) -> None:
|
||||
"""
|
||||
Update the exploration rate and target network if needed.
|
||||
This method is called in ``collect_rollouts()`` after each step in the environment.
|
||||
"""
|
||||
if self.num_timesteps % self.target_update_interval == 0:
|
||||
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
|
||||
|
||||
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
|
||||
logger.record("rollout/exploration rate", self.exploration_rate)
|
||||
|
||||
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
|
||||
# Update learning rate according to schedule
|
||||
self._update_learning_rate(self.policy.optimizer)
|
||||
|
||||
losses = []
|
||||
for _ in range(gradient_steps):
|
||||
# Sample replay buffer
|
||||
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
|
||||
|
||||
with th.no_grad():
|
||||
# Compute the next Q-values using the target network
|
||||
next_q_values = self.q_net_target(replay_data.next_observations)
|
||||
# Follow greedy policy: use the one with the highest value
|
||||
next_q_values, _ = next_q_values.max(dim=1)
|
||||
# Avoid potential broadcast issue
|
||||
next_q_values = next_q_values.reshape(-1, 1)
|
||||
# 1-step TD target
|
||||
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
|
||||
|
||||
# Get current Q-values estimates
|
||||
current_q_values = self.q_net(replay_data.observations)
|
||||
|
||||
# Retrieve the q-values for the actions from the replay buffer
|
||||
current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
|
||||
|
||||
# Compute Huber loss (less sensitive to outliers)
|
||||
loss = F.smooth_l1_loss(current_q_values, target_q_values)
|
||||
losses.append(loss.item())
|
||||
|
||||
# Optimize the policy
|
||||
self.policy.optimizer.zero_grad()
|
||||
loss.backward()
|
||||
# Clip gradient norm
|
||||
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
|
||||
self.policy.optimizer.step()
|
||||
|
||||
# Increase update counter
|
||||
self._n_updates += gradient_steps
|
||||
|
||||
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
|
||||
logger.record("train/loss", np.mean(losses))
|
||||
|
||||
def predict(
|
||||
self,
|
||||
observation: np.ndarray,
|
||||
state: Optional[np.ndarray] = None,
|
||||
mask: Optional[np.ndarray] = None,
|
||||
deterministic: bool = False,
|
||||
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
|
||||
"""
|
||||
Overrides the base_class predict function to include epsilon-greedy exploration.
|
||||
|
||||
:param observation: the input observation
|
||||
:param state: The last states (can be None, used in recurrent policies)
|
||||
:param mask: The last masks (can be None, used in recurrent policies)
|
||||
:param deterministic: Whether or not to return deterministic actions.
|
||||
:return: the model's action and the next state
|
||||
(used in recurrent policies)
|
||||
"""
|
||||
if not deterministic and np.random.rand() < self.exploration_rate:
|
||||
if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
|
||||
n_batch = observation.shape[0]
|
||||
action = np.array([self.action_space.sample() for _ in range(n_batch)])
|
||||
else:
|
||||
action = np.array(self.action_space.sample())
|
||||
else:
|
||||
action, state = self.policy.predict(observation, state, mask, deterministic)
|
||||
return action, state
|
||||
|
||||
def learn(
|
||||
self,
|
||||
total_timesteps: int,
|
||||
callback: MaybeCallback = None,
|
||||
log_interval: int = 4,
|
||||
eval_env: Optional[GymEnv] = None,
|
||||
eval_freq: int = -1,
|
||||
n_eval_episodes: int = 5,
|
||||
tb_log_name: str = "DQN",
|
||||
eval_log_path: Optional[str] = None,
|
||||
reset_num_timesteps: bool = True,
|
||||
) -> OffPolicyAlgorithm:
|
||||
|
||||
return super(DQN, self).learn(
|
||||
total_timesteps=total_timesteps,
|
||||
callback=callback,
|
||||
log_interval=log_interval,
|
||||
eval_env=eval_env,
|
||||
eval_freq=eval_freq,
|
||||
n_eval_episodes=n_eval_episodes,
|
||||
tb_log_name=tb_log_name,
|
||||
eval_log_path=eval_log_path,
|
||||
reset_num_timesteps=reset_num_timesteps,
|
||||
)
|
||||
|
||||
def _excluded_save_params(self) -> List[str]:
|
||||
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
|
||||
|
||||
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
|
||||
state_dicts = ["policy", "policy.optimizer"]
|
||||
|
||||
return state_dicts, []
|
||||
237
crazy_functions/test_project/python/dqn/policies.py
Normal file
@@ -0,0 +1,237 @@
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
import gym
|
||||
import torch as th
|
||||
from torch import nn
|
||||
|
||||
from stable_baselines3.common.policies import BasePolicy, register_policy
|
||||
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
|
||||
from stable_baselines3.common.type_aliases import Schedule
|
||||
|
||||
|
||||
class QNetwork(BasePolicy):
|
||||
"""
|
||||
Action-Value (Q-Value) network for DQN
|
||||
|
||||
:param observation_space: Observation space
|
||||
:param action_space: Action space
|
||||
:param net_arch: The specification of the policy and value networks.
|
||||
:param activation_fn: Activation function
|
||||
:param normalize_images: Whether to normalize images or not,
|
||||
dividing by 255.0 (True by default)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
observation_space: gym.spaces.Space,
|
||||
action_space: gym.spaces.Space,
|
||||
features_extractor: nn.Module,
|
||||
features_dim: int,
|
||||
net_arch: Optional[List[int]] = None,
|
||||
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||
normalize_images: bool = True,
|
||||
):
|
||||
super(QNetwork, self).__init__(
|
||||
observation_space,
|
||||
action_space,
|
||||
features_extractor=features_extractor,
|
||||
normalize_images=normalize_images,
|
||||
)
|
||||
|
||||
if net_arch is None:
|
||||
net_arch = [64, 64]
|
||||
|
||||
self.net_arch = net_arch
|
||||
self.activation_fn = activation_fn
|
||||
self.features_extractor = features_extractor
|
||||
self.features_dim = features_dim
|
||||
self.normalize_images = normalize_images
|
||||
action_dim = self.action_space.n # number of actions
|
||||
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
|
||||
self.q_net = nn.Sequential(*q_net)
|
||||
|
||||
def forward(self, obs: th.Tensor) -> th.Tensor:
|
||||
"""
|
||||
Predict the q-values.
|
||||
|
||||
:param obs: Observation
|
||||
:return: The estimated Q-Value for each action.
|
||||
"""
|
||||
return self.q_net(self.extract_features(obs))
|
||||
|
||||
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||
q_values = self.forward(observation)
|
||||
# Greedy action
|
||||
action = q_values.argmax(dim=1).reshape(-1)
|
||||
return action
|
||||
|
||||
def _get_constructor_parameters(self) -> Dict[str, Any]:
|
||||
data = super()._get_constructor_parameters()
|
||||
|
||||
data.update(
|
||||
dict(
|
||||
net_arch=self.net_arch,
|
||||
features_dim=self.features_dim,
|
||||
activation_fn=self.activation_fn,
|
||||
features_extractor=self.features_extractor,
|
||||
)
|
||||
)
|
||||
return data
|
||||
|
||||
|
||||
class DQNPolicy(BasePolicy):
|
||||
"""
|
||||
Policy class with Q-Value Net and target net for DQN
|
||||
|
||||
:param observation_space: Observation space
|
||||
:param action_space: Action space
|
||||
:param lr_schedule: Learning rate schedule (could be constant)
|
||||
:param net_arch: The specification of the policy and value networks.
|
||||
:param activation_fn: Activation function
|
||||
:param features_extractor_class: Features extractor to use.
|
||||
:param features_extractor_kwargs: Keyword arguments
|
||||
to pass to the features extractor.
|
||||
:param normalize_images: Whether to normalize images or not,
|
||||
dividing by 255.0 (True by default)
|
||||
:param optimizer_class: The optimizer to use,
|
||||
``th.optim.Adam`` by default
|
||||
:param optimizer_kwargs: Additional keyword arguments,
|
||||
excluding the learning rate, to pass to the optimizer
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
observation_space: gym.spaces.Space,
|
||||
action_space: gym.spaces.Space,
|
||||
lr_schedule: Schedule,
|
||||
net_arch: Optional[List[int]] = None,
|
||||
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
|
||||
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
|
||||
normalize_images: bool = True,
|
||||
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
|
||||
optimizer_kwargs: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
super(DQNPolicy, self).__init__(
|
||||
observation_space,
|
||||
action_space,
|
||||
features_extractor_class,
|
||||
features_extractor_kwargs,
|
||||
optimizer_class=optimizer_class,
|
||||
optimizer_kwargs=optimizer_kwargs,
|
||||
)
|
||||
|
||||
if net_arch is None:
|
||||
if features_extractor_class == FlattenExtractor:
|
||||
net_arch = [64, 64]
|
||||
else:
|
||||
net_arch = []
|
||||
|
||||
self.net_arch = net_arch
|
||||
self.activation_fn = activation_fn
|
||||
self.normalize_images = normalize_images
|
||||
|
||||
self.net_args = {
|
||||
"observation_space": self.observation_space,
|
||||
"action_space": self.action_space,
|
||||
"net_arch": self.net_arch,
|
||||
"activation_fn": self.activation_fn,
|
||||
"normalize_images": normalize_images,
|
||||
}
|
||||
|
||||
self.q_net, self.q_net_target = None, None
|
||||
self._build(lr_schedule)
|
||||
|
||||
def _build(self, lr_schedule: Schedule) -> None:
|
||||
"""
|
||||
Create the network and the optimizer.
|
||||
|
||||
:param lr_schedule: Learning rate schedule
|
||||
lr_schedule(1) is the initial learning rate
|
||||
"""
|
||||
|
||||
self.q_net = self.make_q_net()
|
||||
self.q_net_target = self.make_q_net()
|
||||
self.q_net_target.load_state_dict(self.q_net.state_dict())
|
||||
|
||||
# Setup optimizer with initial learning rate
|
||||
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
|
||||
|
||||
def make_q_net(self) -> QNetwork:
|
||||
# Make sure we always have separate networks for features extractors etc
|
||||
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
|
||||
return QNetwork(**net_args).to(self.device)
|
||||
|
||||
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||
return self._predict(obs, deterministic=deterministic)
|
||||
|
||||
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||
return self.q_net._predict(obs, deterministic=deterministic)
|
||||
|
||||
def _get_constructor_parameters(self) -> Dict[str, Any]:
|
||||
data = super()._get_constructor_parameters()
|
||||
|
||||
data.update(
|
||||
dict(
|
||||
net_arch=self.net_args["net_arch"],
|
||||
activation_fn=self.net_args["activation_fn"],
|
||||
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
|
||||
optimizer_class=self.optimizer_class,
|
||||
optimizer_kwargs=self.optimizer_kwargs,
|
||||
features_extractor_class=self.features_extractor_class,
|
||||
features_extractor_kwargs=self.features_extractor_kwargs,
|
||||
)
|
||||
)
|
||||
return data
|
||||
|
||||
|
||||
MlpPolicy = DQNPolicy
|
||||
|
||||
|
||||
class CnnPolicy(DQNPolicy):
|
||||
"""
|
||||
Policy class for DQN when using images as input.
|
||||
|
||||
:param observation_space: Observation space
|
||||
:param action_space: Action space
|
||||
:param lr_schedule: Learning rate schedule (could be constant)
|
||||
:param net_arch: The specification of the policy and value networks.
|
||||
:param activation_fn: Activation function
|
||||
:param features_extractor_class: Features extractor to use.
|
||||
:param normalize_images: Whether to normalize images or not,
|
||||
dividing by 255.0 (True by default)
|
||||
:param optimizer_class: The optimizer to use,
|
||||
``th.optim.Adam`` by default
|
||||
:param optimizer_kwargs: Additional keyword arguments,
|
||||
excluding the learning rate, to pass to the optimizer
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
observation_space: gym.spaces.Space,
|
||||
action_space: gym.spaces.Space,
|
||||
lr_schedule: Schedule,
|
||||
net_arch: Optional[List[int]] = None,
|
||||
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
|
||||
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
|
||||
normalize_images: bool = True,
|
||||
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
|
||||
optimizer_kwargs: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
super(CnnPolicy, self).__init__(
|
||||
observation_space,
|
||||
action_space,
|
||||
lr_schedule,
|
||||
net_arch,
|
||||
activation_fn,
|
||||
features_extractor_class,
|
||||
features_extractor_kwargs,
|
||||
normalize_images,
|
||||
optimizer_class,
|
||||
optimizer_kwargs,
|
||||
)
|
||||
|
||||
|
||||
register_policy("MlpPolicy", MlpPolicy)
|
||||
register_policy("CnnPolicy", CnnPolicy)
|
||||
2
crazy_functions/test_project/python/dqn/来源
Normal file
@@ -0,0 +1,2 @@
|
||||
GitHub: stable-baselines3
|
||||
https://github.com/DLR-RM/stable-baselines3
|
||||
27
crazy_functions/test_project/其他测试
Normal file
@@ -0,0 +1,27 @@
|
||||
"In practice, we found that a high-entropy initial state is more likely to increase the speed of training.
|
||||
The entropy is calculated by:
|
||||
$$H=-\sum_{k=1}^{n_k} p(k) \cdot \log p(k), \quad p(k)=\frac{|A_k|}{|\mathcal{A}|}$$
|
||||
where $H$ is the entropy, $|A_k|$ is the number of agent nodes in the $k$-th cluster, and $|\mathcal{A}|$ is the total number of agents.
|
||||
To ensure the Cooperation Graph initialization has higher entropy,
|
||||
we will randomly generate multiple initial states,
|
||||
rank by their entropy and then pick the one with maximum $H$."
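A small Python sketch of this max-entropy initialization (our own illustration of the quoted procedure; the agent, cluster and candidate counts are placeholders):

```
import numpy as np

def assignment_entropy(cluster_sizes):
    # H = -sum_k p(k) log p(k), with p(k) = |A_k| / |A|
    p = np.asarray(cluster_sizes, dtype=float)
    p = p / p.sum()
    p = p[p > 0]                         # empty clusters contribute nothing
    return -(p * np.log(p)).sum()

rng = np.random.default_rng(0)
n_agents, n_clusters, n_candidates = 20, 4, 8
# generate several random initial assignments, keep the one with maximum H
candidates = [rng.integers(0, n_clusters, size=n_agents) for _ in range(n_candidates)]
best = max(candidates,
           key=lambda a: assignment_entropy(np.bincount(a, minlength=n_clusters)))
```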
|
||||
|
||||
```
|
||||
FROM ubuntu:latest
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y python3 python3-pip && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN echo '[global]' > /etc/pip.conf && \
|
||||
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
|
||||
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
|
||||
|
||||
RUN pip3 install gradio requests[socks] mdtex2html
|
||||
|
||||
COPY . /gpt
|
||||
WORKDIR /gpt
|
||||
|
||||
|
||||
CMD ["python3", "main.py"]
|
||||
```
|
||||
138
crazy_functions/代码重写为全英文_多线程.py
Normal file
@@ -0,0 +1,138 @@
|
||||
import threading
|
||||
from request_llm.bridge_all import predict_no_ui_long_connection
|
||||
from toolbox import update_ui
|
||||
from toolbox import CatchException, write_results_to_file, report_execption
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit
|
||||
|
||||
def extract_code_block_carefully(txt):
|
||||
splitted = txt.split('```')
|
||||
n_code_block_seg = len(splitted) - 1
|
||||
if n_code_block_seg <= 1: return txt
|
||||
# In all other cases, keep only the content between the first and the last ``` fence
|
||||
txt_out = '```'.join(splitted[1:-1])
|
||||
return txt_out
|
||||
|
||||
|
||||
|
||||
def break_txt_into_half_at_some_linebreak(txt):
|
||||
lines = txt.split('\n')
|
||||
n_lines = len(lines)
|
||||
pre = lines[:(n_lines//2)]
|
||||
post = lines[(n_lines//2):]
|
||||
return "\n".join(pre), "\n".join(post)
|
||||
|
||||
|
||||
@CatchException
|
||||
def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
|
||||
# Step 1: clear the history to avoid input overflow
|
||||
history = []
|
||||
|
||||
# Step 2: try importing dependencies; if any are missing, suggest how to install them
|
||||
try:
|
||||
import tiktoken
|
||||
except:
|
||||
report_execption(chatbot, history,
|
||||
a = f"解析项目: {txt}",
|
||||
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
||||
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
|
||||
return
|
||||
|
||||
# Step 3: collect the files
|
||||
import time, glob, os, shutil, re
|
||||
os.makedirs('gpt_log/generated_english_version', exist_ok=True)
|
||||
os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
|
||||
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
||||
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
||||
# file_manifest = ['./toolbox.py']
|
||||
i_say_show_user_buffer = []
|
||||
|
||||
# Step 4: display something so the UI does not feel stuck
|
||||
for index, fp in enumerate(file_manifest):
|
||||
# if 'test_project' in fp: continue
|
||||
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
|
||||
file_content = f.read()
|
||||
i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
|
||||
i_say_show_user_buffer.append(i_say_show_user)
|
||||
chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
|
||||
|
||||
|
||||
# Step 5: truncate and process under the token limit
|
||||
MAX_TOKEN = 3000
|
||||
from request_llm.bridge_all import model_info
|
||||
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||
def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||
|
||||
|
||||
# Step 6: the worker function
|
||||
mutable_return = [None for _ in file_manifest]
|
||||
observe_window = [[""] for _ in file_manifest]
|
||||
def thread_worker(fp,index):
|
||||
if index > 10:
|
||||
time.sleep(60)
|
||||
print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
|
||||
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
|
||||
file_content = f.read()
|
||||
i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
|
||||
try:
|
||||
gpt_say = ""
|
||||
# 分解代码文件
|
||||
file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
|
||||
for file_content_partial in file_content_breakdown:
|
||||
i_say = i_say_template(fp, file_content_partial)
|
||||
# # ** gpt request **
|
||||
gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
|
||||
gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
|
||||
gpt_say += gpt_say_partial
|
||||
mutable_return[index] = gpt_say
|
||||
except ConnectionAbortedError as token_exceed_err:
|
||||
print('至少一个线程任务Token溢出而失败', e)
|
||||
except Exception as e:
|
||||
print('至少一个线程任务意外失败', e)
|
||||
|
||||
# 第7步:所有线程同时开始执行任务函数
|
||||
handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
|
||||
for h in handles:
|
||||
h.daemon = True
|
||||
h.start()
|
||||
chatbot.append(('开始了吗?', f'多线程操作已经开始'))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
# 第8步:循环轮询各个线程是否执行完毕
|
||||
cnt = 0
|
||||
while True:
|
||||
cnt += 1
|
||||
time.sleep(0.2)
|
||||
th_alive = [h.is_alive() for h in handles]
|
||||
if not any(th_alive): break
|
||||
# 更好的UI视觉效果
|
||||
observe_win = []
|
||||
for thread_index, alive in enumerate(th_alive):
|
||||
observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
|
||||
stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
|
||||
stat_str = ''.join(stat)
|
||||
chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
# 第9步:把结果写入文件
|
||||
for index, h in enumerate(handles):
|
||||
h.join() # 这里其实不需要join了,肯定已经都结束了
|
||||
fp = file_manifest[index]
|
||||
gpt_say = mutable_return[index]
|
||||
i_say_show_user = i_say_show_user_buffer[index]
|
||||
|
||||
where_to_relocate = f'gpt_log/generated_english_version/{fp}'
|
||||
if gpt_say is not None:
|
||||
with open(where_to_relocate, 'w+', encoding='utf-8') as f:
|
||||
f.write(gpt_say)
|
||||
else: # 失败
|
||||
shutil.copyfile(file_manifest[index], where_to_relocate)
|
||||
chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
|
||||
history.append(i_say_show_user); history.append(gpt_say)
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
time.sleep(1)
|
||||
|
||||
# 第10步:备份一个文件
|
||||
res = write_results_to_file(history)
|
||||
chatbot.append(("生成一份任务执行报告", res))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
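The concurrency pattern above — daemon worker threads appending partial output into per-thread `observe_window` slots while the main loop polls `is_alive()` and snapshots the windows — is shown in isolation below; the worker payload is a stand-in, not the repository's request function:

```python
import threading
import time

def worker(slot, n_steps=5):
    # Stand-in for a long-running request: stream partial output into the shared slot.
    for i in range(n_steps):
        time.sleep(0.1)
        slot[0] += f"chunk-{i} "

tasks = 3
observe_window = [[""] for _ in range(tasks)]
handles = [threading.Thread(target=worker, args=(observe_window[i],), daemon=True)
           for i in range(tasks)]
for h in handles:
    h.start()

while any(h.is_alive() for h in handles):
    time.sleep(0.2)
    # Snapshot the tail of each window, like the UI refresh loop above.
    print([w[0][-20:] for w in observe_window])
for h in handles:
    h.join()
```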
@@ -1,6 +1,5 @@
from toolbox import CatchException, update_ui, report_exception
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import read_and_clean_pdf_text
import datetime

# Below are the PROMPTs for each chart type.
@@ -162,7 +161,7 @@ mindmap
```
"""

def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs):
    ############################## <Step 0: split the input> ##################################
    # Reuse the PDF-splitting helper to cut the text into fragments.
    TOKEN_LIMIT_PER_FRAGMENT = 2500
@@ -170,8 +169,6 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
    ############################## <Step 1: iterate over the whole text and distill it> ##################################
    i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。"  # user-facing prompt
    chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history)  # update the UI
    results = []
    MAX_WORD_TOTAL = 4096
    n_txt = len(txt)
@@ -179,7 +176,7 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
    if n_txt >= 20: print('文章极长,不能达到预期效果')
    for i in range(n_txt):
        NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i]}"
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}"
        i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user,  # i_say = the actual prompt sent to the model; i_say_show_user = the prompt shown to the user
            llm_kwargs, chatbot,
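`breakdown_text_to_satisfy_token_limit` is imported from the repository; a minimal sketch of this kind of splitter — recursively halving at a line break until every piece fits the token budget — might look like the following (the whitespace token counter is a stand-in, not the real tokenizer):

```python
def split_to_token_limit(txt, limit, count_tokens=lambda t: len(t.split())):
    # If the fragment already fits, keep it whole.
    if count_tokens(txt) <= limit:
        return [txt]
    lines = txt.split('\n')
    if len(lines) < 2:
        # Cannot split further at a line break; return the oversize line as-is.
        return [txt]
    # Otherwise halve at the middle line break and recurse on both halves.
    mid = len(lines) // 2
    pre, post = '\n'.join(lines[:mid]), '\n'.join(lines[mid:])
    return split_to_token_limit(pre, limit, count_tokens) + \
           split_to_token_limit(post, limit, count_tokens)
```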
@@ -232,34 +229,10 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
        inputs=i_say,
        inputs_show_user=i_say_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。"
        sys_prompt=""
    )
    history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

def 输入区文件处理(txt):
    if txt == "": return False, txt
    success = True
    import glob
    from .crazy_utils import get_files_from_everything
    file_pdf, pdf_manifest, folder_pdf = get_files_from_everything(txt, '.pdf')
    file_md, md_manifest, folder_md = get_files_from_everything(txt, '.md')
    if len(pdf_manifest) == 0 and len(md_manifest) == 0:
        return False, txt  # if the input area does not hold a file, return its content unchanged

    final_result = ""
    if file_pdf:
        for index, fp in enumerate(pdf_manifest):
            file_content, page_one = read_and_clean_pdf_text(fp)  # (try to) split the PDF by section
            file_content = file_content.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            final_result += "\n" + file_content
    if file_md:
        for index, fp in enumerate(md_manifest):
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
            file_content = file_content.encode('utf-8', 'ignore').decode()
            final_result += "\n" + file_content
    return True, final_result

@CatchException
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
@@ -277,26 +250,47 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history,
    # Basic information: what the plugin does and who contributed it.
    chatbot.append([
        "函数插件功能?",
        "根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
        "根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
        \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the dependency; if it is missing, suggest how to install it.
    try:
        import fitz
    except:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    if os.path.exists(txt):  # if the input area is empty, fall back to parsing the chat history
        file_exist, txt = 输入区文件处理(txt)
        from crazy_functions.pdf_fns.parse_word import extract_text_from_files
        file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history)
    else:
        file_exist = False
        excption = ""
        file_manifest = []

    if file_exist: history = []  # if the input area holds a file, clear the history
    history.append(txt)  # append the parsed text to the history

    yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs)
    if excption != "":
        if excption == "word":
            report_exception(chatbot, history,
                             a=f"解析项目: {txt}",
                             b=f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。")

        elif excption == "pdf":
            report_exception(chatbot, history,
                             a=f"解析项目: {txt}",
                             b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")

        elif excption == "word_pip":
            report_exception(chatbot, history,
                             a=f"解析项目: {txt}",
                             b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")

        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    else:
        if not file_exist:
            history.append(txt)  # if the input area is not a file, append its content to the history
            i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。"  # user-facing prompt
            chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history)  # update the UI
            yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
        else:
            file_num = len(file_manifest)
            for i in range(file_num):  # process the files one by one
                i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。"  # user-facing prompt
                chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history)  # update the UI
                history = []  # a file in the input area replaces the history
                history.append(final_result[i])
                yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
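The `excption` handling above compares the tag against three known strings. A table-driven variant of the same dispatch (a hypothetical refactor; messages copied verbatim from the branches above) keeps the cases in one place:

```python
# Hypothetical refactor of the branch chain above: map each tag to its report text.
EXCEPTION_HINTS = {
    "word": "找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。",
    "pdf": "导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。",
    "word_pip": "导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。",
}

def report_known_exception(excption, txt, chatbot, history, report_exception):
    # report_exception is passed in to keep this sketch self-contained.
    hint = EXCEPTION_HINTS.get(excption)
    if hint is not None:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=hint)
```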
@@ -12,6 +12,12 @@ class PaperFileGroup():
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llms.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split the long text apart.
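The `get_token_num` helper added above pulls a tokenizer out of `model_info`; with plain `tiktoken` (assumed installed) an equivalent count looks like this:

```python
import tiktoken

# cl100k_base is the encoding used by the gpt-3.5-turbo family of models.
enc = tiktoken.get_encoding("cl100k_base")

def get_token_num(txt: str) -> int:
    return len(enc.encode(txt, disallowed_special=()))
```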
@@ -54,7 +60,7 @@ def parseNotebook(filename, enable_markdown=1):
        Code += f"This is {idx+1}th code block: \n"
        Code += code+"\n"

    return Code
    return Code


def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):

28 crazy_functions/辅助回答.py Normal file
@@ -0,0 +1,28 @@
# encoding: utf-8
# @Time   : 2023/4/19
# @Author : Spike
# @Descr  :
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive


@CatchException
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    if txt:
        show_say = txt
        prompt = txt + '\n回答完问题后,再列出用户可能提出的三个问题。'
    else:
        prompt = history[-1] + "\n分析上述回答,再列出用户可能提出的三个问题。"
        show_say = '分析上述回答,再列出用户可能提出的三个问题。'
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=prompt,
        inputs_show_user=show_say,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history=history,
        sys_prompt=system_prompt
    )
    chatbot[-1] = (show_say, gpt_say)
    history.extend([show_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -10,6 +10,9 @@ ENV PATH "$PATH:/usr/local/texlive/2024/bin/x86_64-linux"
ENV PATH "$PATH:/usr/local/texlive/2025/bin/x86_64-linux"
ENV PATH "$PATH:/usr/local/texlive/2026/bin/x86_64-linux"

# Delete documentation files to save space.
RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc

# Set the working directory.
WORKDIR /gpt

307 docs/README.md.German.md Normal file
@@ -0,0 +1,307 @@
> **Note**
>
> When installing dependencies, strictly select only the **versions specified** in **requirements.txt**.
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`

# <img src="docs/logo.png" width="40" > GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a star; if you have developed better shortcut keys or function plugins, feel free to open a pull request.**

If you like this project, please give it a star. If you have developed more useful academic shortcuts or function plugins, feel free to open an issue or a pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
To translate this project into any language with GPT, read and run `multi_language.py` (experimental).

> **Note**
>
> 1. Please note that only function plugins (buttons) marked in **red** can read files, and some plugins are located in the **dropdown menu** of the plugin area. We also welcome and handle any new function plugin with **highest priority**.
>
> 2. The functionality of each file in this project is described in detail in the self-analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can call the related function plugins at any time to have GPT regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
>
> 3. This project is compatible with and encourages the use of domestic language models such as ChatGLM, RWKV, Pangu, etc. It supports multiple api-keys, specified in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. If an `API_KEY` needs to be changed temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.

Function | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click search for grammatical errors in academic papers
One-click Chinese-English translation | One-click Chinese-English translation
One-click code explanation | Shows code, explains code, generates code, and adds comments to code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions). Plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[Self-program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the project tree of other Python/C/C++/Java/Lua/... projects
Reading papers, [translating](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function plugin] One-click explanation of a complete LaTeX/PDF article and generation of a summary
LaTeX full-text translation and [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX article
Bulk comment generation | [Function plugin] One-click bulk generation of function comments
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the 5 languages above?
Chat analysis report generation | [Function plugin] Automatic summary after execution
[Full PDF article translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of PDF articles and translates the full text (multi-threaded)
[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the Arxiv article URL for one-click abstract translation + PDF download
[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Enter any Google Scholar search URL and let GPT help you write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] Let GPT answer a question by [first gathering information from the internet](https://www.bilibili.com/video/BV1om4y127ck/), so the information never goes stale
Display of formulas / images / tables | Shows formulas in both [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
Multi-threaded plugin support | Supports calling ChatGPT in multiple threads to [batch-process](https://www.bilibili.com/video/BV1FT411H7c5/) text or programs
Dark Gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the end of the browser URL to enable the dark theme
[Support for multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) interface support | Being served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) must feel great, right?
Access to more LLM models, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Added the Newbing interface (new Bing); introduced support for Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu alpha](https://openi.org.cn/pangu/)
More new features (such as image generation) …… | See the end of this document ……

- New interface (change the LAYOUT option in `config.py` to switch between "side-by-side layout" and "top-bottom layout")
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>

- All buttons are dynamically generated by reading `functional.py`, and custom functions can be easily added, freeing up the clipboard.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Proofreading/Correcting
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't feel like reading the project code? Show off the entire project to chatgpt.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Multiple large language models are mixed and called together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---
# Installation
## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

Configure API KEY and other settings in `config.py`. [Special Network Environment Settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks whether a private configuration file named "config_private.py" exists and uses its settings to override those in "config.py". Therefore, if you understand our configuration reading logic, we strongly recommend creating a new configuration file named "config_private.py" next to "config.py" and transferring (copying) the configurations from "config.py" into "config_private.py". "config_private.py" is not controlled by git, which keeps your private information more secure. P.S. The project also supports configuring most options through `environment variables`; the environment variable format follows the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`)
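A minimal sketch of that reading priority, assuming both config modules are importable (illustrative only, not the project's actual loader; note that environment values always arrive as strings):

```python
import importlib
import os

def read_single_conf(key, default=None):
    value = getattr(importlib.import_module("config"), key, default)             # lowest priority
    try:
        value = getattr(importlib.import_module("config_private"), key, value)   # overrides config.py
    except ImportError:
        pass
    return os.environ.get(key, value)                                            # highest priority
```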

3. Install dependencies
```sh
# (Option I: If familiar with Python) (Python version 3.9 or above, the newer the better). Note: use the official pip source or the Aliyun pip source; temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: If not familiar with Python) Use anaconda with similar steps (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # Create an anaconda environment
conda activate gptac_venv                 # Activate the anaconda environment
python -m pip install -r requirements.txt # Same step as pip installation
```

<details><summary>Click to expand if supporting Tsinghua ChatGLM/Fudan MOSS as backend</summary>
<p>

[Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (prerequisites: familiar with Python + have used Pytorch + a sufficiently powerful machine):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Remark: If you encounter the "Call ChatGLM fail Cannot load ChatGLM parameters" error, refer to the following: 1. The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda. 2. If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llm/bridge_chatglm.py`: change all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the project root path

# [Optional Step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>


4. Run
```sh
python main.py
```

5. Test function plugins
```
- Test the function plugin template function (asks gpt what happened today in history); you can use this function as a template to implement more complex functions
  Click "[Function Plugin Template Demo] Today in History"
```

## Installation-Method 2: Using Docker

1. ChatGPT only (recommended for most people)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git # Download the project
cd gpt_academic                                            # Enter the path
nano config.py                                             # Edit config.py with any text editor: configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
docker build -t gpt-academic .                             # Install

# (Last step - option 1) In a Linux environment, using `--net=host` is more convenient and quick
docker run --rm -it --net=host gpt-academic
# (Last step - option 2) In a macOS/Windows environment, only the -p option can expose the container's port (e.g. 50923) to a port on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

``` sh
# Modify docker-compose.yml: delete solutions 1 and 3 and keep solution 2. Then modify the configuration of solution 2 in docker-compose.yml, referring to the comments in it.
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
``` sh
# Modify docker-compose.yml: delete solutions 1 and 2 and keep solution 3. Then modify the configuration of solution 3 in docker-compose.yml, referring to the comments in it.
docker-compose up
```


## Installation-Method 3: Other Deployment Options

1. How to use a reverse proxy URL / Microsoft Azure API
Configure API_URL_REDIRECT according to the instructions in `config.py`.

2. Remote cloud server deployment (requires cloud server knowledge and experience)
Please visit [Deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL 2 (Windows Subsystem for Linux)
Please visit [Deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run at a secondary URL (such as `http://localhost/subpath`)
Please visit [FastAPI operating instructions](docs/WithFastapi.md)

5. Use docker-compose to run
Please read docker-compose.yml and follow the prompts to operate.

---
# Advanced Usage
## Customize new convenience buttons / custom function plugins

1. Customize new convenience buttons (academic shortcut keys)
Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has already been added successfully and is visible, the prefix and suffix can be hot-modified and take effect without restarting the program.)
For example
```
"Super English to Chinese": {
    # Prefix, will be added before your input. For example, used to describe your request, such as translation, explaining code, polishing, etc.
    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",

    # Suffix, will be added after your input. For example, combined with the prefix, you can enclose your input content in quotes.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins

Write powerful function plugins to perform any task you want, including tasks you haven't thought of yet.
Writing and debugging plugins in this project is easy: with some basic knowledge of Python, you can implement your own plugin functions by imitating the template we provide.
For more information, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
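A minimal sketch of such a plugin, modeled on the plugin signatures that appear in the diffs above (`demo_plugin` and its body are illustrative, not a plugin shipped with the project):

```python
from toolbox import CatchException, update_ui

@CatchException
def demo_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Echo the input back into the chat window; a real plugin would call the LLM here.
    chatbot.append((txt, f"[Local Message] received: {txt}"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```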

---
# Latest Update
## New Feature Dynamics

1. Dialogue saving. In the function plugin area, call "Save the current dialogue" to save the current dialogue as a readable and restorable HTML file. In addition, in the function plugin area (dropdown menu), call "Load dialogue history archive" to restore a previous dialogue. Tip: clicking "Load dialogue history archive" without specifying a file lets you view the cached HTML archives; clicking "Delete all local dialogue history records" deletes all cached HTML archives.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Report generation. Most plugins generate a work report after execution.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modular function design: simple interfaces that support powerful functionality.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. This is an open-source project that can "translate itself".
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Translating other open-source projects is no problem.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. Decorate [`live2d`](https://github.com/fghrsh/live2d_demo) with small features (disabled by default; requires changes to `config.py`).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. New MOSS language model support.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI image generation.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio analysis and summarization.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Full-text LaTeX proofreading.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>


## Version:
- Version 3.5 (Todo): Call all of this project's function plugins with natural language (high priority).
- Version 3.4 (Todo): Improved multi-threading support for local large language models (LLM).
- Version 3.3: + Internet information synthesis function
- Version 3.2: Function plugins support more parameter interfaces (dialogue saving, interpreting code in any language + asking about any LLM combination simultaneously)
- Version 3.1: Support for multiple GPT models at the same time! Support for API2D, support for load balancing across multiple API keys.
- Version 3.0: Support for ChatGLM and other small LLMs
- Version 2.6: Restructured the plugin architecture to improve interactivity; introduced more plugins
- Version 2.5: Self-updating; fixed the problem of overly long text and token overflow when summarizing the source code of large projects.
- Version 2.4: (1) Added full-text PDF translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- Version 2.3: Improved multi-threaded interactivity
- Version 2.2: Function plugins support hot reloading
- Version 2.1: Collapsible layout
- Version 2.0: Introduction of modular function plugins
- Version 1.0: Basic functions

gpt_academic developer QQ group-2: 610599535

- Known issues
  - Some browser translation plugins interfere with the frontend of this software.
  - A gradio version that is too high or too low causes various failures.

## Reference and Learning

```
The code references the designs of many other excellent projects, in particular:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
316 docs/README.md.Italian.md Normal file
@@ -0,0 +1,316 @@
|
||||
> **Nota**
|
||||
>
|
||||
> Durante l'installazione delle dipendenze, selezionare rigorosamente le **versioni specificate** nel file requirements.txt.
|
||||
>
|
||||
> ` pip install -r requirements.txt`
|
||||
|
||||
# <img src="logo.png" width="40" > GPT Ottimizzazione Accademica (GPT Academic)
|
||||
|
||||
**Se ti piace questo progetto, ti preghiamo di dargli una stella. Se hai sviluppato scorciatoie accademiche o plugin funzionali più utili, non esitare ad aprire una issue o pull request. Abbiamo anche una README in [Inglese|](README_EN.md)[Giapponese|](README_JP.md)[Coreano|](https://github.com/mldljyh/ko_gpt_academic)[Russo|](README_RS.md)[Francese](README_FR.md) tradotta da questo stesso progetto.
|
||||
Per tradurre questo progetto in qualsiasi lingua con GPT, leggere e eseguire [`multi_language.py`](multi_language.py) (sperimentale).
|
||||
|
||||
> **Nota**
|
||||
>
|
||||
> 1. Si prega di notare che solo i plugin (pulsanti) contrassegnati in **rosso** supportano la lettura di file, alcuni plugin sono posizionati nel **menu a discesa** nella zona dei plugin. Accettiamo e gestiamo PR per qualsiasi nuovo plugin con **massima priorità**!
|
||||
>
|
||||
> 2. Le funzionalità di ogni file di questo progetto sono descritte dettagliatamente nella propria analisi di autotraduzione [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Con l'iterazione delle versioni, è possibile fare clic sui plugin funzionali correlati in qualsiasi momento per richiamare GPT e generare nuovamente il rapporto di analisi automatica del progetto. Le domande frequenti sono riassunte nella [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Metodo di installazione] (#installazione).
|
||||
>
|
||||
> 3. Questo progetto è compatibile e incoraggia l'utilizzo di grandi modelli di linguaggio di produzione nazionale come chatglm, RWKV, Pangu ecc. Supporta la coesistenza di più api-key e può essere compilato nel file di configurazione come `API_KEY="openai-key1,openai-key2,api2d-key3"`. Per sostituire temporaneamente `API_KEY`, inserire `API_KEY` temporaneo nell'area di input e premere Invio per renderlo effettivo.
|
||||
|
||||
<div align="center">
|
||||
|
||||
Funzione | Descrizione
|
||||
--- | ---
|
||||
Correzione immediata | Supporta correzione immediata e ricerca degli errori di grammatica del documento con un solo clic
|
||||
Traduzione cinese-inglese immediata | Traduzione cinese-inglese immediata con un solo clic
|
||||
Spiegazione del codice immediata | Visualizzazione del codice, spiegazione del codice, generazione del codice, annotazione del codice con un solo clic
|
||||
[Scorciatoie personalizzate](https://www.bilibili.com/video/BV14s4y1E7jN) | Supporta scorciatoie personalizzate
|
||||
Design modularizzato | Supporta potenti [plugin di funzioni](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions) personalizzati, i plugin supportano l'[aggiornamento in tempo reale](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||
[Auto-profiling del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] [Comprensione immediata](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) del codice sorgente di questo progetto
|
||||
[Analisi del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] Un clic può analizzare l'albero di altri progetti Python/C/C++/Java/Lua/...
|
||||
Lettura del documento, [traduzione](https://www.bilibili.com/video/BV1KT411x7Wn) del documento | [Plugin di funzioni] La lettura immediata dell'intero documento latex/pdf di un documento e la generazione di un riassunto
|
||||
Traduzione completa di un documento Latex, [correzione immediata](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin di funzioni] Una traduzione o correzione immediata di un documento Latex
|
||||
Generazione di annotazioni in batch | [Plugin di funzioni] Generazione automatica delle annotazioni di funzione con un solo clic
|
||||
[Traduzione cinese-inglese di Markdown](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin di funzioni] Hai letto il [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) delle cinque lingue sopra?
|
||||
Generazione di report di analisi di chat | [Plugin di funzioni] Generazione automatica di un rapporto di sintesi dopo l'esecuzione
|
||||
[Funzione di traduzione di tutto il documento PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin di funzioni] Estrarre il titolo e il sommario dell'articolo PDF + tradurre l'intero testo (multithreading)
|
||||
[Assistente di Arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin di funzioni] Inserire l'URL dell'articolo di Arxiv e tradurre il sommario con un clic + scaricare il PDF
|
||||
[Assistente integrato di Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin di funzioni] Con qualsiasi URL di pagina di ricerca di Google Scholar, lascia che GPT ti aiuti a scrivere il tuo [relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||
Aggregazione delle informazioni su Internet + GPT | [Plugin di funzioni] Fai in modo che GPT rilevi le informazioni su Internet prima di rispondere alle domande, senza mai diventare obsolete
|
||||
Visualizzazione di formule/img/tabelle | È possibile visualizzare un'equazione in forma [tex e render](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) contemporaneamente, supporta equazioni e evidenziazione del codice
|
||||
Supporto per plugin di funzioni multithreading | Supporto per chiamata multithreaded di chatgpt, elaborazione con un clic di grandi quantità di testo o di un programma
|
||||
Avvia il tema di gradio [scuro](https://github.com/binary-husky/gpt_academic/issues/173) | Aggiungere ```/?__theme=dark``` dopo l'URL del browser per passare a un tema scuro
|
||||
Supporto per maggiori modelli LLM, supporto API2D | Sentirsi serviti simultaneamente da GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) deve essere una grande sensazione, giusto?
|
||||
Ulteriori modelli LLM supportat,i supporto per l'implementazione di Huggingface | Aggiunta di un'interfaccia Newbing (Nuovo Bing), introdotta la compatibilità con Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) e [PanGu-α](https://openi.org.cn/pangu/)
|
||||
Ulteriori dimostrazioni di nuove funzionalità (generazione di immagini, ecc.)... | Vedere la fine di questo documento...
|
||||
</div>
|
||||
|
||||
|
||||
- Nuova interfaccia (modificare l'opzione LAYOUT in `config.py` per passare dal layout a sinistra e a destra al layout superiore e inferiore)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>Sei un traduttore professionista di paper accademici.
|
||||
|
||||
- Tutti i pulsanti vengono generati dinamicamente leggendo il file functional.py, e aggiungerci nuove funzionalità è facile, liberando la clipboard.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Revisione/Correzione
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Se l'output contiene una formula, viene visualizzata sia come testo che come formula renderizzata, per facilitare la copia e la visualizzazione.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Non hai tempo di leggere il codice del progetto? Passa direttamente a chatgpt e chiedi informazioni.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Chiamata mista di vari modelli di lingua di grandi dimensioni (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
---
|
||||
# Installazione
|
||||
## Installazione - Metodo 1: Esecuzione diretta (Windows, Linux o MacOS)
|
||||
|
||||
1. Scarica il progetto
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git
|
||||
cd gpt_academic
|
||||
```
|
||||
|
||||
2. Configura API_KEY
|
||||
|
||||
In `config.py`, configura la tua API KEY e altre impostazioni, [configs for special network environments](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||
|
||||
(N.B. Quando il programma viene eseguito, verifica prima se esiste un file di configurazione privato chiamato `config_private.py` e sovrascrive le stesse configurazioni in `config.py`. Pertanto, se capisci come funziona la nostra logica di lettura della configurazione, ti consigliamo vivamente di creare un nuovo file di configurazione chiamato `config_private.py` accanto a `config.py`, e spostare (copiare) le configurazioni di `config.py` in `config_private.py`. 'config_private.py' non è sotto la gestione di git e può proteggere ulteriormente le tue informazioni personali. NB Il progetto supporta anche la configurazione della maggior parte delle opzioni tramite "variabili d'ambiente". La sintassi della variabile d'ambiente è descritta nel file `docker-compose`. Priorità di lettura: "variabili d'ambiente" > "config_private.py" > "config.py")
|
||||
|
||||
|
||||
3. Installa le dipendenze
|
||||
```sh
|
||||
# (Scelta I: se sei familiare con python) (python 3.9 o superiore, più nuovo è meglio), N.B.: utilizza il repository ufficiale pip o l'aliyun pip repository, metodo temporaneo per cambiare il repository: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (Scelta II: se non conosci Python) utilizza anaconda, il processo è simile (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||
conda create -n gptac_venv python=3.11 # crea l'ambiente anaconda
|
||||
conda activate gptac_venv # attiva l'ambiente anaconda
|
||||
python -m pip install -r requirements.txt # questo passaggio funziona allo stesso modo dell'installazione con pip
|
||||
```
|
||||
|
||||
<details><summary>Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, fare clic qui per espandere</summary>
|
||||
<p>
|
||||
|
||||
【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente):
|
||||
```sh
|
||||
# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
|
||||
# 【Passaggio facoltativo II】 Supporto a MOSS di Fudan
|
||||
python -m pip install -r request_llm/requirements_moss.txt
|
||||
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto
|
||||
|
||||
# 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker):
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
```
|
||||
|
||||
</p>
|
||||
</details>
|
||||
|
||||
|
||||
|
||||
4. Esegui
|
||||
```sh
|
||||
python main.py
|
||||
```5. Plugin di test delle funzioni
|
||||
```
|
||||
- Funzione plugin di test (richiede una risposta gpt su cosa è successo oggi in passato), puoi utilizzare questa funzione come template per implementare funzionalità più complesse
|
||||
Clicca su "[Demo del plugin di funzione] Oggi nella storia"
|
||||
```
|
||||
|
||||
## Installazione - Metodo 2: Utilizzo di Docker
|
||||
|
||||
1. Solo ChatGPT (consigliato per la maggior parte delle persone)
|
||||
|
||||
``` sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git # scarica il progetto
|
||||
cd gpt_academic # entra nel percorso
|
||||
nano config.py # con un qualsiasi editor di testo, modifica config.py configurando "Proxy", "API_KEY" e "WEB_PORT" (ad esempio 50923)
|
||||
docker build -t gpt-academic . # installa
|
||||
|
||||
#(ultimo passaggio - selezione 1) In un ambiente Linux, utilizzare '--net=host' è più conveniente e veloce
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
#(ultimo passaggio - selezione 2) In un ambiente MacOS/Windows, l'opzione -p può essere utilizzata per esporre la porta del contenitore (ad es. 50923) alla porta della macchina
|
||||
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||
```
|
||||
|
||||
2. ChatGPT + ChatGLM + MOSS (richiede familiarità con Docker)
|
||||
|
||||
``` sh
|
||||
# Modifica docker-compose.yml, elimina i piani 1 e 3, mantieni il piano 2. Modifica la configurazione del piano 2 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
3. ChatGPT + LLAMA + Pangu + RWKV (richiede familiarità con Docker)
|
||||
|
||||
``` sh
|
||||
# Modifica docker-compose.yml, elimina i piani 1 e 2, mantieni il piano 3. Modifica la configurazione del piano 3 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
|
||||
## Installazione - Metodo 3: Altre modalità di distribuzione
|
||||
|
||||
1. Come utilizzare un URL di reindirizzamento / AzureAPI Cloud Microsoft
|
||||
Configura API_URL_REDIRECT seguendo le istruzioni nel file `config.py`.
|
||||
|
||||
2. Distribuzione su un server cloud remoto (richiede conoscenze ed esperienza di server cloud)
|
||||
Si prega di visitare [wiki di distribuzione-1] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
3. Utilizzo di WSL2 (Windows Subsystem for Linux)
|
||||
Si prega di visitare [wiki di distribuzione-2] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
|
||||
4. Come far funzionare ChatGPT all'interno di un sottodominio (ad es. `http://localhost/subpath`)
|
||||
Si prega di visitare [Istruzioni per l'esecuzione con FastAPI] (docs/WithFastapi.md)
|
||||
|
||||
5. Utilizzo di docker-compose per l'esecuzione
|
||||
Si prega di leggere il file docker-compose.yml e seguire le istruzioni fornite.
|
||||
|
||||
---
|
||||
# Uso avanzato
|
||||
## Personalizzazione dei pulsanti / Plugin di funzione personalizzati
|
||||
|
||||
1. Personalizzazione dei pulsanti (scorciatoie accademiche)
|
||||
Apri `core_functional.py` con qualsiasi editor di testo e aggiungi la voce seguente, quindi riavvia il programma (se il pulsante è già stato aggiunto con successo e visibile, il prefisso e il suffisso supportano la modifica in tempo reale, senza bisogno di riavviare il programma).
|
||||
|
||||
ad esempio
|
||||
```
|
||||
"超级英译中": {
|
||||
# Prefisso, verrà aggiunto prima del tuo input. Ad esempio, descrivi la tua richiesta, come tradurre, spiegare il codice, correggere errori, ecc.
|
||||
"Prefix": "Per favore traduci questo testo in Cinese, e poi spiega tutti i termini tecnici nel testo con una tabella markdown:\n\n",
|
||||
|
||||
# Suffisso, verrà aggiunto dopo il tuo input. Ad esempio, con il prefisso puoi circondare il tuo input con le virgolette.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
2. Plugin di funzione personalizzati
|
||||
|
||||
Scrivi plugin di funzione personalizzati e esegui tutte le attività che desideri o non hai mai pensato di fare.
|
||||
La difficoltà di scrittura e debug dei plugin del nostro progetto è molto bassa. Se si dispone di una certa conoscenza di base di Python, è possibile realizzare la propria funzione del plugin seguendo il nostro modello. Per maggiori dettagli, consultare la [guida al plugin per funzioni](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||
|
||||
---

# Latest update
## New dynamic features

1. Conversation saving. In the function plugin area, click "Save the current conversation" to save it as a readable, restorable HTML file; in the same area (drop-down menu), click "Load archived conversation history" to restore a previous conversation. Tip: clicking "Load archived conversation history" without specifying a file shows the cache of archived HTML history; click "Delete all local conversation history records" to clear all HTML archive caches.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Report generation. Most plugins generate a work report after execution.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modular function design: simple interfaces that nevertheless support powerful functionality.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. This is an open-source project that can "translate itself".
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Translating other open-source projects is simple.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. A small decorative [live2d](https://github.com/fghrsh/live2d_demo) feature (disabled by default; requires modifying `config.py`).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. Support for the MOSS large language model
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI image generation
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio parsing and summarization
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Full-text LaTeX proofreading and error correction
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>

## Versions:
- version 3.5 (todo): call all of this project's function plugins with natural language (high priority)
- version 3.4 (todo): multi-threading support for the locally deployed ChatGLM large language model
- version 3.3: +internet information aggregation
- version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language + asking any combination of LLMs simultaneously)
- version 3.1: support for querying multiple GPT models at once! api2d support, load balancing across multiple apikeys
- version 3.0: support for ChatGLM and other small LLMs
- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
- version 2.5: self-updating; fixed overly long text and token overflow when summarizing large engineering projects
- version 2.4: (1) added full-document PDF translation; (2) added input-area position switching; (3) added vertical layout option; (4) optimized multi-threaded function plugins
- version 2.3: improved multi-threaded interactivity
- version 2.2: function plugins support hot-reloading
- version 2.1: collapsible layout
- version 2.0: introduced modular function plugins
- version 1.0: basic functionality

gpt_academic developers QQ group-2: 610599535

- Known issues
    - Some browser translation plugins interfere with the front end of this software
    - A gradio version that is too high or too low can cause a variety of malfunctions

## References and learning

```
The code draws on the design of many other excellent projects, mainly:

# Project 1: Tsinghua's ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua's JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
270 docs/README.md.Korean.md Normal file
@@ -0,0 +1,270 @@
> **Note**
>
> When installing dependencies, strictly select the **specified versions** from requirements.txt.
>
> `pip install -r requirements.txt`

# <img src="docs/logo.png" width="40" > GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a Star. If you have come up with more useful academic shortcuts or function plugins, feel free to open an issue or pull request. READMEs for this project are also available in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md).
To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**

> **Note**
>
> 1. Only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** of the plugin area. New plugins are welcomed and handled with the **highest priority**!
>
> 2. The function of each file in this project is described in detail in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can also click the related function plugins at any time to call GPT to regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
>
> 3. This project is compatible with, and encourages trying, domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys are supported and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter for it to take effect immediately.

<div align="center">

Feature | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click finding of grammar errors in papers
One-click Chinese-English translation | One-click Chinese-English translation
One-click code explanation | Display, explain, generate, and comment code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
Modular design | Supports powerful [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot-reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[Self-program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click understanding of this project's source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other project trees (Python/C/C++/Java/Lua/...)
Paper reading, translation | [Function plugin] Reads full LaTeX/PDF papers and generates an abstract
LaTeX full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of LaTeX papers
Batch comment generation | [Function plugin] One-click batch generation of function comments
Markdown Chinese-English translation | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
[PDF paper full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper, then translates the full text (multi-threaded)
[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv paper URL to translate the abstract and download the PDF
[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let gpt help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] Lets GPT gather information from the Internet before answering questions, so the information is never outdated
Formula/image/table display | Displays formulas in both TeX and rendered form; supports formula and code highlighting
Multi-threaded function plugin support | Runs chatgpt across multiple requests to process [massive text](https://www.bilibili.com/video/BV1FT411H7c5/) or whole programs with one click
Dark gradio theme at startup | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
[Multi-LLM model](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface supported | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must feel great, right?
More LLM models, [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) support | Added the Newbing interface (New Bing); introduced Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) with support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV), and [Pangu-α](https://openi.org.cn/pangu/)
More new features (image generation, etc.) ... | See the end of this document ...

</div>

- All buttons are dynamically generated by reading functional.py, so custom functions can be added freely, liberating the clipboard
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Proofreading/typo correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are displayed in both TeX and rendered form at the same time, for easy copying and reading
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- No time to read the project code? Just show the whole project to chatgpt
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixed requests to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---
# Installation
## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

Configure the API KEY and other settings in `config.py` ([special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1)).

(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses its settings to override the same-named settings in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into `config_private.py`. `config_private.py` is not tracked by git and keeps your private information safer. P.S. The project also supports configuring most options via `environment variables`; see the `docker-compose` file for the environment variable format. Reading priority: `environment variables` > `config_private.py` > `config.py`. A minimal sketch of the override file is shown below.)
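
For illustration — a minimal sketch of what `config_private.py` can look like. It simply redefines the variables it wants to override; the values below are placeholders, and the variable names are assumed to match those in `config.py`:

```python
# config_private.py — hypothetical minimal example (not tracked by git).
# Any variable defined here overrides the same-named variable in config.py;
# environment variables rank higher still.
API_KEY = "openai-key1,openai-key2,api2d-key3"  # placeholder keys
USE_PROXY = False                               # assumed option name from config.py
WEB_PORT = 50923                                # hypothetical port, matching the Docker examples below
```
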
3. Install dependencies
```sh
# (Option I: if you have prior python experience) (python 3.9 or above; the newer the better). Note: use the official pip source or the Aliyun pip source; temporary source switch: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: if you are unfamiliar with python) use anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11  # create the anaconda environment
conda activate gptac_venv               # activate the anaconda environment
python -m pip install -r requirements.txt  # this step is the same as the pip install step
```

<details><summary>Click to expand if you additionally need Tsinghua ChatGLM / Fudan MOSS support</summary>
<p>

To use [Tsinghua ChatGLM] / [Fudan MOSS] as the backend, extra dependencies must be installed (prerequisites: you know Python, have used Pytorch, and your machine is powerful enough):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Note on Tsinghua ChatGLM: if you hit a "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following:
# 1: The default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda.
# 2: If the model cannot be loaded due to insufficient machine configuration, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
#    to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # you must be in the project root path when running this line

# [Optional Step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models.
# Currently supported models:
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>

4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Test the function plugin template function (asks GPT what happened in history on this day); you can use it as a template to implement more complex features.
    Click "[Function plugin template demo] Today in history".
```

## Installation - Method 2: Using Docker

1. ChatGPT only (recommended for most people)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git  # download
cd gpt_academic                                             # enter the path
nano config.py                                              # open config.py with any text editor and configure "Proxy", "API_KEY", "WEB_PORT" (e.g. 50923), etc.
docker build -t gpt-academic .                              # install

#(Last step - choice 1) In a Linux environment, --net=host is more convenient.
docker run --rm -it --net=host gpt-academic
#(Last step - choice 2) In macOS/windows environments, the -p option must be used to expose the container's port (e.g. 50923) to the host.
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

``` sh
# Edit docker-compose.yml: remove plan 1 and plan 3, keep plan 2, then modify plan 2's configuration in docker-compose.yml; see the comments in the file.
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
``` sh
# Edit docker-compose.yml: remove plan 1 and plan 2, keep plan 3, then modify plan 3's configuration in docker-compose.yml; see the comments in the file.
docker-compose up
```

## Installation - Method 3: Other deployment methods

1. How to use a reverse proxy URL / the Microsoft Azure API
    Just configure API_URL_REDIRECT following the instructions in `config.py` (see the sketch after this list).

2. Remote cloud server deployment (requires cloud server knowledge and experience)
    Visit [deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).

3. Using WSL2 (Windows Subsystem for Linux)
    Visit [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2).

4. How to run under a sub-URL (e.g. `http://localhost/subpath`)
    See the [FastAPI running instructions](docs/WithFastapi.md).

5. Running with docker-compose
    Read docker-compose.yml and follow its instructions.
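
As referenced in item 1 above — a hedged sketch of an `API_URL_REDIRECT` entry. The dictionary-of-endpoints shape is an assumption based on the description in `config.py`, and the proxy address is a placeholder:

```python
# Hypothetical entry for config.py / config_private.py: requests to the official
# endpoint are redirected to your reverse proxy or Azure-compatible endpoint.
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions": "https://your-proxy.example.com/v1/chat/completions",
}
```
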
---
# Advanced usage
## Custom shortcut buttons / custom function plugins

1. Custom shortcut buttons (academic shortcuts)
    Open `core_functional.py` with any text editor, add an entry, and restart the program. (If the button has already been added and is visible, both the prefix and the suffix can be hot-modified without restarting the program.)
    For example:
```
"超级英译中": {
    # Prefix: added before your input. For example, used to describe your request, such as translating, explaining code, polishing, etc.
    "Prefix": "下面翻译成中文,然后用一个 markdown 表格逐一解释文中出现的专有名词:\n\n",

    # Suffix: added after your input. Together with the prefix, it can, for example, wrap your input in quotation marks.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins
    Write powerful function plugins to carry out any task you want.
    Writing and debugging plugins for this project is easy: with some basic Python knowledge, you can implement your own plugin by imitating the template we provide. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).

---
# Latest update
## New feature trends

1. Conversation saving. In the function plugin area, call "Save the current conversation" to save the current conversation as a readable, restorable HTML file; in the function plugin area (drop-down menu), call "Load conversation history archive" to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file shows the cached HTML history archives; clicking "Delete all local conversation history" deletes all HTML caches.

2. Report generation. Most plugins generate a work report after finishing execution.

3. Modular function design: simple interfaces that support powerful features.

4. An open-source project capable of translating itself.

5. Translating other open-source projects is not difficult.

6. Decorative [live2d](https://github.com/fghrsh/live2d_demo) feature (disabled by default; requires modifying `config.py`).

7. Added support for the MOSS large language model

8. OpenAI image generation

9. OpenAI audio parsing and summarization

10. Full-text LaTeX proofreading and error correction

## Versions:
- version 3.5 (TODO): call all of this project's function plugins with natural language (high priority)
- version 3.4 (TODO): improved multi-threading support for local large models
- version 3.3: +internet information aggregation
- version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language + asking any combination of LLMs simultaneously)
- version 3.1: support for querying multiple GPT models at once! api2d support, load balancing across multiple apikeys
- version 3.0: support for chatglm and other small LLMs
- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
- version 2.5: self-updating; fixed overly long text and token overflow when summarizing whole projects
- version 2.4: (1) added full-document PDF translation; (2) added input-area position switching; (3) added vertical layout option; (4) optimized multi-threaded function plugins
- version 2.3: improved multi-threaded interactivity
- version 2.2: function plugins support hot-reloading
- version 2.1: collapsible layout
- version 2.0: introduced modular function plugins
- version 1.0: basic functionality

gpt_academic developers QQ group-2: 610599535

- Known issues
    - Some browser translation plugins interfere with the front end of this software
    - A gradio version that is too high or too low can cause a variety of malfunctions

## References and learning

```
The code draws on the design of many excellent projects, mainly:

# Project 1: Tsinghua's ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua's JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
324 docs/README.md.Portuguese.md Normal file
@@ -0,0 +1,324 @@
> **Note**
>
> When installing dependencies, strictly select the versions **specified** in requirements.txt.
>
> `pip install -r requirements.txt`

# <img src="logo.png" width="40" > GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a Star. If you have created more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have READMEs in [English|](README_EN.md)[日本語|](README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](README_RS.md)[Français](README_FR.md), translated by this project itself.
To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**

> **Note**
>
> 1. Please note that only the function plugins (buttons) marked in **red** can read files. Some plugins are in the **drop-down menu** of the plugin area. New plugin PRs are welcomed and handled with the **highest priority**!
>
> 2. The function of each file in this project is detailed in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A), a self-analysis generated by GPT that can also be regenerated at any time by clicking the related plugins. Frequently asked questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
>
> 3. This project is compatible with, and encourages the use of, domestic language models such as chatglm, RWKV, Pangu, etc. Multiple API keys can coexist and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter for it to take effect.

<div align="center">

Feature | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click finding of grammar errors in papers
One-click Chinese-English translation | One-click Chinese-English translation
One-click code explanation | Display, explain, generate, and comment code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot-reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[Automatic program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click interpretation of full LaTeX/PDF papers and abstract generation
LaTeX full-text translation and polishing | [Function plugin] One-click translation or polishing of LaTeX papers
Batch comment generation | [Function plugin] One-click batch generation of function comments
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the README in the five languages above?
Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
[PDF paper full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded)
Arxiv assistant | [Function plugin] Enter an arXiv paper URL to translate the abstract and download the PDF
Google Scholar integration assistant | [Function plugin] Given any Google Scholar search page URL, let GPT write the [related works](https://www.bilibili.com/video/BV1GP411U7Az/) for you
Internet information aggregation + GPT | [Function plugin] One click lets GPT fetch information from the Internet before answering questions, so the information is never outdated
Formula/image/table display | Displays formulas in both TeX and rendered form simultaneously; supports formulas and code highlighting
Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of [massive text](https://www.bilibili.com/video/BV1FT411H7c5/) or whole programs
Dark gradio theme | Append ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme
[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, support for the new API2D interface | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must feel great, right?
More built-in LLM models, [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) support | Added the Newbing interface (New Bing); introduced Tsinghua [JittorLLMs](https://github.com/Jittor/JittorLLMs) with support for LLaMA, RWKV, and Pangu-α
More new features (image generation, etc.) ... | See the end of this document ...

</div>

- New interface (modify the LAYOUT option in `config.py` to switch between the left/right layout and the top/bottom layout; a config sketch follows after the screenshot below)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>
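
A hedged illustration of the LAYOUT option mentioned above — the option name comes from `config.py`, but the two values shown are assumptions based on the described left/right vs. top/bottom arrangements:

```python
# Hypothetical excerpt from config.py / config_private.py:
LAYOUT = "LEFT-RIGHT"   # assumed value: input and output side by side
# LAYOUT = "TOP-DOWN"   # assumed alternative: input area above the output area
```
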
- All buttons are dynamically generated by reading functional.py, so custom functions can be added at will, liberating the clipboard
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700">
</div>

- Proofreading/error correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700">
</div>

- If the output contains formulas, they are displayed in both TeX and rendered form at the same time, for easy copying and reading
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700">
</div>

- Don't want to read the project code? Just show the whole project to chatgpt
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700">
</div>

- Mixed use of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700">
</div>

---
# Installation
## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project

```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure the API KEY

In `config.py`, configure the API KEY and other settings ([special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1)).

(P.S. When the program runs, it first checks for a private configuration file named `config_private.py` and uses its settings to override the same-named settings in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and transferring (copying) the settings from `config.py` into it. `config_private.py` is not controlled by git and keeps your private information more secure. P.S. The project also supports configuring most options through `environment variables`; see the `docker-compose` file for the format. Reading priority: `environment variables` > `config_private.py` > `config.py`.)

3. Install dependencies

```sh
# (Option I: for those familiar with python) (python 3.9 or above; the newer the better). Note: use the official pip source or the Alibaba pip source; temporary source switch: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: for those unfamiliar with python) use anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11  # create the anaconda environment
conda activate gptac_venv               # activate the anaconda environment
python -m pip install -r requirements.txt  # this step is the same as the pip installation step
```

<details><summary>If you need Tsinghua ChatGLM / Fudan MOSS as the backend, click here to expand</summary>
<p>

[Optional step] To support Tsinghua ChatGLM / Fudan MOSS as the backend, extra dependencies must be installed (prerequisites: familiar with Python, have used Pytorch, and a strong machine configuration):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Note on Tsinghua ChatGLM: if you hit a "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: the default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda; 2: if the model cannot be loaded due to insufficient machine configuration, change the model precision in request_llm/bridge_chatglm.py: replace AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # you must be in the project root path when running this line

# [Optional Step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models:
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>

4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Test the function plugin template function (asks GPT what happened in history on this day); you can use it as a template to implement more complex functions
    Click "[Function plugin template demo] Today in history"
```

## Installation - Method 2: Using Docker

1. ChatGPT only (recommended for most people)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
cd gpt_academic                                             # enter the path
nano config.py                                              # edit config.py with any text editor, configuring "Proxy", "API_KEY", "WEB_PORT" (e.g. 50923), etc.
docker build -t gpt-academic .                              # install

# (Last step - choice 1) In a Linux environment, using `--net=host` is easier and faster
docker run --rm -it --net=host gpt-academic
# (Last step - choice 2) In macOS/windows environments, only the -p option can expose the container's port (e.g. 50923) to the host port
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires Docker knowledge)

``` sh
# Edit docker-compose.yml: remove solutions 1 and 3, keep solution 2, and follow the instructions in the file's comments
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (requires Docker knowledge)
``` sh
# Edit docker-compose.yml: remove solutions 1 and 2, keep solution 3, and follow the instructions in the file's comments
docker-compose up
```

## Installation - Method 3: Other deployment methods

1. How to use reverse proxy URLs / the Microsoft Azure API
    Just configure API_URL_REDIRECT following the instructions in `config.py`.

2. Deployment on remote cloud servers (requires cloud server knowledge and experience)
    Visit the [remote cloud server deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)
    Visit the [WSL2 deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run under a subdirectory (e.g. `http://localhost/subpath`)
    See the [FastAPI running instructions](docs/WithFastapi.md)

5. Running with docker-compose
    Read the docker-compose.yml file and follow its instructions.

# Advanced usage
## Custom new shortcut buttons / custom function plugins

1. Custom new shortcut buttons (academic shortcuts)
    Open `core_functional.py` with any text editor, add an entry as below, and restart the program. (If the button has already been added and is visible, prefixes and suffixes support hot modification and take effect without restarting the program.)
    For example (a sketch of how the entry is applied follows the screenshot below):
```
"Super Eng:": {
    # Prefix: added before your input. For example, used to describe your request, such as translating, explaining code, polishing, etc.
    "Prefix": "Por favor, traduza o seguinte conteúdo para chinês e use uma tabela em Markdown para explicar termos próprios no texto: \n \n",

    # Suffix: added after your input. For example, together with the prefix it can wrap your input in quotation marks.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>
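
To make the Prefix/Suffix mechanics concrete — a sketch assuming the button simply concatenates the two fields around the text in the input area (the entry shown is the hypothetical example above):

```python
# Assumed behavior of a shortcut button: the prompt sent to the model
# is Prefix + input-area text + Suffix.
entry = {
    "Prefix": "Por favor, traduza o seguinte conteúdo para chinês e use uma tabela em Markdown para explicar termos próprios no texto: \n \n",
    "Suffix": "",
}
user_input = "Attention weighs pairwise token interactions."
prompt = entry["Prefix"] + user_input + entry["Suffix"]
print(prompt)  # what the model would receive after clicking the button
```
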
2. Custom function plugins

Write powerful function plugins to perform the tasks you want, including ones you had not thought possible.
Writing and debugging plugins in this project is generally easy; with some basic Python knowledge, you can implement your own functions on top of the template we provide.
For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).

---
# Latest update
## New dynamic features

1. Conversation saving. Call the "Save current conversation" function plugin to save the current conversation as a readable, restorable HTML file. Call "Load conversation history archive" in the drop-down menu of the plugin area to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file shows the cached HTML history archives; clicking "Delete all local conversation history records" deletes all HTML caches.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Report generation. Most plugins generate a work report after execution finishes.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modular function design: simple interfaces that support powerful features
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. This is an open-source project capable of "translating itself".
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Translating other open-source projects is simple.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. Decorative features for [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; requires modifying `config.py`)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. MOSS language model support
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI image generation
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio analysis and summarization
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Full-text LaTeX proofreading and error correction
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>

## Versions:
- Version 3.5 (todo): use natural language to call all of this project's functions (high priority)
- Version 3.4 (todo): improve multi-threading support for local chatglm
- Version 3.3: +integrated internet functions
- Version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language, asking any combination of LLMs simultaneously)
- Version 3.1: support for querying multiple gpt models at once! api2d support, load balancing across multiple api keys
- Version 3.0: support for chatglm and other small LLMs
- Version 2.6: refactored the plugin architecture, improved interactivity, added more plugins
- Version 2.5: self-updating; fixed overly long text and token overflow when summarizing large projects
- Version 2.4: (1) added full-text PDF translation; (2) added input-area position switching; (3) added vertical layout option; (4) optimized multi-threaded plugins
- Version 2.3: improved multi-threaded interactivity
- Version 2.2: plugin hot-reload support
- Version 2.1: collapsible layout
- Version 2.0: introduced modular function plugins
- Version 1.0: basic features

gpt_academic developers QQ group-2: 610599535

- Known issues
    - Translation extensions in some browsers can interfere with this software's front end
    - A Gradio version that is too high or too low can cause a variety of errors

## References and learning

```
The code draws on the design of many excellent projects, mainly:

# Project 1: Tsinghua's ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua's JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
322 docs/README_EN.md Normal file
@@ -0,0 +1,322 @@
> **Note**
>
> This English README is automatically generated by the markdown translation plugin in this project and may not be 100% correct.
>
> When installing dependencies, **please strictly select the versions** specified in requirements.txt.
>
> `pip install -r requirements.txt`

# GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or function plugins, feel free to open an issue or pull request.
To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**

> Note:
>
> 1. Please note that only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** in the plugin area. We welcome and process any new plugins with the **highest priority**!
> 2. The function of each file in this project is detailed in the self-translation analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). With version iteration, you can also click the related function plugins at any time to call GPT to regenerate the project's self-analysis report. Common questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
> 3. This project is compatible with, and encourages trying, domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple API keys are supported and can be filled in the configuration file like `API_KEY="openai-key1,openai-key2,api2d-key3"`. When temporarily changing `API_KEY`, enter the temporary `API_KEY` in the input area and press enter to submit; it will take effect immediately.

<div align="center">

Function | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click searching for grammar errors in papers.
One-click Chinese-English translation | One-click Chinese-English translation.
One-click code interpretation | Displays, explains, generates, and adds comments to code.
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
Modular design | Supports custom powerful [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the source code of this project
[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click profiling of other project trees in Python/C/C++/Java/Lua/...
Reading papers, [translating](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function plugin] One-click interpretation of full latex/pdf papers and generation of abstracts.
Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of latex papers.
Batch annotation generation | [Function plugin] One-click batch generation of function annotations.
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
Chat analysis report generation | [Function plugin] Automatically generates summary reports after running.
[PDF full-text translation function](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] PDF paper title & abstract extraction + full-text translation (multi-threaded)
[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the arxiv article url to translate the abstract and download the PDF with one click.
[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] One-click [lets GPT get information from the Internet first](https://www.bilibili.com/video/BV1om4y127ck), then answer questions, so the information is never outdated.
Formula/image/table display | Can display formulas in both [tex form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting.
Multi-threaded function plugin support | Supports multi-threaded calling of chatgpt; can process [massive text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs with one click.
Start dark Gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Add ```/?__theme=dark``` after the browser URL to switch to the dark theme.
[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | The feeling of being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must be great, right?
More LLM model access, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Added Newbing interface (New Bing); introduced Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV), and [Pangu-α](https://openi.org.cn/pangu/)
More new feature displays (image generation, etc.) ... | See the end of this document for more...
</div>

- New interface (modify the LAYOUT option in `config.py` to switch between "left and right layout" and "up and down layout")
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>

- All buttons are dynamically generated by reading `functional.py`; you can add custom functions freely, liberating the clipboard.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Polishing/correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are displayed in both `tex` and rendered form, making them easy to copy and read.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Tired of reading the project code? ChatGPT can explain it all.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Multiple large language models mixed together, such as ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---
# Installation
## Method 1: Directly running (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure the API_KEY

Configure the API KEY in `config.py` ([special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1)).

(P.S. When the program is running, it first checks whether a private configuration file named `config_private.py` exists and uses its configurations to override the same-named configurations in `config.py`. Therefore, if you understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configurations from `config.py` into it. `config_private.py` is not controlled by git and keeps your private information more secure. P.S. The project also supports configuring most options through `environment variables`; refer to the format of the `docker-compose` file when writing them. Reading priority: `environment variables` > `config_private.py` > `config.py`. A sketch of this priority is shown below.)
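
For illustration only — this is not the project's actual loader, just a minimal self-contained sketch of how the stated priority (`environment variables` > `config_private.py` > `config.py`) can be resolved:

```python
# Minimal sketch of the stated configuration priority; the project's real
# reader lives elsewhere and may differ in its details.
import importlib
import os

def get_conf(name):
    if name in os.environ:                       # 1st: an environment variable wins
        return os.environ[name]
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):               # 2nd: config_private.py, if present
            return getattr(private, name)
    except ImportError:
        pass
    return getattr(importlib.import_module("config"), name)  # 3rd: config.py
```
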
3. Install the dependencies
```sh
# (Option I: If familiar with python) (python version 3.9 or above, the newer the better), note: use the official pip source or the Ali pip source; temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: If not familiar with python) Use anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11 # create anaconda environment
conda activate gptac_venv # activate anaconda environment
python -m pip install -r requirements.txt # this step is the same as the pip installation
```

<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand</summary>
<p>

[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong enough):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is the torch+cpu version; to use cuda you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model precision in request_llm/bridge_chatglm.py, changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project

# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>

4. Run it
```sh
python main.py
```

5. Test the function plugins
```
- Test the function plugin template function (asks GPT what happened in history today); you can use it as a template to implement more complex functions
    Click "[Function Plugin Template Demo] Today in History"
```

## Installation - Method 2: Using Docker

1. ChatGPT Only (Recommended for Most People)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git # Download project
cd gpt_academic # Enter path
nano config.py # Edit config.py with any text editor, configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
docker build -t gpt-academic . # Install

#(Last step - option 1) In a Linux environment, use `--net=host` for convenience and speed.
docker run --rm -it --net=host gpt-academic
#(Last step - option 2) On macOS/windows environments, only the -p option can be used to expose the container's port (e.g. 50923) to the host.
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (Requires Docker Knowledge)

``` sh
# Modify docker-compose.yml: delete Plan 1 and Plan 3, keep Plan 2, and modify the configuration of Plan 2 in docker-compose.yml; refer to the comments in the file.
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (Requires Docker Knowledge)

``` sh
# Modify docker-compose.yml: delete Plan 1 and Plan 2, keep Plan 3, and modify the configuration of Plan 3 in docker-compose.yml; refer to the comments in the file.
docker-compose up
```

## Installation - Method 3: Other Deployment Options

1. How to use a reverse proxy URL / Microsoft Cloud Azure API
    Configure API_URL_REDIRECT according to the instructions in `config.py`.

2. Deploy to a remote server (requires knowledge and experience with cloud servers)
    Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)
    Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run under a sub-path (e.g. `http://localhost/subpath`)
    Please visit [FastAPI Running Instructions](docs/WithFastapi.md)

5. Using docker-compose to run
    Read docker-compose.yml and follow the prompts.
---
|
||||
# Advanced Usage
|
||||
## Custom New Shortcut Buttons / Custom Function Plugins
|
||||
|
||||
1. Custom New Shortcut Buttons (Academic Hotkey)
|
||||
Open `core_functional.py` with any text editor, add an entry as follows and restart the program. (If the button has been successfully added and is visible, the prefix and suffix can be hot-modified without having to restart the program.)
|
||||
For example,
|
||||
```
|
||||
"Super English-to-Chinese": {
|
||||
# Prefix, which will be added before your input. For example, used to describe your requests, such as translation, code explanation, polishing, etc.
|
||||
"Prefix": "Please translate the following content into Chinese and then use a markdown table to explain the proprietary terms that appear in the text:\n\n",
|
||||
|
||||
# Suffix, which is added after your input. For example, with the prefix, your input content can be surrounded by quotes.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
2. Custom Function Plugins
|
||||
|
||||
Write powerful function plugins to perform any task you can think of, even those you cannot think of.
|
||||
The difficulty of plugin writing and debugging in this project is very low. As long as you have a certain knowledge of Python, you can implement your own plug-in functions based on the template we provide.
|
||||
For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).

---
# Latest Update
## New Feature Dynamics

1. Conversation saving. Call `Save current conversation` in the function plugin area to save the current conversation as a readable and restorable HTML file. Call `Load conversation history archive` in the function plugin area (dropdown menu) to restore a previous session. Tip: clicking `Load conversation history archive` without specifying a file shows the cached HTML archives, and clicking `Delete all local conversation history` deletes all cached HTML archives.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Report generation. Most plugins generate a work report after execution.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modular function design: simple interfaces that support powerful functions.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. This is an open-source project that can "translate itself".

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Translating other open-source projects is a piece of cake.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. A small decorative feature powered by [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; enable it by modifying `config.py`).

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. Added MOSS large language model support.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI image generation.

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio parsing and summarization.

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Full-text proofreading and error correction for LaTeX.

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>

## Versions:
- version 3.5 (todo): Use natural language to call all function plugins of this project (high priority).
- version 3.4 (todo): Improve multi-threading support for locally deployed ChatGLM models.
- version 3.3: + Internet information aggregation.
- version 3.2: Function plugins support more parameter interfaces (conversation saving, interpreting code in any language + asking any combination of LLMs at the same time).
- version 3.1: Support querying multiple GPT models simultaneously! Support api2d, and support load balancing across multiple API keys.
- version 3.0: Support chatglm and other small LLMs.
- version 2.6: Refactored the plugin structure, improved interactivity, and added more plugins.
- version 2.5: Self-updating; fixed text overflow and token overflow when summarizing the source code of large projects.
- version 2.4: (1) Added PDF full-text translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: Enhanced multi-threaded interactivity.
- version 2.2: Function plugins support hot reloading.
- version 2.1: Collapsible layout.
- version 2.0: Introduced modular function plugins.
- version 1.0: Basic functions.

gpt_academic Developer QQ Group-2: 610599535

- Known Issues
    - Some browser translation plugins interfere with the front-end operation of this software.
    - A gradio version that is either too new or too old will cause various exceptions.

## Reference and Learning

```
Many other excellent designs are referenced in the code, mainly including:

# Project 1: THU ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: THU JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```

323
docs/README_FR.md
Normal file
323
docs/README_FR.md
Normal file
@@ -0,0 +1,323 @@
|
||||
> **Note**
|
||||
>
|
||||
> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut-être pas correct à 100 %.
|
||||
>
|
||||
> During installation, please strictly select the versions **specified** in requirements.txt.
|
||||
>
|
||||
> `pip install -r requirements.txt`
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > Optimisation académique GPT (GPT Academic)
|
||||
|
||||
**Si vous aimez ce projet, veuillez lui donner une étoile. Si vous avez trouvé des raccourcis académiques ou des plugins fonctionnels plus utiles, n'hésitez pas à ouvrir une issue ou une pull request.**
|
||||
Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> 1. Veuillez noter que seuls les plugins de fonctions (boutons) **en rouge** prennent en charge la lecture de fichiers. Certains plugins se trouvent dans le **menu déroulant** de la zone de plugins. De plus, nous accueillons et traitons les nouvelles pull requests pour les plugins avec **la plus haute priorité**!
|
||||
>
|
||||
> 2. Les fonctions de chaque fichier de ce projet sont expliquées en détail dans l'auto-analyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins de fonctions pertinents et appeler GPT pour régénérer le rapport d'auto-analyse du projet à tout moment. Les FAQ sont résumées dans [le wiki](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Méthode d'installation](#installation).
|
||||
>
|
||||
> 3. Ce projet est compatible avec et encourage l'utilisation de grands modèles de langage nationaux tels que chatglm, RWKV, Pangu, etc. La coexistence de plusieurs clés API est prise en charge et peut être remplie dans le fichier de configuration, tel que `API_KEY="openai-key1,openai-key2,api2d-key3"`. Lorsque vous souhaitez remplacer temporairement `API_KEY`, saisissez temporairement `API_KEY` dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer.
|
||||
|
||||
<div align="center">
|
||||
|
||||
Fonctionnalité | Description
|
||||
--- | ---
|
||||
Révision en un clic | prend en charge la révision en un clic et la recherche d'erreurs de syntaxe dans les articles
|
||||
Traduction chinois-anglais en un clic | Traduction chinois-anglais en un clic
|
||||
Explication de code en un clic | Affichage, explication, génération et ajout de commentaires de code
|
||||
[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | prend en charge les raccourcis personnalisés
|
||||
Conception modulaire | prend en charge de puissants plugins de fonction personnalisée, les plugins prennent en charge la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||
[Autoscanner](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] [Compréhension instantanée](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) du code source de ce projet
|
||||
[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] Analyse en un clic de la structure d'autres projets Python / C / C ++ / Java / Lua / ...
|
||||
Lecture d'articles, [traduction](https://www.bilibili.com/video/BV1KT411x7Wn) d'articles | [Plug-in de fonction] Compréhension instantanée de l'article latex / pdf complet et génération de résumés
|
||||
[Traduction](https://www.bilibili.com/video/BV1nk4y1Y7Js/) et [révision](https://www.bilibili.com/video/BV1FT411H7c5/) complets en latex | [Plug-in de fonction] traduction ou révision en un clic d'articles en latex
|
||||
Génération de commentaires en masse | [Plug-in de fonction] Génération en un clic de commentaires de fonction en masse
|
||||
Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) en Markdown | [Plug-in de fonction] avez-vous vu la [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) pour les 5 langues ci-dessus?
|
||||
Génération de rapports d'analyse de chat | [Plug-in de fonction] Génère automatiquement un rapport de résumé après l'exécution
|
||||
[Traduction intégrale en pdf](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plug-in de fonction] Extraction de titre et de résumé de l'article pdf + traduction intégrale (multi-thread)
|
||||
[Aide à arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plug-in de fonction] Entrer l'url de l'article arxiv pour traduire et télécharger le résumé en un clic
|
||||
[Aide à la recherche Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plug-in de fonction] Donnez l'URL de la page de recherche Google Scholar, laissez GPT vous aider à [écrire des ouvrages connexes](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||
Agrégation d'informations en ligne et GPT | [Plug-in de fonction] Permet à GPT de [récupérer des informations en ligne](https://www.bilibili.com/video/BV1om4y127ck), puis de répondre aux questions, afin que les informations ne soient jamais obsolètes
|
||||
Affichage d'équations / images / tableaux | Fournit un affichage simultané de [la forme tex et de la forme rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), prend en charge les formules mathématiques et la coloration syntaxique du code
|
||||
Prise en charge des plugins à plusieurs threads | prend en charge l'appel multithread de chatgpt, un clic pour traiter [un grand nombre d'articles](https://www.bilibili.com/video/BV1FT411H7c5/) ou de programmes
|
||||
Thème gradio sombre en option de démarrage | Ajoutez```/?__theme=dark``` à la fin de l'URL du navigateur pour basculer vers le thème sombre
|
||||
[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Sera probablement très agréable d'être servi simultanément par GPT3.5, GPT4, [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS)
|
||||
Plus de modèles LLM, déploiement de [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout prise en charge de l'interface Newbing (nouvelle bing), introduction du support de [Jittorllms de Tsinghua](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) et [Panguα](https://openi.org.cn/pangu/)
|
||||
Plus de nouvelles fonctionnalités (génération d'images, etc.) ... | Voir la fin de ce document pour plus de détails ...
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
- Nouvelle interface (modifier l'option LAYOUT de `config.py` pour passer d'une disposition ``gauche-droite`` à une disposition ``haut-bas``)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>

- Tous les boutons sont générés dynamiquement en lisant functional.py et peuvent être facilement personnalisés pour ajouter des fonctionnalités personnalisées, ce qui facilite l'utilisation du presse-papiers.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Correction d'erreurs/lissage du texte.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Si la sortie contient des équations, elles sont affichées à la fois sous forme de tex et sous forme rendue pour faciliter la lecture et la copie.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Pas envie de lire les codes de ce projet? Tout le projet est directement exposé par ChatGPT.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Appel à une variété de modèles de langage de grande envergure (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
---
|
||||
# Installation
|
||||
## Installation - Méthode 1: Exécution directe (Windows, Linux ou macOS)
|
||||
|
||||
1. Télécharger le projet
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git
|
||||
cd gpt_academic
|
||||
```
|
||||
|
||||
2. Configuration de la clé API
|
||||
|
||||
Dans `config.py`, configurez la clé API et d'autres paramètres. Consultez [Special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||
|
||||
(P.S. Lorsque le programme est exécuté, il vérifie en premier s'il existe un fichier de configuration privé nommé `config_private.py` et remplace les paramètres portant le même nom dans `config.py` par les paramètres correspondants dans `config_private.py`. Par conséquent, si vous comprenez la logique de lecture de nos configurations, nous vous recommandons vivement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de `config.py`. `config_private.py` n'est pas contrôlé par Git et peut garantir la sécurité de vos informations privées. P.S. Le projet prend également en charge la configuration de la plupart des options via "variables d'environnement", le format d'écriture des variables d'environnement est référencé dans le fichier `docker-compose`. Priorité de lecture: "variables d'environnement" > `config_private.py` > `config.py`)
|
||||
|
||||
|
||||
3. Installer les dépendances
|
||||
```sh
|
||||
# (Option I: python users installation) (Python version 3.9 or higher, the newer the better). Note: use the official pip source or the ali pip source. To temporarily change the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (Option II: non-python users installation) Use Anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||
conda create -n gptac_venv python=3.11 # Create anaconda env
|
||||
conda activate gptac_venv # Activate anaconda env
|
||||
python -m pip install -r requirements.txt # Same step as the pip installation
|
||||
```
|
||||
|
||||
<details><summary>Cliquez ici pour afficher le texte si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend.</summary>
|
||||
<p>
|
||||
|
||||
【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur):
|
||||
```sh
|
||||
# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
|
||||
# 【Optional Step II】 Support FDU MOSS
|
||||
python -m pip install -r request_llm/requirements_moss.txt
|
||||
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path.
|
||||
|
||||
# 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme):
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
```
|
||||
|
||||
</p>
|
||||
</details>
|
||||
|
||||
|
||||
|
||||
4. Exécution
|
||||
```sh
|
||||
python main.py
|
||||
```

5. Plugin de fonction de test
|
||||
```
|
||||
- Fonction de modèle de plugin de test (requiert que GPT réponde à ce qui s'est passé dans l'histoire aujourd'hui), vous pouvez utiliser cette fonction comme modèle pour mettre en œuvre des fonctionnalités plus complexes.
|
||||
Cliquez sur "[Démo de modèle de plugin de fonction] Aujourd'hui dans l'histoire"
|
||||
```
|
||||
|
||||
## Installation - Méthode 2: Utilisation de Docker
|
||||
|
||||
1. ChatGPT uniquement (recommandé pour la plupart des gens)
|
||||
|
||||
``` sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git # Télécharger le projet
|
||||
cd gpt_academic # Accéder au chemin
|
||||
nano config.py # Editez config.py avec n'importe quel éditeur de texte en configurant "Proxy", "API_KEY" et "WEB_PORT" (p. ex. 50923)
|
||||
docker build -t gpt-academic . # Installer
|
||||
|
||||
# (Dernière étape - choix1) Dans un environnement Linux, l'utilisation de `--net=host` est plus facile et rapide
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
# (Dernière étape - choix 2) Dans un environnement macOS/Windows, seule l'option -p permet d'exposer le port du conteneur (p. ex. 50923) au port de l'hôte.
|
||||
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||
```
|
||||
|
||||
2. ChatGPT + ChatGLM + MOSS (il faut connaître Docker)
|
||||
|
||||
``` sh
|
||||
# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 3, conservez la solution 2. Modifiez la configuration de la solution 2 dans docker-compose.yml en suivant les commentaires.
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
3. ChatGPT + LLAMA + PanGu + RWKV (il faut connaître Docker)
|
||||
``` sh
|
||||
# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 2, conservez la solution 3. Modifiez la configuration de la solution 3 dans docker-compose.yml en suivant les commentaires.
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
|
||||
## Installation - Méthode 3: Autres méthodes de déploiement
|
||||
|
||||
1. Comment utiliser une URL de proxy inversé / Microsoft Azure Cloud API
|
||||
Configurez simplement API_URL_REDIRECT selon les instructions de config.py.
|
||||
|
||||
2. Déploiement distant sur un serveur cloud (connaissance et expérience des serveurs cloud requises)
|
||||
Veuillez consulter le [Wiki de déploiement-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
|
||||
|
||||
3. Utilisation de WSL2 (sous-système Windows pour Linux)
|
||||
Veuillez consulter le [Wiki de déploiement-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2).
|
||||
|
||||
4. Comment exécuter sous un sous-répertoire (tel que `http://localhost/subpath`)
|
||||
Veuillez consulter les [instructions d'exécution de FastAPI](docs/WithFastapi.md).
|
||||
|
||||
5. Utilisation de docker-compose
|
||||
Veuillez lire docker-compose.yml, puis suivre les instructions fournies.
|
||||
|
||||
# Utilisation avancée
|
||||
## Personnalisation de nouveaux boutons pratiques / Plugins de fonctions personnalisées
|
||||
|
||||
1. Personnalisation de nouveaux boutons pratiques (raccourcis académiques)
|
||||
Ouvrez core_functional.py avec n'importe quel éditeur de texte, ajoutez une entrée comme suit, puis redémarrez le programme. (Si le bouton a été ajouté avec succès et est visible, le préfixe et le suffixe prennent en charge les modifications à chaud et ne nécessitent pas le redémarrage du programme pour prendre effet.)
|
||||
Par exemple
|
||||
```
|
||||
"Super coller sens": {
|
||||
# Préfixe, sera ajouté avant votre entrée. Par exemple, pour décrire votre demande, telle que traduire, expliquer du code, faire la mise en forme, etc.
|
||||
"Prefix": "Veuillez traduire le contenu suivant en chinois, puis expliquer chaque terme proprement nommé qui y apparaît avec un tableau markdown:\n\n",
|
||||
|
||||
# Suffixe, sera ajouté après votre entrée. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu d'entrée de guillemets.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
2. Plugins de fonctions personnalisées
|
||||
|
||||
Écrivez des plugins de fonctions puissants pour effectuer toutes les tâches que vous souhaitez ou que vous ne pouvez pas imaginer.
|
||||
Les plugins de ce projet ont une difficulté de programmation et de débogage très faible. Si vous avez des connaissances de base en Python, vous pouvez simuler la fonctionnalité de votre propre plugin en suivant le modèle que nous avons fourni.
|
||||
Veuillez consulter le [Guide du plugin de fonction](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails.
|
||||
|
||||
---
|
||||
# Latest Update
|
||||
|
||||
## Nouvelles fonctionnalités en cours de déploiement.
|
||||
|
||||
1. Fonction de sauvegarde de la conversation.
|
||||
Appelez simplement "Enregistrer la conversation actuelle" dans la zone de plugin de fonction pour enregistrer la conversation actuelle en tant que fichier html lisible et récupérable. De plus, dans la zone de plugin de fonction (menu déroulant), appelez "Charger une archive de l'historique de la conversation" pour restaurer la conversation précédente. Astuce : cliquer directement sur "Charger une archive de l'historique de la conversation" sans spécifier de fichier permet de consulter le cache d'archive html précédent. Cliquez sur "Supprimer tous les enregistrements locaux de l'historique de la conversation" pour supprimer le cache d'archive html.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
2. Générer un rapport. La plupart des plugins génèrent un rapport de travail après l'exécution.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||
</div>
|
||||
|
||||
3. Conception de fonctionnalités modulaires avec une interface simple mais capable d'une fonctionnalité puissante.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||
</div>
|
||||
|
||||
4. C'est un projet open source qui peut "se traduire de lui-même".
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
|
||||
</div>
|
||||
|
||||
5. Traduire d'autres projets open source n'est pas un problème.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
|
||||
</div>
|
||||
|
||||
6. Fonction de décoration de live2d (désactivée par défaut, nécessite une modification de config.py).
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
|
||||
</div>
|
||||
|
||||
7. Prise en charge du modèle de langue MOSS.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
|
||||
</div>
|
||||
|
||||
8. Génération d'images OpenAI.
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
|
||||
</div>
|
||||
|
||||
9. Analyse et synthèse vocales OpenAI.
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
|
||||
</div>
|
||||
|
||||
10. Relecture et correction d'erreurs sur le texte intégral en LaTeX.
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
|
||||
</div>
|
||||
|
||||
|
||||
## Versions :
|
||||
- version 3.5 (À faire) : appel de toutes les fonctions de plugin de ce projet en langage naturel (priorité élevée)
|
||||
- version 3.4 (À faire) : amélioration du support multi-thread de chatglm en local
|
||||
- version 3.3 : Fonctionnalité intégrée d'informations d'internet
|
||||
- version 3.2 : La fonction du plugin de fonction prend désormais en charge des interfaces de paramètres plus nombreuses (fonction de sauvegarde, décodage de n'importe quel langage de code + interrogation simultanée de n'importe quelle combinaison de LLM)
|
||||
- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Support api2d, équilibrage de charge multi-clé api.
|
||||
- version 3.0 : Prise en charge de chatglm et autres LLM de petite taille.
|
||||
- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de plus de plugins.
|
||||
- version 2.5 : Auto-mise à jour, résolution des problèmes de texte trop long et de dépassement de jetons lors de la compilation du projet global.
|
||||
- version 2.4 : (1) Nouvelle fonction de traduction de texte intégral PDF ; (2) Nouvelle fonction de permutation de position de la zone d'entrée ; (3) Nouvelle option de mise en page verticale ; (4) Amélioration des fonctions multi-thread de plug-in.
|
||||
- version 2.3 : Amélioration de l'interactivité multithread.
|
||||
- version 2.2 : Les plugins de fonctions peuvent désormais être rechargés à chaud.
|
||||
- version 2.1 : Disposition pliable
|
||||
- version 2.0 : Introduction de plugins de fonctions modulaires
|
||||
- version 1.0 : Fonctionnalités de base
|
||||
|
||||
gpt_academic développeur QQ groupe-2:610599535
|
||||
|
||||
- Problèmes connus
|
||||
- Certains plugins de traduction de navigateur perturbent le fonctionnement de l'interface frontend de ce logiciel
|
||||
- Des versions gradio trop hautes ou trop basses provoquent de nombreuses anomalies
|
||||
|
||||
## Référence et apprentissage
|
||||
|
||||
```
|
||||
De nombreux autres excellents projets ont été référencés dans le code, notamment :
|
||||
|
||||
# Projet 1 : ChatGLM-6B de Tsinghua :
|
||||
https://github.com/THUDM/ChatGLM-6B
|
||||
|
||||
# Projet 2 : JittorLLMs de Tsinghua :
|
||||
https://github.com/Jittor/JittorLLMs
|
||||
|
||||
# Projet 3 : Edge-GPT :
|
||||
https://github.com/acheong08/EdgeGPT
|
||||
|
||||
# Projet 4 : ChuanhuChatGPT :
|
||||
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||
|
||||
# Projet 5 : ChatPaper :
|
||||
https://github.com/kaixindelele/ChatPaper
|
||||
|
||||
# Plus :
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
```
|
||||
329
docs/README_JP.md
Normal file
329
docs/README_JP.md
Normal file
@@ -0,0 +1,329 @@
|
||||
> **Note**
|
||||
>
|
||||
> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
|
||||
>
|
||||
> When installing dependencies, please strictly choose the versions specified in `requirements.txt`.
|
||||
>
|
||||
> `pip install -r requirements.txt`
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > GPT 学术优化 (GPT Academic)
|
||||
|
||||
**もしこのプロジェクトが好きなら、星をつけてください。もしあなたがより良いアカデミックショートカットまたは機能プラグインを思いついた場合、Issueをオープンするか pull request を送信してください。私たちはこのプロジェクト自体によって翻訳された[英語 |](README_EN.md)[日本語 |](README_JP.md)[한국어 |](https://github.com/mldljyh/ko_gpt_academic)[Русский |](README_RS.md)[Français](README_FR.md)のREADMEも用意しています。**
|
||||
GPTを使った任意の言語にこのプロジェクトを翻訳するには、[`multi_language.py`](multi_language.py)を読んで実行してください。 (experimental)。
|
||||
|
||||
> **注意**
|
||||
>
|
||||
> 1. **赤色**で表示された関数プラグイン(ボタン)のみ、ファイルの読み取りをサポートしています。一部のプラグインは、プラグインエリアの**ドロップダウンメニュー**内にあります。また、私たちはどんな新しいプラグインのPRでも、**最優先**で歓迎し、処理します!
|
||||
>
|
||||
> 2. このプロジェクトの各ファイルの機能は、自己解析の詳細説明書である[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)で説明されています。バージョンが進化するにつれて、関連する関数プラグインをいつでもクリックし、GPTを呼び出してプロジェクトの自己解析レポートを再生成することができます。よくある問題は[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)にまとめられています。[インストール方法](#installation)。
|
||||
|
||||
> 3. このプロジェクトは、chatglmやRWKV、盤古(Pangu)など、国内の大規模自然言語モデルを利用することをサポートし、試みることを奨励します。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,api2d-key3"`のように記入することができます。`API_KEY`を一時的に変更する場合は、入力エリアに一時的な`API_KEY`を入力してEnterキーを押せば、それが有効になります。
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
機能 | 説明
|
||||
--- | ---
|
||||
ワンクリック校正 | ワンクリックで校正し、論文の文法エラーを検索できる
ワンクリック中英翻訳 | ワンクリックで中国語と英語の相互翻訳が可能
ワンクリックコード解説 | コードを表示し、解説し、生成し、コードに注釈をつけることができる
|
||||
[自分でカスタマイズ可能なショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | 自分でカスタマイズ可能なショートカットキーをサポートする
|
||||
モジュール化された設計 | カスタマイズ可能な[強力な関数プラグイン](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions)をサポートし、プラグインは[ホットアップデート](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)に対応している
|
||||
[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] このプロジェクトのソースコードを[ワンクリックで読解](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)
|
||||
プログラム解析 | [関数プラグイン] 一鍵で他のPython/C/C++/Java/Lua/...プロジェクトを分析できる
|
||||
論文の読み、[翻訳](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] LaTex/ PDF論文の全文を一鍵で読み解き、要約を生成することができる
|
||||
LaTex全文[翻訳](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[校正](https://www.bilibili.com/video/BV1FT411H7c5/) | [関数プラグイン] LaTex論文の翻訳または校正を一鍵で行うことができる
|
||||
一括で注釈を生成 | [関数プラグイン] 一鍵で関数に注釈をつけることができる
|
||||
Markdown[中英翻訳](https://www.bilibili.com/video/BV1yo4y157jV/) | [関数プラグイン] 上記の5種類の言語の[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)を見たことがありますか?
|
||||
チャット分析レポート生成 | [関数プラグイン] 実行後、自動的に概要報告書を生成する
|
||||
[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文からタイトルと要約を抽出し、全文を翻訳する(マルチスレッド)
|
||||
[Arxivアシスタント](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] arxiv記事のURLを入力するだけで、要約を一鍵翻訳し、PDFをダウンロードできる
|
||||
[Google Scholar 総合アシスタント](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが[related works](https://www.bilibili.com/video/BV1GP411U7Az/)を作成する
|
||||
インターネット情報収集+GPT | [関数プラグイン] まずGPTに[インターネットから情報を収集](https://www.bilibili.com/video/BV1om4y127ck)してから質問に回答させ、情報が常に最新であるようにする
|
||||
数式/画像/表表示 | 数式の[tex形式とレンダリング形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)を同時に表示し、数式、コードハイライトをサポートしている
|
||||
マルチスレッド関数プラグインがサポートされている | chatgptをマルチスレッドで呼び出し、[大量のテキスト](https://www.bilibili.com/video/BV1FT411H7c5/)またはプログラムを一鍵で処理できる
|
||||
ダークグラジオ[テーマの起動](https://github.com/binary-husky/gpt_academic/issues/173) | ブラウザのURLの後ろに```/?__theme=dark```を追加すると、ダークテーマを切り替えることができます。
|
||||
[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)がサポートされ、[API2D](https://api2d.com/)がサポートされている | 同時にGPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[復旦MOSS](https://github.com/OpenLMLab/MOSS)に対応
|
||||
より多くのLLMモデルが接続され、[huggingfaceデプロイ](https://huggingface.co/spaces/qingxu98/gpt-academic)がサポートされている | Newbingインターフェイス(Newbing)、清華大学の[Jittorllm](https://github.com/Jittor/JittorLLMs)のサポート[LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV)と[盘古α](https://openi.org.cn/pangu/)
|
||||
さらに多くの新機能(画像生成など)を紹介する... | この文書の最後に示す...
|
||||
</div>
|
||||
|
||||
- 新しいインターフェース(`config.py`のLAYOUTオプションを変更することで、「左右配置」と「上下配置」を切り替えることができます)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>

- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added to free the clipboard.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Polishing/Correction
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- If the output contains formulas, they are displayed in both TeX and rendering forms, making it easy to copy and read.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Don't feel like looking at the project code? Just ask chatgpt directly.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
|
||||
- Mixed calls of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
# Installation
|
||||
|
||||
## Installation-Method 1: Directly run (Windows, Linux or MacOS)
|
||||
|
||||
1. Download the project.
|
||||
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git
|
||||
cd gpt_academic
|
||||
```
|
||||
|
||||
2. Configure the API_KEY.
|
||||
|
||||
Configure the API KEY and other settings in `config.py` and [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||
|
||||
(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py`, and use the configuration in it to override the same name configuration in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variables` > `config_private.py` > `config.py`)
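
For example, a minimal `config_private.py` might contain just the handful of values you change; everything else keeps falling through to `config.py`. The concrete values below are placeholders, not recommendations.

```python
# config_private.py -- a minimal hypothetical override file.
# Any variable defined here shadows the same-named one in config.py,
# so keep only the options you actually change.
API_KEY = "sk-your-real-key"   # placeholder value; never commit a real key
WEB_PORT = 50923               # example: pin the port used elsewhere in this README
```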
|
||||
|
||||
3. Install dependencies.
|
||||
|
||||
```sh
|
||||
# (Choose I: If familiar with Python)(Python version 3.9 or above, the newer the better) Note: Use the official pip source or Ali pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (Choose II: If not familiar with Python) Use anaconda, the steps are the same (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||
conda create -n gptac_venv python=3.11 # Create anaconda environment.
|
||||
conda activate gptac_venv # Activate the anaconda environment.
|
||||
python -m pip install -r requirements.txt # This step is the same as the pip installation step.
|
||||
```
|
||||
|
||||
<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand.</summary>
|
||||
<p>
|
||||
|
||||
[Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (preconditions: familiar with Python + have used PyTorch + a sufficiently powerful computer):
|
||||
|
||||
```sh
|
||||
# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
|
||||
# Optional Step II: Support Fudan MOSS.
|
||||
python -m pip install -r request_llm/requirements_moss.txt
|
||||
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root.
|
||||
|
||||
# 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution):
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
```
|
||||
|
||||
</p>
|
||||
</details>
|
||||
|
||||
|
||||
|
||||
4. Run.
|
||||
|
||||
```sh
|
||||
python main.py
|
||||
```

5. Testing Function Plugin
|
||||
```
|
||||
- Test function plugin template function (requires gpt to answer what happened today in history), you can use this function as a template to implement more complex functions
|
||||
Click "[Function Plugin Template Demo] Today in History"
|
||||
```
|
||||
|
||||
## Installation-Method 2: Using Docker
|
||||
|
||||
1. Only ChatGPT (recommended for most people)
|
||||
|
||||
``` sh
|
||||
git clone https://github.com/binary-husky/gpt_academic.git # Download project
|
||||
cd gpt_academic # Enter path
|
||||
nano config.py # Edit config.py with any text editor - configure "Proxy", "API_KEY", "WEB_PORT" (e.g., 50923) and more
|
||||
docker build -t gpt-academic . # installation
|
||||
|
||||
#(Last step-Option 1) In a Linux environment, `--net=host` is more convenient and quick
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
#(Last step-Option 2) In a macOS/Windows environment, only the -p option can be used to expose the container port (e.g., 50923) to a port on the host.
|
||||
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||
```
|
||||
|
||||
2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
|
||||
|
||||
``` sh
|
||||
# Modify docker-compose.yml, delete plans 1 and 3, and retain plan 2. Modify the configuration of plan 2 in docker-compose.yml, and reference the comments for instructions.
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
|
||||
``` sh
|
||||
# Modify docker-compose.yml, delete plans 1 and 2, and retain plan 3. Modify the configuration of plan 3 in docker-compose.yml, and reference the comments for instructions.
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
|
||||
## Installation-Method 3: Other Deployment Methods
|
||||
|
||||
1. How to use proxy URL/Microsoft Azure API
|
||||
Configure API_URL_REDIRECT according to the instructions in `config.py`.
|
||||
|
||||
2. Remote Cloud Server Deployment (requires cloud server knowledge and experience)
|
||||
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
3. Using WSL2 (Windows Subsystem for Linux)
|
||||
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
|
||||
4. How to run on a secondary URL (such as `http://localhost/subpath`)
|
||||
Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
|
||||
|
||||
5. Run with docker-compose
|
||||
Please read docker-compose.yml and follow the instructions provided therein.
|
||||
---
|
||||
# Advanced Usage
|
||||
## Customize new convenience buttons/custom function plugins
|
||||
|
||||
1. Custom new convenience buttons (academic shortcut keys)
|
||||
Open `core_functional.py` with any text editor, add the item as follows, and restart the program. (If the button has been added successfully and is visible, the prefix and suffix support hot modification without restarting the program.)
|
||||
example:
|
||||
```
|
||||
"Super English to Chinese Translation": {
|
||||
# Prefix, which will be added before your input. For example, used to describe your request, such as translation, code interpretation, polish, etc.
|
||||
"Prefix": "Please translate the following content into Chinese, and explain the proper nouns in the text in a markdown table one by one:\n\n",
|
||||
|
||||
# Suffix, which will be added after your input. For example, in combination with the prefix, you can surround your input content with quotation marks.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
2. Custom function plugins
|
||||
|
||||
Write powerful function plugins to perform any task you can and cannot think of.
|
||||
The difficulty of writing and debugging plugins in this project is low, and as long as you have a certain amount of python basic knowledge, you can follow the template provided by us to achieve your own plugin functions.
|
||||
For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||
|
||||
---
|
||||
# Latest Update
|
||||
## New feature dynamics.
|
||||
1. ダイアログの保存機能。関数プラグインエリアで '現在の会話を保存' を呼び出すと、現在のダイアログを読み取り可能で復元可能なHTMLファイルとして保存できます。さらに、関数プラグインエリア(ドロップダウンメニュー)で 'ダイアログの履歴保存ファイルを読み込む' を呼び出すことで、以前の会話を復元することができます。Tips:ファイルを指定せずに 'ダイアログの履歴保存ファイルを読み込む' をクリックすることで、過去のHTML保存ファイルのキャッシュを表示することができます。'すべてのローカルダイアログの履歴を削除' をクリックすることで、すべてのHTML保存ファイルのキャッシュを削除できます。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500">
|
||||
</div>
|
||||
|
||||
|
||||
2. 報告書を生成します。ほとんどのプラグインは、実行が終了した後に作業報告書を生成します。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300">
|
||||
</div>
|
||||
|
||||
3. モジュール化された機能設計、簡単なインターフェースで強力な機能をサポートする。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400">
|
||||
</div>
|
||||
|
||||
4. 自分自身を翻訳できるオープンソースプロジェクトです。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500">
|
||||
</div>
|
||||
|
||||
|
||||
5. 他のオープンソースプロジェクトの翻訳も容易です。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500">
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500">
|
||||
</div>
|
||||
|
||||
6. [Live2D](https://github.com/fghrsh/live2d_demo)による装飾の小機能です。(デフォルトでは無効になっており、有効にするには`config.py`を変更する必要があります。)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500">
|
||||
</div>
|
||||
|
||||
7. 新たにMOSS大言語モデルのサポートを追加しました。
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500">
|
||||
</div>
|
||||
|
||||
8. OpenAI画像生成
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500">
|
||||
</div>
|
||||
|
||||
9. OpenAIオーディオの解析とサマリー
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500">
|
||||
</div>
|
||||
|
||||
10. LaTeX全文の校正・誤り訂正
|
||||
<div align="center">
|
||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500">
|
||||
</div>
|
||||
|
||||
|
||||
## バージョン:
|
||||
- version 3.5(作業中):すべての関数プラグインを自然言語で呼び出すことができるようにする(高い優先度)。
|
||||
- version 3.4(作業中):chatglmのローカルモデルのマルチスレッドをサポートすることで、機能を改善する。
|
||||
- version 3.3:+Web情報の総合機能
|
||||
- version 3.2:関数プラグインでさらに多くのパラメータインターフェイスをサポートする(ダイアログの保存機能、任意の言語コードの解読+同時に任意のLLM組み合わせに関する問い合わせ)
|
||||
- version 3.1:複数のGPTモデルを同時に質問できるようになりました! api2dをサポートし、複数のAPIキーを均等に負荷分散することができます。
|
||||
- version 3.0:chatglmとその他の小型LLMのサポート。
|
||||
- version 2.6:プラグイン構造を再構築し、対話内容を高め、より多くのプラグインを追加しました。
|
||||
- version 2.5:自己アップデートし、長文書やトークンのオーバーフローの問題を解決しました。
|
||||
- version 2.4:(1)全文翻訳のPDF機能を追加しました。(2)入力エリアの位置切り替え機能を追加しました。(3)垂直レイアウトオプションを追加しました。(4)マルチスレッド関数プラグインを最適化しました。
|
||||
- version 2.3:マルチスレッド性能の向上。
|
||||
- version 2.2:関数プラグインのホットリロードをサポートする。
|
||||
- version 2.1:折りたたみ式レイアウト。
|
||||
- version 2.0:モジュール化された関数プラグインを導入。
|
||||
- version 1.0:基本機能
|
||||
|
||||
gpt_academic開発者QQグループ-2:610599535
|
||||
|
||||
- 既知の問題
|
||||
- 一部のブラウザ翻訳プラグインが、このソフトウェアのフロントエンドの実行を妨害する
|
||||
- gradioバージョンが高すぎるか低すぎると、多くの異常が引き起こされる
|
||||
|
||||
## 参考学習
|
||||
|
||||
```
|
||||
コードの中には、他の優れたプロジェクトの設計から参考にしたものがたくさん含まれています:
|
||||
|
||||
# プロジェクト1:清華ChatGLM-6B:
|
||||
https://github.com/THUDM/ChatGLM-6B
|
||||
|
||||
# プロジェクト2:清華JittorLLMs:
|
||||
https://github.com/Jittor/JittorLLMs
|
||||
|
||||
# プロジェクト3:Edge-GPT:
|
||||
https://github.com/acheong08/EdgeGPT
|
||||
|
||||
# プロジェクト4:ChuanhuChatGPT:
|
||||
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||
|
||||
# プロジェクト5:ChatPaper:
|
||||
https://github.com/kaixindelele/ChatPaper
|
||||
|
||||
# その他:
|
||||
https://github.com/gradio-app/gradio
|
||||
https://github.com/fghrsh/live2d_demo
|
||||
```
|
||||
278
docs/README_RS.md
Normal file
278
docs/README_RS.md
Normal file
@@ -0,0 +1,278 @@
|
||||
> **Note**
|
||||
>
|
||||
> Этот файл README автоматически сгенерирован плагином перевода markdown этого проекта и может быть не на 100 % точным.
|
||||
>
|
||||
# <img src="logo.png" width="40" > GPT Академическая оптимизация (GPT Academic)
|
||||
|
||||
**Если вам нравится этот проект, пожалуйста, поставьте ему звезду. Если вы придумали более полезные языковые ярлыки или функциональные плагины, не стесняйтесь открывать issue или pull request.**
|
||||
Чтобы перевести этот проект на произвольный язык с помощью GPT, ознакомьтесь и запустите [`multi_language.py`](multi_language.py) (экспериментальный).
|
||||
|
||||
> **Примечание**
|
||||
>
|
||||
> 1. Обратите внимание, что только функциональные плагины (кнопки), помеченные **красным цветом**, поддерживают чтение файлов, некоторые плагины находятся в **выпадающем меню** в области плагинов. Кроме того, мы с наивысшим приоритетом рады и обрабатываем pull requests для любых новых плагинов!
|
||||
>
|
||||
> 2. В каждом файле проекта функциональность описана в документе самоанализа [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). С каждой итерацией выполнения версии вы можете в любое время вызвать повторное создание отчета о самоанализе этого проекта, щелкнув соответствующий функциональный плагин и вызвав GPT. Вопросы сборки описаны в [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Метод установки](#installation).
|
||||
>
|
||||
> 3. Этот проект совместим и поощряет использование китайских языковых моделей chatglm и RWKV, пангу и т. Д. Поддержка нескольких api-key, которые могут существовать одновременно, может быть указан в файле конфигурации, например `API_KEY="openai-key1,openai-key2,api2d-key3"`. Если требуется временно изменить `API_KEY`, введите временный `API_KEY` в области ввода и нажмите клавишу Enter, чтобы он вступил в силу.
|
||||
|
||||
> **Примечание**
|
||||
>
|
||||
> При установке зависимостей строго выбирайте версии, **указанные в файле requirements.txt**.
|
||||
>
|
||||
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
Функция | Описание
|
||||
--- | ---
|
||||
Однокнопочная полировка | Поддержка однокнопочной полировки текста и поиска грамматических ошибок в научных статьях
|
||||
Однокнопочный перевод на английский и китайский | Однокнопочный перевод на английский и китайский
|
||||
One-click code explanation | Display, explain, generate, and comment code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports user-defined shortcut keys
Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/Function-Plug-in-Guide)
[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click [review](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) of this project's own source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the source tree of other Python/C/C++/Java/Lua/... projects
Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click reading of the full text of a scientific paper and generation of a summary
Full [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) translation and polishing | [Function plugin] One-click translation or polishing of a LaTeX paper
Automatic comment generation | [Function plugin] One-click generation of function comments
Markdown [translation](https://www.bilibili.com/video/BV1yo4y157jV/) between Chinese and English | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in these 5 languages?
Chat analysis report | [Function plugin] Automatically generates a summary report after a run
[PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) full-text translation | [Function plugin] Extracts the title and abstract of a [PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) and translates the entire document (multi-threaded)
[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the URL of an arxiv paper to translate its abstract and download the PDF with one click
[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you [write a related-work review](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] One-click [asking GPT with internet access](https://www.bilibili.com/video/BV1om4y127ck), so that information never goes stale
Display of formulas / images / tables | Shows formulas in both [TeX and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
Dark gradio theme | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
[Multi-LLM support](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
More LLM connections, support for [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) deployment | Newbing interface (new Bing), support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu α](https://openi.org.cn/pangu/)
More new features (image generation, etc.) | See the end of this document ...

- All buttons are dynamically generated by reading functional.py, so custom functions can be added freely, liberating the clipboard
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Revision/Correction

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are displayed in both TeX and rendered form for easy copying and reading

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't feel like looking at the project code? Show the entire project directly in chatgpt

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>
---
# Installation
## Installation - Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

In `config.py`, configure the API KEY and other settings; see also the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists, and uses its values to override the same-named options in `config.py`. If you understand this reading logic, we strongly recommend creating `config_private.py` next to `config.py` and moving (copying) your settings there: `config_private.py` is not tracked by git, which keeps your private information safer. The project also supports configuring most options through environment variables, whose format follows the `docker-compose` file. Read priority: environment variable > `config_private.py` > `config.py`.)
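
For example, a minimal `config_private.py` might look like the sketch below (the key names are assumed to match those defined in `config.py`, and every value is a placeholder):

```python
# config_private.py -- a minimal sketch; key names assumed to match config.py
API_KEY = "sk-your-real-key-here"   # placeholder, keep the real key out of git
USE_PROXY = True                    # hypothetical override of the default
proxies = {
    "http":  "socks5h://localhost:11284",   # example proxy address
    "https": "socks5h://localhost:11284",
}
```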

3. Install dependencies
```sh
# (Option I: if familiar with Python; Python 3.9 or above, the newer the better.) Note: use the official pip source or the Aliyun pip source; to switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: if unfamiliar with Python) use Anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # create an Anaconda environment
conda activate gptac_venv                 # activate the Anaconda environment
python -m pip install -r requirements.txt # this step is the same as the pip installation
```

<details><summary>If you need Tsinghua ChatGLM / Fudan MOSS as a backend, click here to expand</summary>
<p>

[Optional step] If you need Tsinghua ChatGLM / Fudan MOSS as a backend, install these additional dependencies first (prerequisites: comfortable with Python, have used PyTorch, and a reasonably powerful machine):
```sh
# [Optional step I] Support Tsinghua ChatGLM. Note: if you hit the "Call ChatGLM fail, cannot load ChatGLM parameters normally" error, check the following: 1) the default install above is the torch+cpu build; using CUDA requires uninstalling torch and reinstalling torch+cuda; 2) if the model cannot load because your machine is not powerful enough, reduce the model precision in request_llm/bridge_chatglm.py by changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # note: you must be in the project root when running this line

# [Optional step III] Make sure AVAIL_LLM_MODELS in config.py contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```
</p>
</details>


4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Test the function plugin template (it asks GPT what happened in history on this day); use this function as a template to implement more complex functions.
  Click "[Function plugin template demo] On this day in history"
```

## Installation - Method 2: Using Docker

1. ChatGPT only (recommended for most people)

```sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
cd gpt_academic                                             # enter the path
nano config.py                                              # edit config.py with any text editor to configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923)
docker build -t gpt-academic .                              # build the image

# (Last step, option 1) In a Linux environment, using `--net=host` is more convenient and faster
docker run --rm -it --net=host gpt-academic
# (Last step, option 2) In a macOS/Windows environment, only the -p option can expose the container's port (e.g. 50923) on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

```sh
# Edit docker-compose.yml: delete solutions 1 and 3, keep solution 2, and modify its configuration following the comments in the file
docker-compose up
```

3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
```sh
# Edit docker-compose.yml: delete solutions 1 and 2, keep solution 3, and modify its configuration following the comments in the file
docker-compose up
```


## Installation - Method 3: Other Deployment Methods

1. How to use a reverse proxy URL / the Microsoft Azure API

Configure API_URL_REDIRECT according to the instructions in `config.py`.
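
For instance, the redirect is a dictionary mapping the official endpoint to your own URL. A sketch (the exact key follows the comments in `config.py`; the proxy URL below is a placeholder):

```python
# in config.py or config_private.py -- placeholder URL for illustration
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions":
        "https://your-reverse-proxy.example.com/v1/chat/completions"
}
```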

2. Remote cloud server deployment (requires knowledge and experience of cloud servers)

Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)

Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run under a sub-path (such as `http://localhost/subpath`)

Please visit [FastAPI Operation Instructions](docs/WithFastapi.md)

5. Running with docker-compose

Please read docker-compose.yml and follow the instructions in it.

---
# Advanced Usage
## Customize new convenient buttons / custom function plugins

1. Customize new convenient buttons (academic shortcuts)

Open `core_functional.py` with any text editor, add an entry as follows, then restart the program. (If the button is already added and visible, both the prefix and the suffix can be hot-modified without restarting the program.)

For example:
```
"Super English to Chinese": {
    # Prefix: added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
    "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n",

    # Suffix: added after your input. For example, combined with the prefix, it can enclose your input in quotation marks.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins

Write powerful function plugins to perform any task you can or cannot imagine.
Writing and debugging plugins in this project is easy: with basic Python knowledge, you can implement your own plugin by imitating the template we provide, as shown in the sketch below.
For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
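
As a taste of what a plugin looks like, here is a minimal sketch modeled on the project's template plugin. The exact signature and helper names may differ between versions; treat `HelloWorldPlugin` and its argument list as illustrative:

```python
# crazy_functions/HelloWorld.py -- illustrative sketch, not the official template
from toolbox import CatchException, update_ui

@CatchException
def HelloWorldPlugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # echo the user's input back into the chat window
    chatbot.append((txt, f"[Local Message] You said: {txt}"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```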

---
# Latest Update
## New features

1. Conversation saving. Call "Save current conversation" in the function-plugin area to save the current conversation as a readable and restorable HTML file. Call "Load conversation history archive" in the function-plugin dropdown menu to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file shows the cache of historical HTML archives; click "Delete all local conversation history records" to delete all cached HTML archives.

2. Report generation. Most plugins generate a work report after they finish running.

3. Modular function design: simple interfaces that nevertheless support powerful functionality.

4. This is an open-source project that can "translate itself".

5. Translating other open-source projects is not a problem.

6. Small [live2d](https://github.com/fghrsh/live2d_demo) decoration features (disabled by default; enabling requires editing `config.py`).

7. Support for the MOSS large language model.

8. OpenAI image generation.

9. OpenAI audio analysis and summarization.

10. Full-cycle LaTeX proofreading.
## Versions:
- Version 3.5 (todo): call the project's function plugins using natural language (high priority)
- Version 3.4 (todo): improve multi-threading support for locally deployed large chat models
- Version 3.3: added internet information aggregation
- Version 3.2: function plugins support more parameter interfaces (conversation saving, analysis of arbitrary programming languages, and querying arbitrary LLM combinations at the same time)
- Version 3.1: support for querying multiple GPT models simultaneously! Support for api2d, load balancing across multiple api keys
- Version 3.0: support for chatglm and other small LLMs
- Version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
- Version 2.5: self-updating; fixed the problem of overly long text and token overflow when summarizing large projects
- Version 2.4: (1) added PDF full-text translation; (2) added input-area position switching; (3) added a vertical layout option; (4) optimized multi-threaded function plugins
- Version 2.3: improved multi-threaded interactivity
- Version 2.2: function plugins support hot reloading
- Version 2.1: collapsible layout
- Version 2.0: introduced modular function plugins
- Version 1.0: basic functions

gpt_academic developer QQ group-2: 610599535

- Known issues
    - Some browser translation plugins interfere with the front-end of this software
    - A gradio version that is too new or too old causes many exceptions

## References and Learning Materials

```
The code references the designs of many other excellent projects, including:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: Chuanhu ChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```

@@ -1668,7 +1668,7 @@
 "Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
 "Langchain知识库": "LangchainKnowledgeBase",
 "Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
-"Latex输出PDF结果": "OutputPDFFromLatex",
+"Latex输出PDF": "OutputPDFFromLatex",
 "Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
 "sprint亮靛": "SprintIndigo",
 "寻找Latex主文件": "FindLatexMainFile",

@@ -3004,5 +3004,7 @@
 "1. 上传图片": "TranslatedText",
 "保存状态": "TranslatedText",
 "GPT-Academic对话存档": "TranslatedText",
-"Arxiv论文精细翻译": "TranslatedText"
+"Arxiv论文精细翻译": "TranslatedText",
+"from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering",
+"测试图表渲染": "test_chart_rendering"
 }

@@ -1492,7 +1492,7 @@
 "交互功能模板函数": "InteractiveFunctionTemplateFunction",
 "交互功能函数模板": "InteractiveFunctionFunctionTemplate",
 "Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
-"Latex输出PDF结果": "LatexOutputPDFResult",
+"Latex输出PDF": "LatexOutputPDFResult",
 "Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
 "语音助手": "VoiceAssistant",
 "微调数据集生成": "FineTuneDatasetGeneration",

@@ -16,7 +16,7 @@
 "批量Markdown翻译": "BatchTranslateMarkdown",
 "连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
 "Langchain知识库": "LangchainKnowledgeBase",
-"Latex输出PDF结果": "OutputPDFFromLatex",
+"Latex输出PDF": "OutputPDFFromLatex",
 "把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
 "Latex精细分解与转化": "DecomposeAndConvertLatex",
 "解析一个C项目的头文件": "ParseCProjectHeaderFiles",

@@ -97,5 +97,12 @@
 "多智能体": "MultiAgent",
 "图片生成_DALLE2": "ImageGeneration_DALLE2",
 "图片生成_DALLE3": "ImageGeneration_DALLE3",
-"图片修改_DALLE2": "ImageModification_DALLE2"
-}
+"图片修改_DALLE2": "ImageModification_DALLE2",
+"生成多种Mermaid图表": "GenerateMultipleMermaidCharts",
+"知识库文件注入": "InjectKnowledgeBaseFiles",
+"PDF翻译中文并重新编译PDF": "TranslatePDFToChineseAndRecompilePDF",
+"随机小游戏": "RandomMiniGame",
+"互动小游戏": "InteractiveMiniGame",
+"解析历史输入": "ParseHistoricalInput",
+"高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram"
+}

@@ -1468,7 +1468,7 @@
 "交互功能模板函数": "InteractiveFunctionTemplateFunctions",
 "交互功能函数模板": "InteractiveFunctionFunctionTemplates",
 "Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
-"Latex输出PDF结果": "OutputPDFFromLatex",
+"Latex输出PDF": "OutputPDFFromLatex",
 "Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
 "语音助手": "VoiceAssistant",
 "微调数据集生成": "FineTuneDatasetGeneration",

flagged/modeling_moss.py (new file, 2952 lines): diff suppressed because it is too large.
@@ -31,6 +31,9 @@ from .bridge_qianfan import predict as qianfan_ui
 from .bridge_google_gemini import predict as genai_ui
 from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui

+from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
+from .bridge_zhipu import predict as zhipu_ui
+
 colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

 class LazyloadTiktoken(object):

@@ -44,13 +47,13 @@ class LazyloadTiktoken(object):
         tmp = tiktoken.encoding_for_model(model)
         print('加载tokenizer完毕')
         return tmp

     def encode(self, *args, **kwargs):
         encoder = self.get_encoder(self.model)
         return encoder.encode(*args, **kwargs)

     def decode(self, *args, **kwargs):
         encoder = self.get_encoder(self.model)
         return encoder.decode(*args, **kwargs)

 # Endpoint redirection

@@ -63,7 +66,7 @@ azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/compl
 # backward compatibility for the old-style config
 try:
     API_URL = get_conf("API_URL")
     if API_URL != "https://api.openai.com/v1/chat/completions":
         openai_endpoint = API_URL
         print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
 except:

@@ -95,7 +98,7 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },

     "gpt-3.5-turbo-16k": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,

@@ -185,7 +188,7 @@ model_info = {
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
     },

     "gpt-4-vision-preview": {
         "fn_with_ui": chatgpt_vision_ui,
         "fn_without_ui": chatgpt_vision_noui,

@@ -215,16 +218,25 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },

-    # api_2d (no need to add more api2d interfaces here; the code below adds them automatically)
-    "api2d-gpt-3.5-turbo": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": api2d_endpoint,
-        "max_token": 4096,
+    # ZhipuAI
+    "glm-4": {
+        "fn_with_ui": zhipu_ui,
+        "fn_without_ui": zhipu_noui,
+        "endpoint": None,
+        "max_token": 10124 * 8,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "glm-3-turbo": {
+        "fn_with_ui": zhipu_ui,
+        "fn_without_ui": zhipu_noui,
+        "endpoint": None,
+        "max_token": 10124 * 4,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },

+    # api_2d (no need to add more api2d interfaces here; the code below adds them automatically)
     "api2d-gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
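
For orientation, every entry above follows the same schema, so registering an additional backend is just another `model_info.update(...)` call. A sketch (the model name and endpoint are placeholders; the field names are taken from the entries shown above):

```python
# a sketch: registering one more model entry with the same fields as above
model_info.update({
    "my-custom-model": {
        "fn_with_ui": chatgpt_ui,        # streaming predict function for the UI
        "fn_without_ui": chatgpt_noui,   # long-connection predict function
        "endpoint": "https://example.com/v1/chat/completions",  # placeholder
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    }
})
```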
@@ -548,7 +560,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # iFLYTEK Spark
         })
     except:
         print(trimmed_format_exc())
-if "sparkv3" in AVAIL_LLM_MODELS: # iFLYTEK Spark
+if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # iFLYTEK Spark
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
         from .bridge_spark import predict as spark_ui

@@ -560,6 +572,14 @@ if "sparkv3" in AVAIL_LLM_MODELS: # iFLYTEK Spark
             "max_token": 4096,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
+        "sparkv3.5": {
+            "fn_with_ui": spark_ui,
+            "fn_without_ui": spark_noui,
+            "endpoint": None,
+            "max_token": 4096,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
     })
 except:

@@ -580,19 +600,17 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
         })
     except:
         print(trimmed_format_exc())
-if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
+if "zhipuai" in AVAIL_LLM_MODELS: # "zhipuai" is an alias of glm-4, kept for backward-compatible configs
     try:
-        from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
-        from .bridge_zhipu import predict as zhipu_ui
         model_info.update({
             "zhipuai": {
                 "fn_with_ui": zhipu_ui,
                 "fn_without_ui": zhipu_noui,
                 "endpoint": None,
-                "max_token": 4096,
+                "max_token": 10124 * 8,
                 "tokenizer": tokenizer_gpt35,
                 "token_cnt": get_token_num_gpt35,
-            }
+            },
         })
     except:
         print(trimmed_format_exc())

@@ -635,7 +653,7 @@ AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
 if len(AZURE_CFG_ARRAY) > 0:
     for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
         # this may overwrite earlier entries, which is expected
         if not azure_model_name.startswith('azure'):
             raise ValueError("AZURE_CFG_ARRAY中配置的模型必须以azure开头")
         endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
             f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'

@@ -701,7 +719,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
     executor = ThreadPoolExecutor(max_workers=4)
     models = model.split('&')
     n_model = len(models)

     window_len = len(observe_window)
     assert window_len==3
     window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]

@@ -720,7 +738,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         time.sleep(0.25)
         if not window_mutex[-1]: break
         # watchdog
         for i in range(n_model):
             window_mutex[i][1] = observe_window[1]
         # observation window
         chat_string = []

@@ -113,6 +113,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
         error_msg = get_full_error(chunk, stream_response).decode()
         if "reduce the length" in error_msg:
             raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
+        elif """type":"upstream_error","param":"307""" in error_msg:
+            raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
         else:
             raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
     if ('data: [DONE]' in chunk_decoded): break # api2d finished normally

@@ -57,6 +57,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

     if "vision" in llm_kwargs["llm_model"]:
         have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
+        if not have_recent_file:
+            chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
+            yield from update_ui(chatbot=chatbot, history=history, msg="等待图片")  # refresh the UI
+            return
         def make_media_input(inputs, image_paths):
             for image_path in image_paths:
                 inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'

@@ -146,21 +146,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     yield from update_ui(chatbot=chatbot, history=history)
     # start receiving the reply
     try:
+        response = f"[Local Message] 等待{model_name}响应中 ..."
         for response in generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
             chatbot[-1] = (inputs, response)
             yield from update_ui(chatbot=chatbot, history=history)
+        history.extend([inputs, response])
+        yield from update_ui(chatbot=chatbot, history=history)
     except ConnectionAbortedError as e:
         from .bridge_all import model_info
         if len(history) >= 2: history[-1] = ""; history[-2] = ""  # clear the overflowed turn: history[-2] is this turn's input, history[-1] is this turn's output
         history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
                                max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token']))  # release at least half of the history
         chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
         yield from update_ui(chatbot=chatbot, history=history, msg="异常")  # refresh the UI
         return

-    # summarize the output
-    response = f"[Local Message] {model_name}响应异常 ..."
-    if response == f"[Local Message] 等待{model_name}响应中 ...":
-        response = f"[Local Message] {model_name}响应异常 ..."
-    history.extend([inputs, response])
-    yield from update_ui(chatbot=chatbot, history=history)

@@ -51,6 +51,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # start receiving the reply
     from .com_qwenapi import QwenRequestInstance
     sri = QwenRequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

@@ -56,6 +56,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # start receiving the reply
     from .com_skylark2api import YUNQUERequestInstance
     sri = YUNQUERequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

@@ -9,7 +9,7 @@ model_name = '星火认知大模型'

 def validate_key():
     XFYUN_APPID = get_conf('XFYUN_APPID')
     if XFYUN_APPID == '00000000' or XFYUN_APPID == '':
         return False
     return True

@@ -49,9 +49,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         from core_functional import handle_core_functionality
         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

     # start receiving the reply
     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
     for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

@@ -1,15 +1,21 @@

 import time
 import os
 from toolbox import update_ui, get_conf, update_ui_lastest_msg
-from toolbox import check_packages, report_exception
+from toolbox import check_packages, report_exception, have_any_recent_upload_image_files

 model_name = '智谱AI大模型'
+zhipuai_default_model = 'glm-4'

 def validate_key():
     ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY")
     if ZHIPUAI_API_KEY == '': return False
     return True

+def make_media_input(inputs, image_paths):
+    for image_path in image_paths:
+        inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
+    return inputs
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
     """
     ⭐ multi-threaded method; see request_llms/bridge_all.py for documentation

@@ -18,34 +24,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     watch_dog_patience = 5
     response = ""

+    if llm_kwargs["llm_model"] == "zhipuai":
+        llm_kwargs["llm_model"] = zhipuai_default_model
+
     if validate_key() is False:
         raise RuntimeError('请配置ZHIPUAI_API_KEY')

-    from .com_zhipuapi import ZhipuRequestInstance
-    sri = ZhipuRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+    # start receiving the reply
+    from .com_zhipuglm import ZhipuChatInit
+    zhipu_bro_init = ZhipuChatInit()
+    for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
         if len(observe_window) >= 1:
             observe_window[0] = response
         if len(observe_window) >= 2:
-            if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
+            if (time.time() - observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("程序终止。")
     return response

-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
     """
     ⭐ single-threaded method; see request_llms/bridge_all.py for documentation
     """
-    chatbot.append((inputs, ""))
+    chatbot.append([inputs, ""])
     yield from update_ui(chatbot=chatbot, history=history)

     # try importing the dependency; if it is missing, suggest how to install it
     try:
         check_packages(["zhipuai"])
     except:
-        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
-                                         chatbot=chatbot, history=history, delay=0)
+        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
+                                         chatbot=chatbot, history=history, delay=0)
         return

     if validate_key() is False:
         yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
         return

@@ -53,16 +65,29 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     if additional_fn is not None:
         from core_functional import handle_core_functionality
         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

-    # start receiving the reply
-    from .com_zhipuapi import ZhipuRequestInstance
-    sri = ZhipuRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
-        chatbot[-1] = (inputs, response)
-        yield from update_ui(chatbot=chatbot, history=history)
-
-    # summarize the output
-    if response == f"[Local Message] 等待{model_name}响应中 ...":
-        response = f"[Local Message] {model_name}响应异常 ..."
+    if llm_kwargs["llm_model"] == "zhipuai":
+        llm_kwargs["llm_model"] = zhipuai_default_model
+
+    if llm_kwargs["llm_model"] in ["glm-4v"]:
+        have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
+        if not have_recent_file:
+            chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
+            yield from update_ui(chatbot=chatbot, history=history, msg="等待图片")  # refresh the UI
+            return
+        if have_recent_file:
+            inputs = make_media_input(inputs, image_paths)
+            chatbot[-1] = [inputs, ""]
+            yield from update_ui(chatbot=chatbot, history=history)
+
+    # start receiving the reply
+    from .com_zhipuglm import ZhipuChatInit
+    zhipu_bro_init = ZhipuChatInit()
+    for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
+        chatbot[-1] = [inputs, response]
+        yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
     yield from update_ui(chatbot=chatbot, history=history)

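To see the alias handling above in action, a minimal call through the multi-threaded entry point might look like this (a sketch; `llm_kwargs` only needs the keys the code above actually reads, and `.get` defaults cover `temperature` and `top_p`):

```python
# a sketch: "zhipuai" is rewritten to zhipuai_default_model ("glm-4") before the request
from request_llms.bridge_zhipu import predict_no_ui_long_connection

llm_kwargs = {"llm_model": "zhipuai"}
reply = predict_no_ui_long_connection("你好", llm_kwargs, history=[], sys_prompt="You are a helpful assistant.")
print(reply)
```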
@@ -7,7 +7,7 @@ import os
 import re
 import requests
 from typing import List, Dict, Tuple
-from toolbox import get_conf, encode_image, get_pictures_list
+from toolbox import get_conf, encode_image, get_pictures_list, to_markdown_tabs

 proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")

@@ -112,34 +112,6 @@ def html_local_img(__file, layout="left", max_width=None, max_height=None, md=Tr
     return a


-def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
-    """
-    Args:
-        head: table header, e.g. []
-        tabs: table values, one inner list per column, e.g. [[col1], [col2], [col3], [col4]]
-        alignment: :--- left-aligned, :---: centered, ---: right-aligned
-        column: True to keep data in columns, False to keep data in rows (default).
-    Returns:
-        A string representation of the markdown table.
-    """
-    if column:
-        transposed_tabs = list(map(list, zip(*tabs)))
-    else:
-        transposed_tabs = tabs
-    # Find the maximum length among the columns
-    max_len = max(len(column) for column in transposed_tabs)
-
-    tab_format = "| %s "
-    tabs_list = "".join([tab_format % i for i in head]) + "|\n"
-    tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
-
-    for i in range(max_len):
-        row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
-        row_data = file_manifest_filter_html(row_data, filter_=None)
-        tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
-
-    return tabs_list
-
-
 class GoogleChatInit:
     def __init__(self):

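Since the helper moved to `toolbox` (see the import change above), its behavior is worth a quick illustration. A sketch based on the docstring: `tabs` holds one inner list per column, and `column=True` transposes them into rows:

```python
from toolbox import to_markdown_tabs  # new location, per the import change above

head = ["file", "size"]
tabs = [["a.png", "b.txt"], ["12KB", "3KB"]]  # one inner list per column
print(to_markdown_tabs(head, tabs, column=True))
# expected shape of the output:
# | file | size |
# | :---: | :---: |
# | a.png | 12KB |
# | b.txt | 3KB |
```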
@@ -65,6 +65,7 @@ class SparkRequestInstance():
         self.gpt_url = "ws://spark-api.xf-yun.com/v1.1/chat"
         self.gpt_url_v2 = "ws://spark-api.xf-yun.com/v2.1/chat"
         self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
+        self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
         self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"

         self.time_to_yield_event = threading.Event()

@@ -91,6 +92,8 @@ class SparkRequestInstance():
             gpt_url = self.gpt_url_v2
         elif llm_kwargs['llm_model'] == 'sparkv3':
             gpt_url = self.gpt_url_v3
+        elif llm_kwargs['llm_model'] == 'sparkv3.5':
+            gpt_url = self.gpt_url_v35
         else:
             gpt_url = self.gpt_url
         file_manifest = []

@@ -190,6 +193,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
         "spark": "general",
         "sparkv2": "generalv2",
         "sparkv3": "generalv3",
+        "sparkv3.5": "generalv3.5",
     }
     domains_select = domains[llm_kwargs['llm_model']]
     if file_manifest: domains_select = 'image'

request_llms/com_zhipuglm.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# encoding: utf-8
# @Time   : 2024/1/22
# @Author : Kilig947 & binary husky
# @Descr  : compatibility layer for the latest ZhipuAI SDK
from zhipuai import ZhipuAI
from toolbox import get_conf, encode_image, get_pictures_list
import logging, os


def input_encode_handler(inputs, llm_kwargs):
    # collect the most recently uploaded images (if any) and base64-encode them
    md_encode = []
    if llm_kwargs["most_recent_uploaded"].get("path"):
        image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
        for md_path in image_paths:
            type_ = os.path.splitext(md_path)[1].replace(".", "")
            type_ = "jpeg" if type_ == "jpg" else type_
            md_encode.append({"data": encode_image(md_path), "type": type_})
    return inputs, md_encode


class ZhipuChatInit:

    def __init__(self):
        ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL")
        if len(ZHIPUAI_MODEL) > 0:
            logging.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置')
        self.zhipu_bro = ZhipuAI(api_key=ZHIPUAI_API_KEY)
        self.model = ''

    def __conversation_user(self, user_input: str, llm_kwargs):
        if self.model not in ["glm-4v"]:
            return {"role": "user", "content": user_input}
        else:
            # vision model: send text plus any recently uploaded images
            input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
            what_i_have_asked = {"role": "user", "content": []}
            what_i_have_asked['content'].append({"type": 'text', "text": user_input})
            if encode_img:
                img_d = {"type": "image_url",
                         "image_url": {'url': encode_img}}
                what_i_have_asked['content'].append(img_d)
            return what_i_have_asked

    def __conversation_history(self, history, llm_kwargs):
        messages = []
        conversation_cnt = len(history) // 2
        if conversation_cnt:
            for index in range(0, 2 * conversation_cnt, 2):
                what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
                what_gpt_answer = {
                    "role": "assistant",
                    "content": history[index + 1]
                }
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
        return messages

    def __conversation_message_payload(self, inputs, llm_kwargs, history, system_prompt):
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        self.model = llm_kwargs['llm_model']
        messages.extend(self.__conversation_history(history, llm_kwargs))  # process the history turns
        messages.append(self.__conversation_user(inputs, llm_kwargs))      # append the current user turn
        response = self.zhipu_bro.chat.completions.create(
            model=self.model, messages=messages, stream=True,
            temperature=llm_kwargs.get('temperature', 0.95) * 0.95,  # only scaled defaults of temperature and top_p are passed
            top_p=llm_kwargs.get('top_p', 0.7) * 0.7,
            max_tokens=llm_kwargs.get('max_tokens', 1024 * 4),       # cap the output at half of the model maximum
        )
        return response

    def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
        self.model = llm_kwargs['llm_model']
        response = self.__conversation_message_payload(inputs, llm_kwargs, history, system_prompt)
        bro_results = ''
        for chunk in response:
            bro_results += chunk.choices[0].delta.content
            yield chunk.choices[0].delta.content, bro_results


if __name__ == '__main__':
    zhipu = ZhipuChatInit()
    # generate_chat is a generator: iterate it to actually issue the request
    for chunk, full_reply in zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi'):
        print(chunk, end='')
request_llms/moss: submodule added at 4d905bcead
@@ -1,9 +1,10 @@
-https://public.gpt-academic.top/publish/gradio-3.32.7-py3-none-any.whl
+https://public.agent-matrix.com/publish/gradio-3.32.8-py3-none-any.whl
+gradio-client==0.8
 pypdf2==2.12.1
-zhipuai<2
+zhipuai>=2
 tiktoken>=0.3.3
 requests[socks]
-pydantic==1.10.11
+pydantic==2.5.2
 protobuf==3.18
 transformers>=4.27.1
 scipdf_parser>=0.52

shared_utils/handle_upload.py (new file, 137 lines)
@@ -0,0 +1,137 @@
import importlib
import time
import inspect
import re
import os
import base64
import gradio
import shutil
import glob
from shared_utils.config_loader import get_conf


def html_local_file(file):
    base_path = os.path.dirname(__file__)  # project directory
    if os.path.exists(str(file)):
        file = f'file={file.replace(base_path, ".")}'
    return file


def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
    style = ""
    if max_width is not None:
        style += f"max-width: {max_width};"
    if max_height is not None:
        style += f"max-height: {max_height};"
    __file = html_local_file(__file)
    a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
    if md:
        a = f"![{__file}]({__file})"
    return a


def file_manifest_filter_type(file_list, filter_: list = None):
    new_list = []
    if not filter_:
        filter_ = ["png", "jpg", "jpeg"]
    for file in file_list:
        if str(os.path.basename(file)).split(".")[-1] in filter_:
            new_list.append(html_local_img(file, md=False))
        else:
            new_list.append(file)
    return new_list


def zip_extract_member_new(self, member, targetpath, pwd):
    # fix garbled Chinese file names
    """Extract the ZipInfo object 'member' to a physical
    file on the path targetpath.
    """
    import zipfile
    if not isinstance(member, zipfile.ZipInfo):
        member = self.getinfo(member)

    # build the destination pathname, replacing
    # forward slashes to platform specific separators.
    arcname = member.filename.replace('/', os.path.sep)
    arcname = arcname.encode('cp437', errors='replace').decode('gbk', errors='replace')

    if os.path.altsep:
        arcname = arcname.replace(os.path.altsep, os.path.sep)
    # interpret absolute pathname as relative, remove drive letter or
    # UNC path, redundant separators, "." and ".." components.
    arcname = os.path.splitdrive(arcname)[1]
    invalid_path_parts = ('', os.path.curdir, os.path.pardir)
    arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                               if x not in invalid_path_parts)
    if os.path.sep == '\\':
        # filter illegal characters on Windows
        arcname = self._sanitize_windows_name(arcname, os.path.sep)

    targetpath = os.path.join(targetpath, arcname)
    targetpath = os.path.normpath(targetpath)

    # Create all upper directories if necessary.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        os.makedirs(upperdirs)

    if member.is_dir():
        if not os.path.isdir(targetpath):
            os.mkdir(targetpath)
        return targetpath

    with self.open(member, pwd=pwd) as source, \
            open(targetpath, "wb") as target:
        shutil.copyfileobj(source, target)

    return targetpath


def extract_archive(file_path, dest_dir):
    import zipfile
    import tarfile
    import os

    # Get the file extension of the input file
    file_extension = os.path.splitext(file_path)[1]

    # Extract the archive based on its extension
    if file_extension == ".zip":
        with zipfile.ZipFile(file_path, "r") as zipobj:
            zipobj._extract_member = lambda a, b, c: zip_extract_member_new(zipobj, a, b, c)  # fix garbled Chinese file names
            zipobj.extractall(path=dest_dir)
            print("Successfully extracted zip archive to {}".format(dest_dir))

    elif file_extension in [".tar", ".gz", ".bz2"]:
        with tarfile.open(file_path, "r:*") as tarobj:
            tarobj.extractall(path=dest_dir)
            print("Successfully extracted tar archive to {}".format(dest_dir))

    # Third-party library; requires pip install rarfile beforehand.
    # On Windows, the WinRAR software must also be installed and added to Path, e.g. "C:\Program Files\WinRAR"
    elif file_extension == ".rar":
        try:
            import rarfile

            with rarfile.RarFile(file_path) as rf:
                rf.extractall(path=dest_dir)
            print("Successfully extracted rar archive to {}".format(dest_dir))
        except:
            print("Rar format requires additional dependencies to install")
            return "\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议:使用zip压缩格式。"

    # Third-party library; requires pip install py7zr beforehand
    elif file_extension == ".7z":
        try:
            import py7zr

            with py7zr.SevenZipFile(file_path, mode="r") as f:
                f.extractall(path=dest_dir)
            print("Successfully extracted 7z archive to {}".format(dest_dir))
        except:
            print("7z format requires additional dependencies to install")
            return "\n\n解压失败! 需要安装pip install py7zr来解压7z文件"
    else:
        return ""
    return ""

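A quick usage sketch for the extractor above (the paths are placeholders; the cp437-to-GBK monkey patch only applies to `.zip` archives):

```python
from shared_utils.handle_upload import extract_archive

# extract a zip whose member names were written with a Chinese (GBK) encoding
err = extract_archive("private_upload/测试.zip", dest_dir="private_upload/extracted")
if err:
    print(err)  # a non-empty string signals a missing optional dependency (rarfile / py7zr)
```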
@@ -20,10 +20,10 @@ if __name__ == "__main__":

     # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})

-    # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
+    # plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2307.07522")

     plugin_test(
-        plugin="crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF",
+        plugin="crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF",
         main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
     )

@@ -66,7 +66,7 @@ if __name__ == "__main__":

     # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")

-    # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2210.03629")
+    # plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2210.03629")

     # advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" }
     # plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)

theme.py (new file, 353 lines)
@@ -0,0 +1,353 @@
|
||||
import gradio as gr
|
||||
from toolbox import get_conf
|
||||
CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
|
||||
# gradio可用颜色列表
|
||||
# gr.themes.utils.colors.slate (石板色)
|
||||
# gr.themes.utils.colors.gray (灰色)
|
||||
# gr.themes.utils.colors.zinc (锌色)
|
||||
# gr.themes.utils.colors.neutral (中性色)
|
||||
# gr.themes.utils.colors.stone (石头色)
|
||||
# gr.themes.utils.colors.red (红色)
|
||||
# gr.themes.utils.colors.orange (橙色)
|
||||
# gr.themes.utils.colors.amber (琥珀色)
|
||||
# gr.themes.utils.colors.yellow (黄色)
|
||||
# gr.themes.utils.colors.lime (酸橙色)
|
||||
# gr.themes.utils.colors.green (绿色)
|
||||
# gr.themes.utils.colors.emerald (祖母绿)
|
||||
# gr.themes.utils.colors.teal (青蓝色)
|
||||
# gr.themes.utils.colors.cyan (青色)
|
||||
# gr.themes.utils.colors.sky (天蓝色)
|
||||
# gr.themes.utils.colors.blue (蓝色)
|
||||
# gr.themes.utils.colors.indigo (靛蓝色)
|
||||
# gr.themes.utils.colors.violet (紫罗兰色)
|
||||
# gr.themes.utils.colors.purple (紫色)
|
||||
# gr.themes.utils.colors.fuchsia (洋红色)
|
||||
# gr.themes.utils.colors.pink (粉红色)
|
||||
# gr.themes.utils.colors.rose (玫瑰色)
|
||||
|
||||
|
||||
def adjust_theme():

    try:
        color_er = gr.themes.utils.colors.fuchsia
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
                  "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
            font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
            # Transition
            button_transition="none",
            # Shadows
            button_shadow="*shadow_drop",
            button_shadow_hover="*shadow_drop_lg",
            button_shadow_active="*shadow_inset",
            input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
            input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
            input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
            checkbox_label_shadow="*shadow_drop",
            block_shadow="*shadow_drop",
            form_gap_width="1px",
            # Button borders
            input_border_width="1px",
            input_background_fill="white",
            # Gradients
            stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
            stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
            error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
            error_background_fill_dark="*background_fill_primary",
            checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
            checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
            checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
            button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
            button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
            button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
            button_primary_border_color_dark="*primary_500",
            button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
            button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
            button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
            button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
            button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
            button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
            button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
            button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
            button_cancel_border_color=color_er.c200,
            button_cancel_border_color_dark=color_er.c600,
            button_cancel_text_color=color_er.c600,
            button_cancel_text_color_dark="white",
        )

        # add a cute Live2D mascot
        if ADD_WAIFU:
            js = """
            <script src="file=docs/waifu_plugin/jquery.min.js"></script>
            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
            gradio_original_template_fn = gr.routes.templates.TemplateResponse
            def gradio_new_template_fn(*args, **kwargs):
                res = gradio_original_template_fn(*args, **kwargs)
                res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
                res.init_headers()
                return res
            gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template
    except:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')
    return set_theme
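
# ----------------------------------------------------------------------------
# A sketch of how these exports are typically consumed (assumed; the actual
# call site lives in the application entry script):
#
#   from theme import adjust_theme, advanced_css
#   import gradio as gr
#   with gr.Blocks(theme=adjust_theme(), css=advanced_css) as demo:
#       ...  # build the UI
# ----------------------------------------------------------------------------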

advanced_css = """
.markdown-body table {
    margin: 1em 0;
    border-collapse: collapse;
    empty-cells: show;
}

.markdown-body th, .markdown-body td {
    border: 1.2px solid var(--border-color-primary);
    padding: 5px;
}

.markdown-body thead {
    background-color: rgba(175,184,193,0.2);
}

.markdown-body thead th {
    padding: .5em .2em;
}

.markdown-body ol, .markdown-body ul {
    padding-inline-start: 2em !important;
}

/* chat box. */
[class *= "message"] {
    border-radius: var(--radius-xl) !important;
    /* padding: var(--spacing-xl) !important; */
    /* font-size: var(--text-md) !important; */
    /* line-height: var(--line-md) !important; */
    /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
    /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
}
[data-testid = "bot"] {
    max-width: 95%;
    /* width: auto !important; */
    border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
    max-width: 100%;
    /* width: auto !important; */
    border-bottom-right-radius: 0 !important;
}

/* inline code block. */
.markdown-body code {
    display: inline;
    white-space: break-spaces;
    border-radius: 6px;
    margin: 0 2px 0 2px;
    padding: .2em .4em .1em .4em;
    background-color: rgba(13, 17, 23, 0.95);
    color: #c9d1d9;
}

.dark .markdown-body code {
    display: inline;
    white-space: break-spaces;
    border-radius: 6px;
    margin: 0 2px 0 2px;
    padding: .2em .4em .1em .4em;
    background-color: rgba(175,184,193,0.2);
}

/* code block css */
.markdown-body pre code {
    display: block;
    overflow: auto;
    white-space: pre;
    background-color: rgba(13, 17, 23, 0.95);
    border-radius: 10px;
    padding: 1em;
    margin: 1em 2em 1em 0.5em;
}

.dark .markdown-body pre code {
    display: block;
    overflow: auto;
    white-space: pre;
    background-color: rgba(175,184,193,0.2);
    border-radius: 10px;
    padding: 1em;
    margin: 1em 2em 1em 0.5em;
}

"""

if CODE_HIGHLIGHT:
    advanced_css += """

.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
.codehilite .err { color: #f85149 } /* Error */
.codehilite .esc { color: #c9d1d9 } /* Escape */
.codehilite .g { color: #c9d1d9 } /* Generic */
.codehilite .k { color: #ff7b72 } /* Keyword */
.codehilite .l { color: #a5d6ff } /* Literal */
.codehilite .n { color: #c9d1d9 } /* Name */
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
.codehilite .x { color: #c9d1d9 } /* Other */
.codehilite .p { color: #c9d1d9 } /* Punctuation */
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #ffa198 } /* Generic.Error */
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
.codehilite .go { color: #8b949e } /* Generic.Output */
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
.codehilite .ld { color: #79c0ff } /* Literal.Date */
.codehilite .m { color: #a5d6ff } /* Literal.Number */
.codehilite .s { color: #a5d6ff } /* Literal.String */
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #ffa657 } /* Name.Entity */
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
.codehilite .py { color: #79c0ff } /* Name.Property */
.codehilite .nt { color: #7ee787 } /* Name.Tag */
.codehilite .nv { color: #79c0ff } /* Name.Variable */
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */

.dark .codehilite .hll { background-color: #2C3B41 }
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
.dark .codehilite .err { color: #FF5370 } /* Error */
.dark .codehilite .esc { color: #89DDFF } /* Escape */
.dark .codehilite .g { color: #EEFFFF } /* Generic */
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
.dark .codehilite .l { color: #C3E88D } /* Literal */
.dark .codehilite .n { color: #EEFFFF } /* Name */
.dark .codehilite .o { color: #89DDFF } /* Operator */
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
|
||||
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
|
||||
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
|
||||
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
|
||||
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
|
||||
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
|
||||
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
|
||||
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
|
||||
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
|
||||
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
|
||||
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
|
||||
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
|
||||
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
|
||||
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
|
||||
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
|
||||
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
|
||||
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
|
||||
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
|
||||
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
|
||||
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
|
||||
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
|
||||
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
|
||||
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
|
||||
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
|
||||
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
|
||||
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
|
||||
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
|
||||
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
|
||||
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
|
||||
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
|
||||
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
|
||||
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
|
||||
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
|
||||
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
|
||||
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
|
||||
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
|
||||
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
|
||||
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
|
||||
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
|
||||
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
|
||||
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
|
||||
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
|
||||
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
|
||||
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
|
||||
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
|
||||
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
|
||||
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
|
||||
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
|
||||
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
|
||||
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
|
||||
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
|
||||
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
|
||||
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
|
||||
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
|
||||
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
|
||||
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
|
||||
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
|
||||
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
|
||||
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
|
||||
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
|
||||
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
|
||||
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
|
||||
|
||||
"""
|
||||
@@ -1,296 +1 @@
/**
 * base64.ts
 *
 * Licensed under the BSD 3-Clause License.
 * http://opensource.org/licenses/BSD-3-Clause
 *
 * References:
 * http://en.wikipedia.org/wiki/Base64
 *
 * @author Dan Kogai (https://github.com/dankogai)
 */
const version = '3.7.2';
/**
 * @deprecated use lowercase `version`.
 */
const VERSION = version;
const _hasatob = typeof atob === 'function';
const _hasbtoa = typeof btoa === 'function';
const _hasBuffer = typeof Buffer === 'function';
const _TD = typeof TextDecoder === 'function' ? new TextDecoder() : undefined;
const _TE = typeof TextEncoder === 'function' ? new TextEncoder() : undefined;
const b64ch = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
const b64chs = Array.prototype.slice.call(b64ch);
const b64tab = ((a) => {
    let tab = {};
    a.forEach((c, i) => tab[c] = i);
    return tab;
})(b64chs);
const b64re = /^(?:[A-Za-z\d+\/]{4})*?(?:[A-Za-z\d+\/]{2}(?:==)?|[A-Za-z\d+\/]{3}=?)?$/;
const _fromCC = String.fromCharCode.bind(String);
const _U8Afrom = typeof Uint8Array.from === 'function'
    ? Uint8Array.from.bind(Uint8Array)
    : (it, fn = (x) => x) => new Uint8Array(Array.prototype.slice.call(it, 0).map(fn));
const _mkUriSafe = (src) => src
    .replace(/=/g, '').replace(/[+\/]/g, (m0) => m0 == '+' ? '-' : '_');
const _tidyB64 = (s) => s.replace(/[^A-Za-z0-9\+\/]/g, '');
/**
 * polyfill version of `btoa`
 */
const btoaPolyfill = (bin) => {
    // console.log('polyfilled');
    let u32, c0, c1, c2, asc = '';
    const pad = bin.length % 3;
    for (let i = 0; i < bin.length;) {
        if ((c0 = bin.charCodeAt(i++)) > 255 ||
            (c1 = bin.charCodeAt(i++)) > 255 ||
            (c2 = bin.charCodeAt(i++)) > 255)
            throw new TypeError('invalid character found');
        u32 = (c0 << 16) | (c1 << 8) | c2;
        asc += b64chs[u32 >> 18 & 63]
            + b64chs[u32 >> 12 & 63]
            + b64chs[u32 >> 6 & 63]
            + b64chs[u32 & 63];
    }
    return pad ? asc.slice(0, pad - 3) + "===".substring(pad) : asc;
};
/**
 * does what `window.btoa` of web browsers do.
 * @param {String} bin binary string
 * @returns {string} Base64-encoded string
 */
const _btoa = _hasbtoa ? (bin) => btoa(bin)
    : _hasBuffer ? (bin) => Buffer.from(bin, 'binary').toString('base64')
        : btoaPolyfill;
const _fromUint8Array = _hasBuffer
    ? (u8a) => Buffer.from(u8a).toString('base64')
    : (u8a) => {
        // cf. https://stackoverflow.com/questions/12710001/how-to-convert-uint8-array-to-base64-encoded-string/12713326#12713326
        const maxargs = 0x1000;
        let strs = [];
        for (let i = 0, l = u8a.length; i < l; i += maxargs) {
            strs.push(_fromCC.apply(null, u8a.subarray(i, i + maxargs)));
        }
        return _btoa(strs.join(''));
    };
/**
 * converts a Uint8Array to a Base64 string.
 * @param {boolean} [urlsafe] URL-and-filename-safe a la RFC4648 §5
 * @returns {string} Base64 string
 */
const fromUint8Array = (u8a, urlsafe = false) => urlsafe ? _mkUriSafe(_fromUint8Array(u8a)) : _fromUint8Array(u8a);
// This trick is found broken https://github.com/dankogai/js-base64/issues/130
// const utob = (src: string) => unescape(encodeURIComponent(src));
// reverting to the good old fashioned regexp
const cb_utob = (c) => {
    if (c.length < 2) {
        var cc = c.charCodeAt(0);
        return cc < 0x80 ? c
            : cc < 0x800 ? (_fromCC(0xc0 | (cc >>> 6))
                + _fromCC(0x80 | (cc & 0x3f)))
                : (_fromCC(0xe0 | ((cc >>> 12) & 0x0f))
                    + _fromCC(0x80 | ((cc >>> 6) & 0x3f))
                    + _fromCC(0x80 | (cc & 0x3f)));
    }
    else {
        var cc = 0x10000
            + (c.charCodeAt(0) - 0xD800) * 0x400
            + (c.charCodeAt(1) - 0xDC00);
        return (_fromCC(0xf0 | ((cc >>> 18) & 0x07))
            + _fromCC(0x80 | ((cc >>> 12) & 0x3f))
            + _fromCC(0x80 | ((cc >>> 6) & 0x3f))
            + _fromCC(0x80 | (cc & 0x3f)));
    }
};
const re_utob = /[\uD800-\uDBFF][\uDC00-\uDFFFF]|[^\x00-\x7F]/g;
/**
 * @deprecated should have been internal use only.
 * @param {string} src UTF-8 string
 * @returns {string} UTF-16 string
 */
const utob = (u) => u.replace(re_utob, cb_utob);
//
const _encode = _hasBuffer
    ? (s) => Buffer.from(s, 'utf8').toString('base64')
    : _TE
        ? (s) => _fromUint8Array(_TE.encode(s))
        : (s) => _btoa(utob(s));
/**
 * converts a UTF-8-encoded string to a Base64 string.
 * @param {boolean} [urlsafe] if `true` make the result URL-safe
 * @returns {string} Base64 string
 */
const encode = (src, urlsafe = false) => urlsafe
    ? _mkUriSafe(_encode(src))
    : _encode(src);
/**
 * converts a UTF-8-encoded string to URL-safe Base64 RFC4648 §5.
 * @returns {string} Base64 string
 */
const encodeURI = (src) => encode(src, true);
// This trick is found broken https://github.com/dankogai/js-base64/issues/130
// const btou = (src: string) => decodeURIComponent(escape(src));
// reverting to the good old fashioned regexp
const re_btou = /[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF]{2}|[\xF0-\xF7][\x80-\xBF]{3}/g;
const cb_btou = (cccc) => {
    switch (cccc.length) {
        case 4:
            var cp = ((0x07 & cccc.charCodeAt(0)) << 18)
                | ((0x3f & cccc.charCodeAt(1)) << 12)
                | ((0x3f & cccc.charCodeAt(2)) << 6)
                | (0x3f & cccc.charCodeAt(3)), offset = cp - 0x10000;
            return (_fromCC((offset >>> 10) + 0xD800)
                + _fromCC((offset & 0x3FF) + 0xDC00));
        case 3:
            return _fromCC(((0x0f & cccc.charCodeAt(0)) << 12)
                | ((0x3f & cccc.charCodeAt(1)) << 6)
                | (0x3f & cccc.charCodeAt(2)));
        default:
            return _fromCC(((0x1f & cccc.charCodeAt(0)) << 6)
                | (0x3f & cccc.charCodeAt(1)));
    }
};
/**
 * @deprecated should have been internal use only.
 * @param {string} src UTF-16 string
 * @returns {string} UTF-8 string
 */
const btou = (b) => b.replace(re_btou, cb_btou);
/**
 * polyfill version of `atob`
 */
const atobPolyfill = (asc) => {
    // console.log('polyfilled');
    asc = asc.replace(/\s+/g, '');
    if (!b64re.test(asc))
        throw new TypeError('malformed base64.');
    asc += '=='.slice(2 - (asc.length & 3));
    let u24, bin = '', r1, r2;
    for (let i = 0; i < asc.length;) {
        u24 = b64tab[asc.charAt(i++)] << 18
            | b64tab[asc.charAt(i++)] << 12
            | (r1 = b64tab[asc.charAt(i++)]) << 6
            | (r2 = b64tab[asc.charAt(i++)]);
        bin += r1 === 64 ? _fromCC(u24 >> 16 & 255)
            : r2 === 64 ? _fromCC(u24 >> 16 & 255, u24 >> 8 & 255)
                : _fromCC(u24 >> 16 & 255, u24 >> 8 & 255, u24 & 255);
    }
    return bin;
};
/**
 * does what `window.atob` of web browsers do.
 * @param {String} asc Base64-encoded string
 * @returns {string} binary string
 */
const _atob = _hasatob ? (asc) => atob(_tidyB64(asc))
    : _hasBuffer ? (asc) => Buffer.from(asc, 'base64').toString('binary')
        : atobPolyfill;
//
const _toUint8Array = _hasBuffer
    ? (a) => _U8Afrom(Buffer.from(a, 'base64'))
    : (a) => _U8Afrom(_atob(a), c => c.charCodeAt(0));
/**
 * converts a Base64 string to a Uint8Array.
 */
const toUint8Array = (a) => _toUint8Array(_unURI(a));
//
const _decode = _hasBuffer
    ? (a) => Buffer.from(a, 'base64').toString('utf8')
    : _TD
        ? (a) => _TD.decode(_toUint8Array(a))
        : (a) => btou(_atob(a));
const _unURI = (a) => _tidyB64(a.replace(/[-_]/g, (m0) => m0 == '-' ? '+' : '/'));
/**
 * converts a Base64 string to a UTF-8 string.
 * @param {String} src Base64 string. Both normal and URL-safe are supported
 * @returns {string} UTF-8 string
 */
const decode = (src) => _decode(_unURI(src));
/**
 * check if a value is a valid Base64 string
 * @param {String} src a value to check
 */
const isValid = (src) => {
    if (typeof src !== 'string')
        return false;
    const s = src.replace(/\s+/g, '').replace(/={0,2}$/, '');
    return !/[^\s0-9a-zA-Z\+/]/.test(s) || !/[^\s0-9a-zA-Z\-_]/.test(s);
};
//
const _noEnum = (v) => {
    return {
        value: v, enumerable: false, writable: true, configurable: true
    };
};
/**
 * extend String.prototype with relevant methods
 */
const extendString = function () {
    const _add = (name, body) => Object.defineProperty(String.prototype, name, _noEnum(body));
    _add('fromBase64', function () { return decode(this); });
    _add('toBase64', function (urlsafe) { return encode(this, urlsafe); });
    _add('toBase64URI', function () { return encode(this, true); });
    _add('toBase64URL', function () { return encode(this, true); });
    _add('toUint8Array', function () { return toUint8Array(this); });
};
/**
 * extend Uint8Array.prototype with relevant methods
 */
const extendUint8Array = function () {
    const _add = (name, body) => Object.defineProperty(Uint8Array.prototype, name, _noEnum(body));
    _add('toBase64', function (urlsafe) { return fromUint8Array(this, urlsafe); });
    _add('toBase64URI', function () { return fromUint8Array(this, true); });
    _add('toBase64URL', function () { return fromUint8Array(this, true); });
};
/**
 * extend Builtin prototypes with relevant methods
 */
const extendBuiltins = () => {
    extendString();
    extendUint8Array();
};
const gBase64 = {
    version: version,
    VERSION: VERSION,
    atob: _atob,
    atobPolyfill: atobPolyfill,
    btoa: _btoa,
    btoaPolyfill: btoaPolyfill,
    fromBase64: decode,
    toBase64: encode,
    encode: encode,
    encodeURI: encodeURI,
    encodeURL: encodeURI,
    utob: utob,
    btou: btou,
    decode: decode,
    isValid: isValid,
    fromUint8Array: fromUint8Array,
    toUint8Array: toUint8Array,
    extendString: extendString,
    extendUint8Array: extendUint8Array,
    extendBuiltins: extendBuiltins,
};
// makecjs:CUT //
export { version };
export { VERSION };
export { _atob as atob };
export { atobPolyfill };
export { _btoa as btoa };
export { btoaPolyfill };
export { decode as fromBase64 };
export { encode as toBase64 };
export { utob };
export { encode };
export { encodeURI };
export { encodeURI as encodeURL };
export { btou };
export { decode };
export { isValid };
export { fromUint8Array };
export { toUint8Array };
export { extendString };
export { extendUint8Array };
export { extendBuiltins };
// and finally,
export { gBase64 as Base64 };
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
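A minimal round-trip sketch of the vendored module above (illustrative only, not part of the diff; the import path matches the one used by themes/mermaid_editor.js):

// Usage sketch of the vendored base64 module (assumes a browser/Gradio context).
import { encode, decode, encodeURI, toUint8Array } from '/file=themes/base64.mjs';

const b64 = encode('draw 脑图');      // UTF-8 aware, unlike a bare btoa()
console.log(decode(b64));             // -> 'draw 脑图'
console.log(encodeURI('draw 脑图'));  // same encoding, but URL-safe: '+' -> '-', '/' -> '_', '=' padding stripped
const bytes = toUint8Array(b64);      // decode straight to raw bytes (Uint8Array)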
@@ -59,6 +59,7 @@

/* Scrollbar Width */
::-webkit-scrollbar {
    height: 12px;
    width: 12px;
}
173 themes/common.js
@@ -234,7 +234,7 @@ let timeoutID = null;
let lastInvocationTime = 0;
let lastArgs = null;
function do_something_but_not_too_frequently(min_interval, func) {
    return function(...args) {
    return function (...args) {
        lastArgs = args;
        const now = Date.now();
        if (!lastInvocationTime || (now - lastInvocationTime) >= min_interval) {
@@ -263,13 +263,8 @@ function chatbotContentChanged(attempt = 1, force = false) {
            gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
        }, i === 0 ? 0 : 200);
    }
    // we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0

    const run_mermaid_render = do_something_but_not_too_frequently(1000, function () {
        const blocks = document.querySelectorAll(`pre.mermaid, diagram-div`);
        if (blocks.length == 0) { return; }
        uml("mermaid");
    });
    run_mermaid_render();
}

@@ -672,9 +667,9 @@ function limit_scroll_position() {
    let scrollableDiv = document.querySelector('#gpt-chatbot > div.wrap');
    scrollableDiv.addEventListener('wheel', function (e) {
        let preventScroll = false;
        if (e.deltaX != 0) { prevented_offset = 0; return;}
        if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return;}
        if (e.deltaY < 0) { prevented_offset = 0; return;}
        if (e.deltaX != 0) { prevented_offset = 0; return; }
        if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return; }
        if (e.deltaY < 0) { prevented_offset = 0; return; }
        if (e.deltaY > 0 && this.scrollHeight - this.clientHeight - this.scrollTop <= 1) { preventScroll = true; }

        if (preventScroll) {
@@ -713,3 +708,161 @@ function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
    // setInterval(function () { uml("mermaid") }, 5000); // run once every 5000 ms

}


function loadLive2D() {
    try {
        $("<link>").attr({ href: "file=themes/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css" }).appendTo('head');
        $('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>');
        $.ajax({
            url: "file=themes/waifu_plugin/waifu-tips.js", dataType: "script", cache: true, success: function () {
                $.ajax({
                    url: "file=themes/waifu_plugin/live2d.js", dataType: "script", cache: true, success: function () {
                        /* parameters that can be adjusted directly */
                        live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // Hitokoto API
                        live2d_settings['modelId'] = 3; // default model ID
                        live2d_settings['modelTexturesId'] = 44; // default texture ID
                        live2d_settings['modelStorage'] = false; // do not persist the model ID
                        live2d_settings['waifuSize'] = '210x187';
                        live2d_settings['waifuTipsSize'] = '187x52';
                        live2d_settings['canSwitchModel'] = true;
                        live2d_settings['canSwitchTextures'] = true;
                        live2d_settings['canSwitchHitokoto'] = false;
                        live2d_settings['canTakeScreenshot'] = false;
                        live2d_settings['canTurnToHomePage'] = false;
                        live2d_settings['canTurnToAboutPage'] = false;
                        live2d_settings['showHitokoto'] = false; // show Hitokoto quotes
                        live2d_settings['showF12Status'] = false; // show loading status in the console
                        live2d_settings['showF12Message'] = false; // show waifu messages in the console
                        live2d_settings['showF12OpenMsg'] = false; // show a hint when devtools is opened
                        live2d_settings['showCopyMessage'] = false; // show a "content copied" hint
                        live2d_settings['showWelcomeMessage'] = true; // show a welcome message on page entry
                        /* add before initModel */
                        initModel("file=themes/waifu_plugin/waifu-tips.json");
                    }
                });
            }
        });
    } catch (err) { console.log("[Error] JQuery is not defined.") }
}

function get_checkbox_selected_items(elem_id) {
    display_panel_arr = [];
    document.getElementById(elem_id).querySelector('[data-testid="checkbox-group"]').querySelectorAll('label').forEach(label => {
        // Get the span text
        const spanText = label.querySelector('span').textContent;
        // Get the input value
        const checked = label.querySelector('input').checked;
        if (checked) {
            display_panel_arr.push(spanText)
        }
    });
    return display_panel_arr;
}

function set_checkbox(key, bool, set_twice = false) {
    set_success = false;
    elem_ids = ["cbsc", "cbs"]
    elem_ids.forEach(id => {
        document.getElementById(id).querySelector('[data-testid="checkbox-group"]').querySelectorAll('label').forEach(label => {
            // Get the span text
            const spanText = label.querySelector('span').textContent;
            if (spanText === key) {
                if (bool) {
                    label.classList.add('selected');
                } else {
                    if (label.classList.contains('selected')) {
                        label.classList.remove('selected');
                    }
                }
                if (set_twice) {
                    setTimeout(() => {
                        if (bool) {
                            label.classList.add('selected');
                        } else {
                            if (label.classList.contains('selected')) {
                                label.classList.remove('selected');
                            }
                        }
                    }, 5000);
                }

                label.querySelector('input').checked = bool;
                set_success = true;
                return
            }
        });
    });

    if (!set_success) {
        console.log("设置checkbox失败,没有找到对应的key")
    }
}

function apply_cookie_for_checkbox(dark) {
    // console.log("apply_cookie_for_checkboxes")
    let searchString = "输入清除键";
    let bool_value = "False";

    ////////////////// darkmode ///////////////////
    if (getCookie("js_darkmode_cookie")) {
        dark = getCookie("js_darkmode_cookie")
    }
    dark = dark == "True";
    if (document.querySelectorAll('.dark').length) {
        if (!dark) {
            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
        }
    } else {
        if (dark) {
            document.querySelector('body').classList.add('dark');
        }
    }

    ////////////////////// clearButton ///////////////////////////
    if (getCookie("js_clearbtn_show_cookie")) {
        // have cookie
        bool_value = getCookie("js_clearbtn_show_cookie")
        bool_value = bool_value == "True";
        searchString = "输入清除键";
        if (bool_value) {
            let clearButton = document.getElementById("elem_clear");
            let clearButton2 = document.getElementById("elem_clear2");
            clearButton.style.display = "block";
            clearButton2.style.display = "block";
            set_checkbox(searchString, true);
        } else {
            let clearButton = document.getElementById("elem_clear");
            let clearButton2 = document.getElementById("elem_clear2");
            clearButton.style.display = "none";
            clearButton2.style.display = "none";
            set_checkbox(searchString, false);
        }
    }

    ////////////////////// live2d ///////////////////////////
    if (getCookie("js_live2d_show_cookie")) {
        // have cookie
        searchString = "添加Live2D形象";
        bool_value = getCookie("js_live2d_show_cookie");
        bool_value = bool_value == "True";
        if (bool_value) {
            loadLive2D();
            set_checkbox(searchString, true);
        } else {
            $('.waifu').hide();
            set_checkbox(searchString, false);
        }
    } else {
        // do not have cookie
        // get conf
        display_panel_arr = get_checkbox_selected_items("cbsc");
        searchString = "添加Live2D形象";
        if (display_panel_arr.includes(searchString)) {
            loadLive2D();
        } else {
        }
    }

}
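Aside: a usage sketch of the throttle helper do_something_but_not_too_frequently defined near the top of this file (the handler name and event are illustrative, not from the repo):

// Throttle a scroll handler: func runs at most once per min_interval;
// lastArgs records the most recent arguments, which the truncated body
// above appears to replay via timeoutID after the interval elapses.
const onScrollThrottled = do_something_but_not_too_frequently(1000, function (y) {
    console.log("scroll position:", y);
});
window.addEventListener("scroll", () => onScrollThrottled(window.scrollY));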
@@ -5,17 +5,14 @@ def get_common_html_javascript_code():
    js = "\n"
    for jsf in [
        "file=themes/common.js",
        "file=themes/mermaid.min.js",
        "file=themes/mermaid_loader.js",
    ]:
        js += f"""<script src="{jsf}"></script>\n"""

    # Add Live2D
    if ADD_WAIFU:
        for jsf in [
            "file=docs/waifu_plugin/jquery.min.js",
            "file=docs/waifu_plugin/jquery-ui.min.js",
            "file=docs/waifu_plugin/autoload.js",
            "file=themes/waifu_plugin/jquery.min.js",
            "file=themes/waifu_plugin/jquery-ui.min.js",
        ]:
            js += f"""<script src="{jsf}"></script>\n"""
    return js
1590 themes/mermaid.min.js vendored
File diff suppressed because one or more lines are too long
@@ -1,55 +1 @@
import { deflate, inflate } from '/file=themes/pako.esm.mjs';
import { toUint8Array, fromUint8Array, toBase64, fromBase64 } from '/file=themes/base64.mjs';

const base64Serde = {
    serialize: (state) => {
        return toBase64(state, true);
    },
    deserialize: (state) => {
        return fromBase64(state);
    }
};

const pakoSerde = {
    serialize: (state) => {
        const data = new TextEncoder().encode(state);
        const compressed = deflate(data, { level: 9 });
        return fromUint8Array(compressed, true);
    },
    deserialize: (state) => {
        const data = toUint8Array(state);
        return inflate(data, { to: 'string' });
    }
};

const serdes = {
    base64: base64Serde,
    pako: pakoSerde
};

export const serializeState = (state, serde = 'pako') => {
    if (!(serde in serdes)) {
        throw new Error(`Unknown serde type: ${serde}`);
    }
    const json = JSON.stringify(state);
    const serialized = serdes[serde].serialize(json);
    return `${serde}:${serialized}`;
};

const deserializeState = (state) => {
    let type, serialized;
    if (state.includes(':')) {
        let tempType;
        [tempType, serialized] = state.split(':');
        if (tempType in serdes) {
            type = tempType;
        } else {
            throw new Error(`Unknown serde type: ${tempType}`);
        }
    } else {
        type = 'base64';
        serialized = state;
    }
    const json = serdes[type].deserialize(serialized);
    return JSON.parse(json);
};
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
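For context, a sketch of how this serializer is consumed (it mirrors the payload shape that do_render() in themes/mermaid_loader.js passes in; the diagram text is illustrative):

import { serializeState } from '/file=themes/mermaid_editor.js';

const state = {
    "code": "graph TD\n  A --> B",
    "mermaid": "{\n \"theme\": \"default\"\n}",
    "autoSync": true,
    "updateDiagram": false
};
// serializeState JSON-stringifies the state, deflates it with pako (level 9),
// base64url-encodes the result, and prefixes the serde name, so the fragment
// below is a "pako:..." payload that mermaid.live can open directly.
const editUrl = "https://mermaid.live/edit#" + serializeState(state);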
@@ -1,197 +1 @@
const uml = async className => {

    // Custom element to encapsulate Mermaid content.
    class MermaidDiv extends HTMLElement {

        /**
         * Creates a special Mermaid div shadow DOM.
         * Works around issues of shared IDs.
         * @return {void}
         */
        constructor() {
            super()

            // Create the Shadow DOM and attach style
            const shadow = this.attachShadow({ mode: "open" })
            const style = document.createElement("style")
            style.textContent = `
            :host {
                display: block;
                line-height: initial;
                font-size: 16px;
            }
            div.diagram {
                margin: 0;
                overflow: visible;
            }`
            shadow.appendChild(style)
        }
    }

    if (typeof customElements.get("diagram-div") === "undefined") {
        customElements.define("diagram-div", MermaidDiv)
    }

    const getFromCode = parent => {
        // Handles <pre><code> text extraction.
        let text = ""
        for (let j = 0; j < parent.childNodes.length; j++) {
            const subEl = parent.childNodes[j]
            if (subEl.tagName.toLowerCase() === "code") {
                for (let k = 0; k < subEl.childNodes.length; k++) {
                    const child = subEl.childNodes[k]
                    const whitespace = /^\s*$/
                    if (child.nodeName === "#text" && !(whitespace.test(child.nodeValue))) {
                        text = child.nodeValue
                        break
                    }
                }
            }
        }
        return text
    }

    function createOrUpdateHyperlink(parentElement, linkText, linkHref) {
        // Search for an existing anchor element within the parentElement
        let existingAnchor = parentElement.querySelector("a");

        // Check if an anchor element already exists
        if (existingAnchor) {
            // Update the hyperlink reference if it's different from the current one
            if (existingAnchor.href !== linkHref) {
                existingAnchor.href = linkHref;
            }
            // Update the target attribute to ensure it opens in a new tab
            existingAnchor.target = '_blank';

            // If the text must be dynamic, uncomment and use the following line:
            // existingAnchor.textContent = linkText;
        } else {
            // If no anchor exists, create one and append it to the parentElement
            let anchorElement = document.createElement("a");
            anchorElement.href = linkHref; // Set hyperlink reference
            anchorElement.textContent = linkText; // Set text displayed
            anchorElement.target = '_blank'; // Ensure it opens in a new tab
            parentElement.appendChild(anchorElement); // Append the new anchor element to the parent
        }
    }

    function removeLastLine(str) {
        // split the string into an array of lines
        var lines = str.split('\n');
        lines.pop();
        // join the array back into a newline-separated string
        var result = lines.join('\n');
        return result;
    }

    // Provide a default config in case one is not specified
    const defaultConfig = {
        startOnLoad: false,
        theme: "default",
        flowchart: {
            htmlLabels: false
        },
        er: {
            useMaxWidth: false
        },
        sequence: {
            useMaxWidth: false,
            noteFontWeight: "14px",
            actorFontSize: "14px",
            messageFontSize: "16px"
        }
    }
    if (document.body.classList.contains("dark")) {
        defaultConfig.theme = "dark"
    }

    const Module = await import('/file=themes/mermaid_editor.js');

    function do_render(block, code, codeContent, cnt) {
        var rendered_content = mermaid.render(`_diagram_${cnt}`, code);
        ////////////////////////////// record which code blocks have already been rendered ///////////////////////////////////
        let codeFinishRenderElement = block.querySelector("code_finish_render"); // fetch the code_finish_render element if the block already has one
        if (codeFinishRenderElement) { // the block already contains a code_finish_render element
            codeFinishRenderElement.style.display = "none";
        } else {
            // otherwise create a code_finish_render element and copy the code element's content into it
            let codeFinishRenderElementNew = document.createElement("code_finish_render"); // create a new code_finish_render element
            codeFinishRenderElementNew.style.display = "none";
            codeFinishRenderElementNew.textContent = "";
            block.appendChild(codeFinishRenderElementNew); // attach the new code_finish_render element to the block
            codeFinishRenderElement = codeFinishRenderElementNew;
        }

        ////////////////////////////// create a container for the rendered output ///////////////////////////////////
        let mermaidRender = block.querySelector(".mermaid_render"); // try to fetch an existing <div class='mermaid_render'>
        if (!mermaidRender) {
            mermaidRender = document.createElement("div"); // none exists, create a new <div class='mermaid_render'>
            mermaidRender.classList.add("mermaid_render");
            block.appendChild(mermaidRender); // attach the new element to the block
        }
        mermaidRender.innerHTML = rendered_content
        codeFinishRenderElement.textContent = code // mark this code as rendered

        ////////////////////////////// create an "edit this mindmap" link ///////////////////////////////
        let pako_encode = Module.serializeState({
            "code": codeContent,
            "mermaid": "{\n \"theme\": \"default\"\n}",
            "autoSync": true,
            "updateDiagram": false
        });
        createOrUpdateHyperlink(block, "点击这里编辑脑图", "https://mermaid.live/edit#" + pako_encode)
    }

    // Load up the config
    mermaid.mermaidAPI.globalReset() // global reset
    const config = (typeof mermaidConfig === "undefined") ? defaultConfig : mermaidConfig
    mermaid.initialize(config)
    // Find all of our Mermaid sources and render them.
    const blocks = document.querySelectorAll(`pre.mermaid`);

    for (let i = 0; i < blocks.length; i++) {
        var block = blocks[i]
        ////////////////////////////// skip rendering if the code has not changed ///////////////////////////////////
        var code = getFromCode(block);
        let code_elem = block.querySelector("code");
        let codeContent = code_elem.textContent; // text content of the code element

        // hide code_elem if codeContent contains '<gpt_academic_hide_mermaid_code>'
        if (codeContent.indexOf('<gpt_academic_hide_mermaid_code>') !== -1) {
            code_elem.style.display = "none";
        }

        // fetch the code_pending_render element if the block already has one
        let codePendingRenderElement = block.querySelector("code_pending_render");
        if (codePendingRenderElement) { // the block already contains a code_pending_render element
            codePendingRenderElement.style.display = "none";
            if (codePendingRenderElement.textContent !== codeContent) {
                codePendingRenderElement.textContent = codeContent; // the stored content differs from the code element, so update it
            }
            else {
                continue; // identical content, nothing to do
            }
        } else { // otherwise create a code_pending_render element and copy the code element's content into it
            let codePendingRenderElementNew = document.createElement("code_pending_render"); // create a new code_pending_render element
            codePendingRenderElementNew.style.display = "none";
            codePendingRenderElementNew.textContent = codeContent;
            block.appendChild(codePendingRenderElementNew); // attach the new code_pending_render element to the block
            codePendingRenderElement = codePendingRenderElementNew;
        }

        ////////////////////////////// the actual rendering happens here ///////////////////////////////////
        try {
            do_render(block, code, codeContent, i);
            // console.log("rendered", codeContent);
        } catch (err) {
            try {
                var lines = code.split('\n'); if (lines.length < 2) { continue; }
                do_render(block, removeLastLine(code), codeContent, i);
                // console.log("rendered", codeContent);
            } catch (err) {
                console.log("以下代码不能渲染", code, removeLastLine(code), err);
            }
        }
    }
}
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
6878 themes/pako.esm.mjs
File diff suppressed because it is too large
109 themes/theme.py
@@ -46,8 +46,7 @@ cookie相关工具函数
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""

def init_cookie(cookies, chatbot):
def init_cookie(cookies):
    # assign a unique uuid to every visiting user
    cookies.update({"uuid": uuid.uuid4()})
    return cookies
@@ -91,31 +90,107 @@ js_code_for_css_changing = """(css) => {
}
"""

js_code_for_darkmode_init = """(dark) => {
    dark = dark == "True";
    if (document.querySelectorAll('.dark').length) {
        if (!dark) {
            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
        }
    } else {
        if (dark) {
            document.querySelector('body').classList.add('dark');
        }
    }
}
"""

js_code_for_toggle_darkmode = """() => {
    if (document.querySelectorAll('.dark').length) {
        setCookie("js_darkmode_cookie", "False", 365);
        document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
    } else {
        setCookie("js_darkmode_cookie", "True", 365);
        document.querySelector('body').classList.add('dark');
    }
    document.querySelectorAll('code_pending_render').forEach(code => {code.remove();})
}"""

js_code_for_persistent_cookie_init = """(persistent_cookie) => {
    return getCookie("persistent_cookie");
js_code_for_persistent_cookie_init = """(py_pickle_cookie, cookie) => {
    return [getCookie("py_pickle_cookie"), cookie];
}
"""

js_code_reset = """
(a,b,c)=>{
    return [[], [], "已重置"];
}
"""

js_code_clear = """
(a,b)=>{
    return ["", ""];
}
"""

js_code_show_or_hide = """
(display_panel_arr)=>{
    setTimeout(() => {
        // get conf
        display_panel_arr = get_checkbox_selected_items("cbs");

        ////////////////////// clear-input button ("输入清除键") ///////////////////////////
        let searchString = "输入清除键";
        let ele = "none";
        if (display_panel_arr.includes(searchString)) {
            let clearButton = document.getElementById("elem_clear");
            let clearButton2 = document.getElementById("elem_clear2");
            clearButton.style.display = "block";
            clearButton2.style.display = "block";
            setCookie("js_clearbtn_show_cookie", "True", 365);
        } else {
            let clearButton = document.getElementById("elem_clear");
            let clearButton2 = document.getElementById("elem_clear2");
            clearButton.style.display = "none";
            clearButton2.style.display = "none";
            setCookie("js_clearbtn_show_cookie", "False", 365);
        }

        ////////////////////// basic function panel ("基础功能区") ///////////////////////////
        searchString = "基础功能区";
        if (display_panel_arr.includes(searchString)) {
            ele = document.getElementById("basic-panel");
            ele.style.display = "block";
        } else {
            ele = document.getElementById("basic-panel");
            ele.style.display = "none";
        }

        ////////////////////// function plugin panel ("函数插件区") ///////////////////////////
        searchString = "函数插件区";
        if (display_panel_arr.includes(searchString)) {
            ele = document.getElementById("plugin-panel");
            ele.style.display = "block";
        } else {
            ele = document.getElementById("plugin-panel");
            ele.style.display = "none";
        }

    }, 50);
}
"""

js_code_show_or_hide_group2 = """
(display_panel_arr)=>{
    setTimeout(() => {
        // console.log("display_panel_arr");
        // get conf
        display_panel_arr = get_checkbox_selected_items("cbsc");

        ////////////////////// Live2D avatar ("添加Live2D形象") ///////////////////////////
        let searchString = "添加Live2D形象";
        let ele = "none";
        if (display_panel_arr.includes(searchString)) {
            setCookie("js_live2d_show_cookie", "True", 365);
            loadLive2D();
        } else {
            setCookie("js_live2d_show_cookie", "False", 365);
            $('.waifu').hide();
        }

    }, 50);
}
"""
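These snippets lean on setCookie/getCookie helpers that are not shown in this diff (presumably defined elsewhere in the theme's shared JS). A conventional sketch of what they would look like, as an assumption rather than the repository's actual code:

// Assumed shape of the cookie helpers used above (hypothetical, not from the repo).
function setCookie(name, value, days) {
    // 864e5 ms = one day; the callers above pass days = 365
    const expires = new Date(Date.now() + days * 864e5).toUTCString();
    document.cookie = name + "=" + encodeURIComponent(value) + "; expires=" + expires + "; path=/";
}
function getCookie(name) {
    const m = document.cookie.match(new RegExp('(?:^|; )' + name + '=([^;]*)'));
    return m ? decodeURIComponent(m[1]) : null;
}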
0 themes/waifu_plugin/autoload.js Normal file
BIN themes/waifu_plugin/flat-ui-icons-regular.eot Normal file
Binary file not shown.
126 themes/waifu_plugin/flat-ui-icons-regular.svg Normal file
@@ -0,0 +1,126 @@
|
||||
<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
|
||||
<svg xmlns="http://www.w3.org/2000/svg">
|
||||
<metadata>
|
||||
<json>
|
||||
{
|
||||
"fontFamily": "flat-ui-icons",
|
||||
"majorVersion": 1,
|
||||
"minorVersion": 1,
|
||||
"fontURL": "http://designmodo.com/flat",
|
||||
"designer": "Sergey Shmidt",
|
||||
"designerURL": "http://designmodo.com",
|
||||
"license": "Attribution-NonCommercial-NoDerivs 3.0 Unported",
|
||||
"licenseURL": "http://creativecommons.org/licenses/by-nc-nd/3.0/",
|
||||
"version": "Version 1.1",
|
||||
"fontId": "flat-ui-icons",
|
||||
"psName": "flat-ui-icons",
|
||||
"subFamily": "Regular",
|
||||
"fullName": "flat-ui-icons",
|
||||
"description": "Generated by IcoMoon"
|
||||
}
|
||||
</json>
|
||||
</metadata>
|
||||
<defs>
|
||||
<font id="flat-ui-icons" horiz-adv-x="1024">
|
||||
<font-face units-per-em="1024" ascent="960" descent="-64" />
|
||||
<missing-glyph horiz-adv-x="1024" />
|
||||
<glyph unicode=" " d="" horiz-adv-x="512" />
|
||||
<glyph unicode="" d="M896 192l-384 512-384-512h768z" />
|
||||
<glyph unicode="" d="M128 704l384-512 384 512h-768z" />
|
||||
<glyph unicode="" d="M896 256h-768l384 384 384-384z" />
|
||||
<glyph unicode="" d="M512 256l-384 384h768l-384-384z" />
|
||||
<glyph unicode="" d="M896 0l-768 448 768 448v-896z" />
|
||||
<glyph unicode="" d="M128 896l768-448-768-448v896z" />
|
||||
<glyph unicode="" d="M224.96 448.768l447.168 447.232 128-131.008-321.152-318.016 321.152-320.896-128.256-128.256-446.912 450.944z" />
|
||||
<glyph unicode="" d="M353.152-2.112l-128.192 128.256 321.088 320.896-321.152 317.952 128 131.008 447.168-447.232-446.912-450.88z" />
|
||||
<glyph unicode="" d="M928 351.936h-320v-319.936c0-35.392-28.608-64-64-64h-64c-35.328 0-64 28.608-64 64v319.936h-320c-35.328 0-64 28.736-64 64.064v64.064c0 35.328 28.672 63.872 64 63.872h320v320.064c0 35.328 28.672 64 64 64h64c35.392 0 64-28.672 64-64v-320.064h320c35.392 0 64-28.544 64-63.872v-64.064c0-35.328-28.608-64.064-64-64.064z" />
|
||||
<glyph unicode="" d="M919.808 764.032c12.48-12.416 12.48-32.832 0-45.248l-248.896-249.024c-12.352-12.416-12.352-32.832 0-45.312l248.768-249.088c12.48-12.416 12.48-32.832 0-45.248l-90.624-90.432c-12.352-12.416-32.768-12.416-45.248 0l-248.64 249.088c-12.416 12.416-32.832 12.416-45.248 0l-248.896-248.896c-12.416-12.48-32.832-12.48-45.248 0l-90.496 90.624c-12.416 12.352-12.416 32.768 0 45.248l248.96 248.896c12.416 12.416 12.416 32.832 0 45.312l-248.768 249.024c-12.416 12.48-12.416 32.832 0 45.248l90.56 90.496c12.416 12.416 32.832 12.416 45.248 0l248.64-249.024c12.416-12.48 32.832-12.48 45.248-0.064l248.832 248.96c12.48 12.352 32.896 12.352 45.248 0l90.56-90.56z" />
|
||||
<glyph unicode="" d="M923.136 822.592c-12.352 12.544-32.768 12.544-45.12 0l-476.16-474.496c-12.48-12.544-32.832-12.544-45.248 0l-208.64 212.736c-6.144 6.208-14.272 9.408-22.336 9.472-8.256 0-16.576-3.008-22.848-9.472l-92.16-83.008c-6.144-6.272-9.472-14.144-9.472-22.336 0-8.32 3.328-17.024 9.472-23.232l210.368-220.992c12.416-12.48 32.832-33.024 45.248-45.632l90.432-91.264c12.416-12.48 32.768-12.48 45.248 0l611.712 611.328c12.48 12.48 12.48 33.088 0 45.632l-90.496 91.264z" />
|
||||
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512s230.4-512 512-512 512 230.4 512 512c0 281.6-230.4 512-512 512zM512 140.8c-168.96 0-307.2 138.24-307.2 307.2s138.24 307.2 307.2 307.2c168.96 0 307.2-138.24 307.2-307.2 0-168.96-138.24-307.2-307.2-307.2z" />
|
||||
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512s230.4-512 512-512 512 230.4 512 512c0 281.6-230.4 512-512 512zM512 140.8c-168.96 0-307.2 138.24-307.2 307.2s138.24 307.2 307.2 307.2c168.96 0 307.2-138.24 307.2-307.2 0-168.96-138.24-307.2-307.2-307.2zM512 601.6c-87.040 0-153.6-66.56-153.6-153.6s66.56-153.6 153.6-153.6 153.6 66.56 153.6 153.6c0 87.040-66.56 153.6-153.6 153.6z" />
|
||||
<glyph unicode="" d="M256 960h512c143.36 0 256-112.64 256-256v-512c0-143.36-112.64-256-256-256h-512c-143.36 0-256 112.64-256 256v512c0 143.36 112.64 256 256 256z" />
|
||||
<glyph unicode="" d="M768 960h-512c-143.36 0-256-112.64-256-256v-512c0-143.36 112.64-256 256-256h512c143.36 0 256 112.64 256 256v512c0 143.36-112.64 256-256 256zM844.8 550.4l-368.64-368.64c-5.12-5.12-20.48-5.12-25.6 0l-56.32 56.32c-5.12 5.12-20.48 20.48-25.6 25.6l-128 133.12c-5.12 5.12-5.12 10.24-5.12 15.36s0 10.24 5.12 15.36l56.32 51.2c5.12 0 10.24 5.12 10.24 5.12 5.12 0 10.24 0 15.36-5.12l122.88-128c5.12-5.12 20.48-5.12 25.6 0l286.72 286.72c5.12 5.12 20.48 5.12 25.6 0l56.32-56.32c10.24-10.24 10.24-20.48 5.12-30.72z" />
|
||||
<glyph unicode="" d="M512 960c-282.752 0-512-229.248-512-512 0-282.688 229.248-512 512-512 282.816 0 512 229.248 512 512 0 282.752-229.184 512-512 512zM576.768 195.136c0-37.056-28.992-67.072-64.768-67.072s-64.768 30.016-64.768 67.072v313.088c0 37.056 28.992 67.072 64.768 67.072s64.768-30.016 64.768-67.072v-313.088zM512 640.32c-35.776 0-64.768 28.608-64.768 63.872s28.992 63.744 64.768 63.744 64.768-28.544 64.768-63.808-28.992-63.808-64.768-63.808z" />
|
||||
<glyph unicode="" d="M512 960c-282.752 0-512-229.248-512-512s229.248-512 512-512c282.752 0 512 229.248 512 512 0 282.752-229.248 512-512 512zM512 128.064c-35.776 0-64.768 28.544-64.768 63.808 0 35.2 28.992 63.808 64.768 63.808 35.776 0 64.768-28.608 64.768-63.808 0-35.264-28.992-63.808-64.768-63.808zM576.768 387.776c0-37.056-28.992-67.072-64.768-67.072-35.776 0-64.768 30.080-64.768 67.072v313.088c0 37.056 28.992 67.072 64.768 67.072 35.776 0 64.768-30.080 64.768-67.072v-313.088z" />
|
||||
<glyph unicode="" d="M512-64c-282.752 0-512 229.248-512 512 0 282.688 229.248 512 512 512 282.752 0 512-229.248 512-512 0-282.752-229.248-512-512-512zM512 128.064c35.776 0 64.768 28.544 64.768 63.808 0 35.2-28.992 63.808-64.768 63.808-35.776 0-64.768-28.608-64.768-63.808 0-35.264 28.992-63.808 64.768-63.808zM650.752 724.288c-33.92 27.904-82.24 43.456-140.032 43.456-42.56 0-78.912-7.68-110.144-20.16-16.576-6.72-69.632-39.68-80.64-48.896l32.384-48.32c5.312-9.344 13.952-14.080 25.92-14.080 4.992 0 10.624 1.984 16.96 5.888 4.608 2.88 41.088 21.696 56.512 26.368 32.32 9.6 67.84 5.696 84.16 0.64 22.272-6.848 38.4-19.904 47.36-37.76 5.888-11.776 13.376-44.16-4.224-74.432-14.656-25.088-37.568-44.16-62.848-61.056-13.504-9.216-26.048-18.624-37.376-28.416-0.512 0-1.792-0.96-4.672-3.52 1.408 1.216 3.264 2.304 4.672 3.52 3.2 0.128-30.784-43.328-30.784-83.52 0-42.88 0-64 0-64h128v64c0 33.28 16.128 51.968 16.448 56.704 11.008 7.872 61.056 46.144 72.96 59.904 22.208 25.6 38.592 59.392 38.592 107.008 0 48.832-19.392 88.832-53.248 116.672z" />
|
||||
<glyph unicode="" d="M512 960c-282.752 0-512-229.184-512-511.936 0-282.816 229.248-512.064 512-512.064 282.752 0 512 229.248 512 512.064 0 282.752-229.248 511.936-512 511.936zM842.88 552.128l-367.296-367.232c-7.488-7.488-19.712-7.488-27.136 0l-54.272 54.784c-7.424 7.552-19.712 19.904-27.136 27.392l-126.336 132.8c-3.712 3.712-5.696 8.96-5.696 13.888 0 4.992 1.984 9.728 5.696 13.504l55.36 49.92c3.776 3.84 8.768 5.632 13.696 5.632 4.864-0.064 9.728-1.984 13.44-5.632l125.248-127.872c7.488-7.616 19.648-7.616 27.136 0l285.888 285.12c7.424 7.488 19.712 7.488 27.136 0l54.336-54.912c7.424-7.488 7.424-19.84-0.064-27.392z" />
|
||||
<glyph unicode="" d="M874.048 810.048c-199.936 200-524.096 199.936-724.096 0-199.936-199.872-199.936-524.096 0.064-724.032 199.936-199.936 524.096-199.936 724.032-0.064 200 199.936 200 524.16 0 724.096zM747.2 309.056c27.52-27.52 28.224-71.296 1.728-97.856-26.56-26.56-70.4-25.728-97.792 1.728l-139.072 139.008-139.584-139.584c-27.52-27.456-71.296-28.224-97.792-1.728-26.56 26.56-25.728 70.4 1.664 97.856l139.648 139.584-139.648 139.648c-27.456 27.392-28.224 71.168-1.664 97.728 26.496 26.56 70.336 25.792 97.792-1.664l139.584-139.584 139.072 139.072c27.456 27.456 71.232 28.224 97.792 1.664 26.496-26.56 25.728-70.336-1.728-97.792l-139.008-139.072 139.008-139.008z" />
|
||||
<glyph unicode="" d="M512 960.064c-282.752 0-512-229.312-512-512.064 0-282.816 229.248-512.064 512-512.064s512 229.248 512 512.064c0 282.752-229.248 512.064-512 512.064zM764.224 383.296h-187.392v-187.52c0-36.992-28.992-67.072-64.768-67.072s-64.768 30.080-64.768 67.072v187.52h-188.16c-36.992 0-67.072 28.928-67.072 64.704s30.080 64.768 67.072 64.768h188.16v188.16c0 37.056 28.992 67.072 64.768 67.072s64.768-30.016 64.768-67.072v-188.16h187.456c37.056 0 67.072-29.056 67.072-64.768s-30.016-64.704-67.136-64.704z" />
|
||||
<glyph unicode="" d="M288 960h-192c-35.328 0-64-28.608-64-64v-896c0-35.392 28.672-64 64-64h192c35.328 0 64 28.608 64 64v896c0 35.392-28.672 64-64 64zM928 960h-192c-35.392 0-64-28.608-64-64v-896c0-35.392 28.608-64 64-64h192c35.392 0 64 28.608 64 64v896c0 35.392-28.608 64-64 64z" />
|
||||
<glyph unicode="" d="M880 475.776l-832 480c-9.856 5.696-22.144 5.696-32 0-9.856-5.76-16-16.32-16-27.776v-960c0-11.456 6.144-22.016 16-27.712 4.928-2.88 10.496-4.288 16-4.288s11.072 1.408 16 4.288l832 480c9.856 5.696 16 16.256 16 27.712s-6.144 22.016-16 27.776z" />
|
||||
<glyph unicode="" d="M493.184 896c-48.384 0-63.040-27.84-63.040-27.84s-183.104-216.192-266.56-216.192c-82.176 0-81.344 0-81.344 0-45.44 0-82.24-36.416-82.24-81.28v-244.096c0-44.928 36.8-81.28 82.176-81.28 0 0 1.344 0 82.176 0 81.024 0 269.568-218.88 269.568-218.88 14.912-15.488 35.904-25.152 59.264-25.152 45.376 0 82.176 36.352 82.176 81.28v732.096c0 44.928-36.8 81.344-82.176 81.344zM843.968 817.728l-47.424-70.976c86.656-70.4 142.208-177.728 142.208-298.176s-55.488-227.84-142.208-298.112l47.424-70.976c109.44 85.888 180.032 219.136 180.032 369.088 0 150.016-70.592 283.2-180.032 369.152zM748.8 675.328l-47.872-71.68c41.344-38.912 67.392-93.76 67.392-155.072s-26.048-116.096-67.392-155.072l47.872-71.616c63.872 54.72 104.576 136 104.576 226.688 0 90.816-40.704 171.968-104.576 226.752z" />
|
||||
<glyph unicode="" d="M492.8 896c-51.2 0-64-25.6-64-25.6s-179.2-217.6-262.4-217.6c-83.2 0-83.2 0-83.2 0-44.8 0-83.2-38.4-83.2-83.2v-243.2c0-44.8 38.4-83.2 83.2-83.2 0 0 0 0 83.2 0 83.2 0 268.8-217.6 268.8-217.6 12.8-12.8 32-25.6 57.6-25.6 44.8 0 83.2 38.4 83.2 83.2v729.6c0 44.8-38.4 83.2-83.2 83.2z" />
|
||||
<glyph unicode="" d="M832 640l-213.056-208.448-125.696 125.696 210.752 210.688-160 160.064h448v-448l-160 160zM526.976 342.528l-206.976-202.496 167.488-172.032h-455.488v452.288l160-164.288 210.752 210.752 124.224-124.224z" />
|
||||
<glyph unicode="" d="M991.936 863.36h-959.872c-17.6 0-32-15.36-32-34.176v-124.672c0-18.048 14.4-32.832 32-32.832h959.872c17.6 0 32 14.72 32 32.832v124.672c0 18.816-14.4 34.176-32 34.176zM991.936 543.36h-959.872c-17.6 0-32-15.36-32-34.24v-124.608c0-18.112 14.4-32.832 32-32.832h959.872c17.6 0 32 14.72 32 32.832v124.672c0 18.816-14.4 34.176-32 34.176zM991.936 223.36h-959.872c-17.6 0-32-15.36-32-34.24v-124.608c0-17.984 14.4-32.768 32-32.768h959.872c17.6 0 32 14.72 32 32.768v124.608c0 18.88-14.4 34.24-32 34.24z" />
|
||||
<glyph unicode="" d="M352 896h-320c-19.2 0-32-12.8-32-32v-320c0-19.2 12.8-32 32-32h320c19.2 0 32 12.8 32 32v320c0 19.2-12.8 32-32 32zM352 384h-320c-19.2 0-32-12.8-32-32v-320c0-19.2 12.8-32 32-32h320c19.2 0 32 12.8 32 32v320c0 19.2-12.8 32-32 32zM992 896h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
|
||||
<glyph unicode="" d="M288 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM288 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM288 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32z" />
|
||||
<glyph unicode="" d="M416 960h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM992 960h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM416 384h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM992 384h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32z" />
|
||||
<glyph unicode="" d="M992 896h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 896h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 640h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 384h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 128h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
<glyph unicode="" d="M992 896h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
<glyph unicode="" d="M992 832h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 512h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 192h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM256 768c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128zM256 448c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128zM256 128c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128z" />
<glyph unicode="" d="M896 960h-768c-70.656 0-128-57.344-128-128v-768c0-70.656 57.344-128 128-128h768c70.656 0 128 57.344 128 128v768c0 70.656-57.344 128-128 128zM384 895.936c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM192 895.936c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM896.064 64h-768.064v640h768.064v-640z" />
<glyph unicode="" d="M938.752 767.744h-106.688v106.624c0 47.104-38.208 85.312-85.312 85.312h-661.44c-47.104 0-85.312-38.208-85.312-85.312v-660.672c0-47.168 37.248-85.376 83.136-85.376h108.864v-106.688c0-47.104 37.248-85.312 83.136-85.312h665.792c45.952 0 83.2 38.208 83.2 85.312v660.736c-0.064 47.104-38.272 85.376-85.376 85.376zM384 895.616c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM192 895.616c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM128 255.68l-0.064 448h576.064v-448h-576zM896 63.68h-576v64.64h428.864c45.952 0 83.2 38.208 83.2 85.376v297.984h63.936v-448z" />
<glyph unicode="" d="M768 191.936c-121.6 0-197.888 68.736-256 144.448-58.112-75.712-134.4-144.448-256-144.448-102.848 0-256 68.224-256 256.064 0 187.776 153.152 256 256 256 121.6 0 197.888-68.672 256-144.448 58.112 75.776 134.4 144.448 256 144.448 102.912 0 256-68.224 256-256 0-187.84-153.088-256.064-256-256.064zM256 576c-29.632-0.512-128-11.136-128-128 0-121.856 106.624-128 128-128 78.272 0 123.264 47.808 178.752 128-55.488 80.128-100.48 128-178.752 128zM589.248 448c55.424-80.128 100.352-127.872 178.432-128 30.336 0.448 128.32 11.264 128.32 128 0 121.856-106.624 128-128 128-78.272 0-123.264-47.872-178.752-128z" />
<glyph unicode="" d="M800 512c-22.976 0-59.328 0-96 0v-128c22.656 0 44.8 0 64 0 12.096 0 23.296 0 32 0 123.712 0 224-100.288 224-224s-100.288-224-224-224-224 100.224-224 224c0 22.976 0 59.264 0 96h-128c0-22.656 0-44.864 0-64 0-12.096 0-23.232 0-32 0-123.776-100.288-224-224-224s-224 100.224-224 224 100.288 224 224 224c22.976 0 59.328 0 96 0v128c-22.592 0-44.864 0-64 0-12.096 0-23.232 0-32 0-123.712 0-224 100.224-224 224 0 123.712 100.288 224 224 224s224-100.288 224-224c0-22.976 0-59.328 0-96h128c0 22.592 0 44.864 0 64 0 12.096 0 23.232 0 32 0 123.712 100.288 224 224 224s224-100.288 224-224c0-123.776-100.288-224-224-224zM320 736c0 52.992-43.008 96-96 96s-96-43.008-96-96c0-53.056 43.008-96 96-96 7.744 0 19.52 0 32 0 29.568 0 64 0 64 0s0 69.056 0 96zM320 192c0 29.504 0 64 0 64s-69.056 0-96 0c-52.992 0-96-43.008-96-96s43.008-96 96-96 96 43.008 96 96c0 7.744 0 19.52 0 32zM704 160c0-52.992 43.008-96 96-96s96 43.008 96 96-43.008 96-96 96c-7.744 0-19.52 0-32 0-29.568 0-64 0-64 0s0-69.12 0-96zM576 512h-128v-128h128v128zM800 832c-52.992 0-96-43.008-96-96 0-7.744 0-19.456 0-32 0-29.632 0-64 0-64s69.056 0 96 0c52.992 0 96 42.944 96 96 0 52.992-43.008 96-96 96z" />
<glyph unicode="" d="M801.984 406.4c-28.672 17.664-65.408 7.232-81.92-23.36-0.576-1.024-0.576-2.24-1.152-3.264l-1.472 0.96c-41.984-74.432-117.696-124.736-205.184-124.736s-163.136 50.304-205.184 124.736l-1.408-0.832c-0.704 1.6-0.704 3.456-1.6 5.12-16.576 30.528-53.312 41.024-82.048 23.36s-38.528-56.832-21.952-87.36c1.28-2.24 3.264-3.648 4.672-5.696l-1.088-0.704c53.12-94.208 143.104-161.6 248.576-180.608v-70.016h-120.064c-33.152 0-60.032-28.672-60.032-64 0-35.392 26.88-64 60.032-64h360.128c33.216 0 60.032 28.608 60.032 64 0 35.328-26.816 64-60.032 64h-120v69.952c105.472 19.008 195.456 86.528 248.576 180.672l-0.384 0.256c1.088 1.472 2.624 2.432 3.456 4.096 16.64 30.656 6.784 69.76-21.952 87.424zM512.256 320c99.456 0 180.032 85.952 180.032 192v256c0 106.048-80.64 192-180.032 192-99.456 0-180.096-85.952-180.096-192v-256c0-106.048 80.64-192 180.096-192z" />
<glyph unicode="" d="M948.544 446.848c100.48 102.784 100.352 269.312 0 372.032-51.392 52.48-118.976 78.144-186.24 76.992-94.144-1.536-249.344-128.96-249.344-128.96s-159.616 129.216-256 129.088c-65.728-0.128-131.392-25.856-181.504-77.056-100.416-102.784-100.48-269.248 0-372.032l436.544-446.336 436.544 446.272z" />
<glyph unicode="" d="M512.128 432.064c-87.872 0-159.104 73.728-159.104 164.8 0 91.136 71.232 164.864 159.104 164.864s159.104-73.728 159.104-164.864c0-91.008-71.232-164.8-159.104-164.8zM512.128 960.384c-194.496 0-352.128-163.328-352.128-364.8 0-190.272 159.488-435.776 265.984-555.264 39.808-44.544 86.144-104.704 86.144-104.704s49.792 60.352 92.48 106.304c106.368 114.496 259.648 344.448 259.648 553.6 0 201.536-157.632 364.864-352.128 364.864z" />
<glyph unicode="" d="M960.512 710.272c-21.76 35.968-48.576 71.168-81.344 103.808-33.216 32.896-68.992 59.968-105.6 81.6l64.32 64.32c0 0 93.056 0 139.648-46.528 46.464-46.592 46.464-139.648 46.464-139.648l-63.488-63.552zM387.2 128.768h-194.432v194.432l23.36 23.36c39.552-18.56 78.784-44.928 114.176-80.32 35.392-35.328 61.696-74.688 80.32-114.176l-23.424-23.296zM906.752 656.512l-440-448.32c-22.72 37.632-50.688 74.304-84.992 108.352-34.688 34.432-72.064 62.72-110.336 85.312l449.152 440.896c37.824-17.856 75.456-42.944 109.312-76.864s59.008-71.424 76.864-109.376zM128 832v-767.936h768v319.936l128 127.936v-482.88c0-51.392-41.6-93.056-93.056-93.056h-837.888c-51.392 0-93.056 41.664-93.056 93.056v837.824c0 51.456 41.664 93.12 93.056 93.12h482.944l-128-128h-320z" />
<glyph unicode="" d="M960.256 96.064v-0.768l-256.256 256.256v-127.488c0-70.72-57.344-128.064-128-128.064h-448c-70.656 0-128 57.344-128 128.064v447.872c0 70.72 57.344 128.064 128 128.064h448c70.656 0 128-57.344 128-128.064v-128.576l256 256v0.64c35.392 0 64-28.608 64-64v-576c0-35.264-28.544-63.808-63.744-63.936z" />
<glyph unicode="" d="M897.024 768h-147.84l-42.88 90.624c-9.792 21.312-45.056 37.376-79.36 37.376h-244.8c-34.304 0-69.568-16.064-79.424-37.376l-41.856-90.624h-132.864c-128 0-128-64-128-64v-640c0 0 0-64 128-64h768c128 0 128 64 128 64v640c0 0 0 64-126.976 64zM512 128.064c-141.376 0-256 114.496-256 255.872 0 141.44 114.624 256.064 256 256.064s256-114.624 256-256.064c0-141.376-114.624-255.872-256-255.872zM512 544c-88.384 0-160-71.616-160-160 0-88.32 71.616-160 160-160s160 71.68 160 160c0 88.384-71.616 160-160 160z" />
<glyph unicode="" d="M512.064 960c-282.688 0-511.872-229.184-511.872-511.936 0-282.816 229.184-511.936 511.872-511.936 282.752 0 511.936 229.12 511.936 511.936 0 282.752-229.184 511.936-511.936 511.936zM678.976 268.48l-14.848-14.976c-12.416-12.352-33.344-12.992-46.464-1.28l-171.52 147.52c-13.12 11.712-23.040 35.712-22.208 53.248l17.856 283.072c0.896 17.6 16 31.936 33.664 31.936h21.056c17.6 0 32.704-14.336 33.536-31.936l14.656-231.808c0.896-17.536 11.2-42.688 22.848-55.808l112.768-133.568c11.648-12.992 11.136-33.984-1.344-46.4z" />
<glyph unicode="" d="M512.064 800c-338.944 0-512.96-352.896-512.96-352.896s131.328-352.96 512.96-352.96c345.472 0 512.832 351.616 512.832 351.616s-168.64 354.24-512.832 354.24zM512.832 226.496c-123.968 0-213.504 96.576-213.504 220.608 0 124.096 89.536 220.544 213.504 220.544 123.904 0 213.44-96.448 213.44-220.544 0-124.032-89.6-220.608-213.44-220.608zM512.832 579.456c-70.784-0.128-128.128-61.44-128.128-132.352 0-70.848 57.344-132.352 128.128-132.352s128.064 61.504 128.064 132.352c0 70.912-57.28 132.544-128.064 132.352z" />
<glyph unicode="" d="M457.856 168.064l289.28-226.496c4.736-3.776 7.616-5.632 10.368-5.632 8 0 10.496 5.504 10.496 14.528v214.4c0 15.104 9.984 27.136 23.36 27.136h105.152c127.488 0 127.36 61.44 127.36 61.44v640.064c0 0 0 66.56-127.872 66.56h-767.936c-128 0-128-66.56-128-66.56v-640.064c0 0-0.064-61.44 128.448-61.44h256c0 0 53.568-1.472 73.344-23.936z" />
<glyph unicode="" d="M1024 26.752c0-50.176-41.6-90.752-93.12-90.752h-291.264v351.68c0 53.056-38.016 96.128-85.056 96.128h-85.12c-46.976 0-85.12-43.072-85.12-96.128v-351.68h-291.264c-51.392 0-93.056 40.576-93.056 90.752v478.976c0 23.36 9.344 44.48 24.192 60.544l-0.96 1.856 425.92 372.992c34.304 25.152 89.984 25.152 124.288 0l427.264-372.992-0.448-2.368c14.592-16.064 23.744-36.928 23.744-60.032v-478.976z" />
<glyph unicode="" d="M896-64h-192v128h192.064v640h-768.064v-640h192v-128h-192c-70.656 0-128 57.344-128 128v768c0 70.656 57.344 128 128 128h768c70.656 0 128-57.344 128-128v-768c0-70.656-57.344-128-128-128zM192 895.936c-35.392 0-64-28.608-64-63.936 0-35.392 28.608-64 64-64s64 28.608 64 64c0 35.328-28.608 63.936-64 63.936zM384 895.936c-35.392 0-64-28.608-64-63.936 0-35.392 28.608-64 64-64s64 28.608 64 64c0 35.328-28.608 63.936-64 63.936zM271.936 200.704c-22.208 23.232-22.208 60.864 0 84.16l196.928 209.408c6.144 6.464 13.44 10.496 21.12 13.44 0.064 0.064 0.192 0.064 0.32 0.128 5.888 2.24 11.84 3.456 17.984 3.712 2.24 0.192 4.416 0.384 6.656 0.256 2.752-0.192 5.376-1.024 8-1.6 11.328-2.24 22.272-6.72 30.976-15.872l196.864-209.408c22.272-23.296 22.272-60.928 0-84.16-22.272-23.104-58.304-23.104-80.576 0l-94.208 119.232v-319.936c0-34.176-32.064-64.064-64.64-64.064-32.512 0-63.36 29.888-63.36 64.064v319.936l-95.488-119.296c-22.272-23.168-58.304-23.168-80.576 0z" />
<glyph unicode="" d="M723.392 353.6c-11.328 11.456-15.104 32.704-8.384 47.296 0 0 47.232 102.464 47.232 177.728 0 210.624-170.432 381.376-380.736 381.376s-380.8-170.752-380.8-381.312c0-210.624 170.496-381.376 380.8-381.376 75.2 0 177.408 47.36 177.408 47.36 14.656 6.784 35.968 2.944 47.232-8.448l291.456-291.776c11.456-11.392 30.080-11.392 41.344 0l75.776 75.904c11.456 11.456 11.456 30.144 0 41.472l-291.328 291.776zM381.504 373.376c-113.088 0-205.056 92.032-205.056 205.312 0 113.216 92.032 205.312 205.056 205.312s204.992-92.096 204.992-205.312c0-113.28-91.904-205.312-204.992-205.312z" />
<glyph unicode="" d="M449.024 596.288c106.56 0 193.024 81.344 193.024 181.888-0.064 100.416-86.464 181.824-193.024 181.824s-193.024-81.408-193.024-181.824c0-100.48 86.464-181.888 193.024-181.888zM600.32 583.68c-42.56-29.44-94.592-47.424-151.296-47.424-56.96 0-109.12 18.112-151.744 47.744-173.248-37.312-297.28-136.832-297.28-254.016v-258.88c0-17.152 14.4-31.104 32-31.104h64c17.6 0 32 12.608 32 28.096 0 8.96 0 201.856 0 201.856 0 16.64 9.536 9.984 21.376 9.984 11.776 0 21.312-9.024 21.312-19.968l0.32-179.968c0.896-10.368 9.6-84.416 20.544-86.592 0 0 66.56-57.344 256.448-57.344 191.232 0 256.448 57.344 256.448 57.344 10.944 2.112 19.712 76.16 20.544 86.592l0.32 179.968c0 11.008 9.536 19.968 21.376 19.968 11.776 0 21.312-9.024 21.312-19.968 0 0 0-182.912 0-191.872 0-15.488 14.4-28.096 32-28.096h64c17.6 0 32 14.016 32 31.104v258.88c0 116.864-123.392 216.128-295.68 253.696z" />
<glyph unicode="" d="M896 864c-50.496 0-768 0-768 0-50.496 0-128-41.152-128-90.944v-18.112c0 0 432.768-361.856 512-361.856s512 360.704 512 360.704v19.2c0 49.856-77.504 91.008-128 91.008zM0 608.96v-512.896c0 0 0-64.064 128-64.064h768c128.192 0 128 64.064 128 64.064v514.496c0 0-364.16-324.992-512-324.992-146.304 0-512 323.392-512 323.392z" />
<glyph unicode="" d="M896-64h-768c-35.328 0-64 28.608-64 64.064v447.936c0 35.328 28.672 64 64 64h64v128c0 176.704 143.232 320 320 320s320-143.296 320-320v-128h64c35.392 0 64-28.672 64-64v-447.936c0-35.456-28.608-64.064-64-64.064zM704 640c0 105.984-85.952 192-192 192s-192-86.016-192-192v-128h384v128z" />
<glyph unicode="" d="M767.872 787.008l-0.128-0.064c-0.896 0.64-1.6 1.536-2.624 2.24-29.184 20.032-68.992 12.608-89.024-16.704-19.968-29.312-12.48-69.312 16.64-89.344 0.768-0.64 1.536-0.896 2.24-1.28l-0.256-0.448c82.88-58.048 137.28-154.496 137.28-263.744 0-177.536-143.296-321.472-320-321.472s-320 143.936-320 321.472c0 109.248 54.4 205.696 137.28 263.744l-0.256 0.448c0.704 0.384 1.472 0.64 2.24 1.216 29.184 20.032 36.608 60.032 16.64 89.344-20.032 29.312-59.84 36.8-89.024 16.704-0.96-0.704-1.728-1.536-2.688-2.24l-0.064 0.128c-116.032-81.408-192.128-216.32-192.128-369.344 0-248.576 200.576-450.176 448-450.176s448 201.6 448 450.176c0 153.024-76.096 287.936-192.128 369.344zM512 352c35.392 0 64 28.608 64 64v447.936c0 35.392-28.608 64.064-64 64.064-35.328 0-64-28.672-64-64.064v-447.936c0-35.392 28.672-64 64-64z" />
<glyph unicode="" d="M320 576c-35.328 0-64-28.608-64-64s28.672-64 64-64 64 28.608 64 64-28.672 64-64 64zM512 384c-35.328 0-64-28.608-64-64s28.672-64 64-64 64 28.608 64 64-28.672 64-64 64zM320 384c-35.328 0-64-28.608-64-64s28.672-64 64-64 64 28.608 64 64-28.672 64-64 64zM896 895.936h-128c0 35.392-28.608 64.064-64 64.064s-64-28.672-64-64.064h-256c0 35.392-28.672 64.064-64 64.064s-64-28.672-64-64.064h-128c-70.656 0-128-57.28-128-127.936v-640c0-70.72 57.344-128 128-128h768c70.656 0 128 57.28 128 128v640c0 70.656-57.344 127.936-128 127.936zM896 128h-768v640h128c0-35.392 28.672-64 64-64s64 28.608 64 64h256c0-35.392 28.608-64 64-64s64 28.608 64 64h128v-640zM704 576c-35.392 0-64-28.608-64-64s28.608-64 64-64 64 28.608 64 64-28.608 64-64 64zM512 576c-35.328 0-64-28.608-64-64s28.672-64 64-64 64 28.608 64 64-28.672 64-64 64zM704 384c-35.392 0-64-28.608-64-64s28.608-64 64-64 64 28.608 64 64-28.608 64-64 64z" />
<glyph unicode="" d="M918.272 527.040c-17.344 2.56-35.968 18.304-41.344 35.008l-26.112 63.232c-8.128 15.552-6.272 39.872 4.352 53.952l42.112 56.192c10.624 14.080 9.728 36.352-1.984 49.536l-46.272 46.4c-13.12 11.712-35.52 12.544-49.6 1.984l-56.128-42.24c-14.144-10.496-38.4-12.48-54.016-4.288l-63.168 26.048c-16.832 5.312-32.64 24-35.008 41.472l-9.984 69.504c-2.496 17.408-18.816 33.152-36.352 34.944 0 0-10.816 1.216-32.768 1.216s-32.768-1.216-32.768-1.216c-17.536-1.792-33.92-17.536-36.352-34.944l-9.984-69.504c-2.432-17.472-18.176-36.16-35.008-41.472l-63.168-26.048c-15.552-8.192-39.808-6.208-53.888 4.288l-56.256 42.24c-14.016 10.624-36.416 9.728-49.6-1.984l-46.208-46.272c-11.648-13.184-12.544-35.52-1.984-49.6l42.176-56.192c10.56-14.080 12.48-38.4 4.288-53.952l-26.048-63.296c-5.376-16.704-24-32.448-41.408-35.008l-69.504-9.792c-17.472-2.56-33.216-18.88-35.008-36.416 0 0-1.152-10.88-1.152-32.832 0-21.952 1.152-32.896 1.152-32.896 1.856-17.472 17.6-33.792 35.008-36.288l69.504-9.856c17.408-2.496 36.032-18.304 41.408-35.008l26.112-63.232c8.192-15.616 6.272-39.808-4.288-53.888l-42.176-56.256c-10.56-14.144-13.12-33.28-5.632-42.496 7.424-9.216 28.864-32.064 28.928-32.064 0-0.128 7.232-6.72 16-14.656 8.768-8.064 44.48-19.2 58.56-8.64l56.256 42.112c14.080 10.624 38.336 12.544 53.888 4.352l63.040-25.984c16.832-5.44 32.576-24 35.008-41.472l9.984-69.504c2.432-17.344 18.816-33.28 36.288-35.072 0 0 10.88-1.152 32.832-1.152s32.768 1.152 32.768 1.152c17.472 1.792 33.856 17.664 36.352 35.072l9.984 69.504c2.368 17.472 18.112 36.032 35.008 41.472l63.104 25.984c15.616 8.192 39.872 6.272 54.016-4.224l56.256-42.24c14.144-10.56 36.352-9.664 49.6 1.92l46.272 46.336c11.648 13.184 12.48 35.52 1.856 49.6l-42.112 56.256c-10.624 14.080-12.48 38.272-4.352 53.888l26.112 63.232c5.376 16.768 24 32.512 41.344 35.008l69.504 9.856c17.344 2.496 33.152 18.816 35.008 36.288 0 0 1.152 10.88 1.152 32.896 0 21.952-1.152 32.832-1.152 32.832-1.856 17.536-17.6 33.856-35.008 36.416l-69.44 9.792zM512 320c-70.656 0-128 57.344-128 128 0 70.72 57.344 128 128 128 70.592 0 128-57.344 128-128 0-70.656-57.344-128-128-128z" />
<glyph unicode="" d="M768 697.024v0h128c35.392 0 64-28.672 64-64v-640c0-35.392-28.608-64-64-64h-672c-88.384 0-160 71.616-160 160v703.936c0 88.384 71.616 160.064 160 160.064h672c35.392 0 64-28.672 64-64 0-35.392-28.608-64.064-64-64.064h-640c-35.328 0-64-28.608-64-64s28.672-64 64-64h128v-256l64 64 64-64v256h256z" />
<glyph unicode="" d="M0 64v192h128v-192.128h640v768.128h-640v-192h-128v192c0 70.656 57.344 128 128 128h640c70.72 0 128-57.344 128-128v-768c0-70.72-57.28-128-128-128h-640c-70.656 0-128 57.28-128 128zM264.768 688c23.232 22.272 60.864 22.272 84.096 0l209.408-196.8c6.528-6.208 10.496-13.568 13.504-21.184 0.064-0.128 0.064-0.192 0.128-0.32 2.24-5.824 3.456-11.84 3.648-17.984 0.256-2.24 0.448-4.416 0.256-6.72-0.128-2.688-1.024-5.248-1.664-7.936-2.176-11.264-6.656-22.208-15.872-30.976l-209.408-196.8c-23.232-22.272-60.864-22.272-84.096 0-23.168 22.272-23.168 58.24 0 80.512l119.232 94.208h-320c-34.112 0-64 32.064-64 64.64 0 32.512 29.888 63.36 64 63.36h320l-119.232 95.552c-23.232 22.144-23.232 58.304 0 80.448z" />
<glyph unicode="" d="M928 704h-64v-640c0 0-1.984-128-128-128 0 0-318.016 0-448 0s-128 128-128 128v640h-64c-35.328 0-64 28.672-64 64s28.672 64 64 64h320v32c0 53.056 42.944 96 96 96 52.992 0 96-42.944 96-96v-32h320c35.392 0 64-28.608 64-64s-28.608-64-64-64zM736 704h-448v-640h448v640zM416 640c35.328 0 64-28.672 64-64v-384c0-35.392-28.672-64-64-64s-64 28.608-64 64v384c0 35.328 28.672 64 64 64zM608 640c35.392 0 64-28.672 64-64v-384c0-35.392-28.608-64-64-64s-64 28.608-64 64v384c0 35.328 28.608 64 64 64z" />
<glyph unicode="" d="M896 768c0 0-278.016 0.064-320 0.064s-89.984 127.936-128 127.936-320 0-320 0c-70.656 0-128-57.28-128-128v-640.064c0-126.656 128-128 128-128h768c70.656 0 128 57.344 128 128v512c0 70.72-57.344 128.064-128 128.064zM896.064 127.936h-768.064v640.064c0 0 214.016 0 254.016 0s89.984-128 128-128c40 0 386.048 0 386.048 0v-512.064z" />
<glyph unicode="" d="M895.424 960.064h-767.872c-127.296 0-127.552-128.064-127.552-128.064v-511.936c0 0 0.704-128.064 128-128.064h256c0 0 53.568-1.472 73.344-23.936l289.344-226.496c4.736-3.776 7.616-5.632 10.432-5.632 8 0 10.368 5.504 10.368 14.592v214.336c0 15.104 9.984 27.2 23.424 27.2h105.088c125.312 0 128 128.064 128 128.064v511.872c0 0-1.28 128.064-128.576 128.064zM896 320.064h-256v-128l-164.608 128h-347.392v511.936h768v-511.936z" />
<glyph unicode="" d="M896 63.872h-768v768h320v128l-358.976 0.064c-49.152 0-89.024-39.936-89.024-89.088v-845.952c0-49.152 39.872-89.024 89.024-89.024h845.952c49.152 0 89.024 39.872 89.024 89.024v358.976h-128v-320zM1024 896c0 14.656-6.080 27.52-14.72 38.272-1.344 1.728-2.048 3.712-3.584 5.312-0.192 0.128-0.256 0.384-0.384 0.576-0.384 0.32-0.448 0.832-0.832 1.216-4.096 4.096-9.152 6.528-13.952 9.28-2.112 1.216-3.84 3.008-6.080 3.968-8.704 3.776-17.92 5.376-27.264 5.12-0.128 0-0.256 0.064-0.384 0.064h-313.024c-36.992 0.064-67.008-28.544-67.008-63.808 0-35.2 30.080-63.808 67.136-63.808h161.216l-402.56-403.328c-24.832-24.768-24.832-64.768 0-89.472 24.832-24.768 65.024-24.768 89.792 0l403.968 403.52v-163.2c0-37.056 28.608-67.072 63.872-67.072s63.808 30.016 63.808 67.072v313.024c0 0.64-0.32 1.152-0.32 1.728 0 0.512 0.32 1.024 0.32 1.536z" />
<glyph unicode="" d="M0 576.448v107.712c0 45.952 38.208 83.136 85.312 83.136h107.392v90.432c0 21.056 21.568 102.208 48.192 102.208h96.384c26.624 0 48.192-81.152 48.192-102.208v-90.432h319.232v90.432c0 21.056 21.632 102.208 48.192 102.208h96.384c26.624 0 48.192-81.152 48.192-102.208v-90.432h41.28c47.168 0 85.376-37.184 85.376-83.136v-107.776h-1024.128zM1024.064 511.36v-492.224c0-45.952-38.208-83.2-85.376-83.2h-853.376c-47.104 0-85.312 37.248-85.312 83.2v492.224h1024.064z" />
<glyph unicode="" d="M32 447.936c288 32.064 448 192.064 480 480.064 32.064-288 192.064-448 480.128-480.064-288.064-32-448.064-192-480.128-480-32 288-192 448-480 480z" />
<glyph unicode="" d="M1024 448l-380.8-128-10.304-384-245.696 304.96-387.2-109.376 228.992 316.416-228.992 316.416 387.2-109.312 245.696 304.896 10.304-384 380.8-128z" />
<glyph unicode="" d="M768 223.552c35.392 0 64 28.672 64 64.064s-28.608 64.064-64 64.064-64-28.672-64-64.064 28.608-64.064 64-64.064zM938.752 864h-853.376c-47.168 0-85.376-38.208-85.376-85.376v-661.184c0-47.168 38.208-85.44 85.376-85.44h853.376c47.104 0 85.312 38.272 85.312 85.44v661.184c0 47.168-38.208 85.376-85.312 85.376zM896.064 160.192h-768.064v255.552h768.064v-255.552zM896.064 607.872h-768.064v128.064h768.064v-128.064z" />
<glyph unicode="" d="M939.712 875.712c-112.448 112.448-294.784 112.448-407.296-0.064l-448-448c-112.512-112.512-112.512-294.848-0.064-407.296s294.784-112.512 407.296 0l94.848 92.16c-51.008 1.152-97.536 17.728-136.96 44.672l-48.448-46.4c-62.528-62.528-163.84-62.528-226.304 0-62.464 62.464-62.464 163.84 0.064 226.304l448 448c62.528 62.528 163.84 62.528 226.24 0 62.528-62.528 62.592-163.776 0.064-226.24l-223.232-224.768c-18.752-18.752-49.152-18.752-67.904 0s-18.752 49.152 0 67.904l168.576 170.176c12.48 12.48 12.544 32.768 0 45.248l-45.248 45.248c-12.48 12.48-32.768 12.48-45.248 0l-168.576-170.176c-68.736-68.736-68.736-180.16 0-248.896s180.16-68.736 248.896 0l223.232 224.832c112.448 112.448 112.448 294.848 0.064 407.296z" />
<glyph unicode="" d="M939.648 875.648c-54.464 54.4-126.784 84.352-203.648 84.352-76.928 0-149.248-29.952-203.648-84.352 0 0-181.696-181.632-192.128-191.936-54.208-54.336-84.096-126.72-84.224-204.096 0.128-76.8 30.080-148.992 84.352-203.264l23.36-23.424c6.272-6.272 14.528-9.344 22.656-9.344 8.192 0 16.384 3.136 22.656 9.344l45.248 45.248c12.48 12.48 12.48 32.768 0 45.248l-23.424 23.424c-61.376 61.376-62.208 162.048-1.792 224.512 1.856 1.856 193.856 193.792 193.856 193.792 30.208 30.208 70.336 46.848 113.088 46.848s82.88-16.64 113.152-46.784v-0.064c62.528-62.592 62.528-163.776 0-226.24l-9.856-9.856c15.424-41.6 24.64-86.208 24.704-133.056 0-8.512-1.216-16.704-1.664-25.024l77.312 77.376c112.448 112.512 112.384 294.912 0 407.296zM660.16 643.136c-6.208 6.272-14.464 9.344-22.592 9.344-8.256 0-16.448-3.136-22.656-9.344l-45.248-45.248c-12.544-12.48-12.544-32.768 0-45.248l23.36-23.424c61.376-61.376 62.272-162.048 1.856-224.512-1.856-1.856-193.856-193.792-193.856-193.792-30.144-30.272-70.272-46.912-113.088-46.912-42.688 0-82.816 16.64-113.088 46.784v0.064c-62.528 62.592-62.528 163.776-0.064 226.24l9.92 9.856c-15.488 41.6-24.704 86.208-24.704 133.056 0 8.512 1.152 16.704 1.664 25.024l-77.312-77.376c-112.512-112.512-112.448-294.848 0-407.232 54.464-54.464 126.784-84.416 203.648-84.416s149.184 29.952 203.648 84.352c0 0 181.696 181.632 192.128 191.936 54.208 54.336 84.096 126.72 84.224 204.096-0.128 76.8-30.144 148.992-84.352 203.264l-23.488 23.488z" />
<glyph unicode="" d="M1012.736 484.16l-241.216 352c-11.968 17.408-31.68 27.84-52.8 27.84h-654.72c-35.392 0-64-28.672-64-64v-704c0-35.328 28.608-64 64-64h654.72c21.12 0 40.896 10.368 52.8 27.84l241.216 352c15.040 21.76 15.040 50.56 0 72.32zM736 352c-52.992 0-96 43.008-96 96s43.008 96 96 96 96-43.008 96-96-43.008-96-96-96z" />
<glyph unicode="" d="M842.752 960h-660.544c-47.552 0-86.208-38.144-86.208-64v-853.376c0-68.416 38.656-106.624 86.208-106.624h660.544c47.040 0 85.248 38.208 85.248 85.312v853.376c0 47.168-38.208 85.312-85.248 85.312zM544 128h-256c-35.392 0-64 28.608-64 64s28.608 64 64 64h256c35.392 0 64-28.608 64-64s-28.608-64-64-64zM736 384h-448c-35.392 0-64 28.608-64 64s28.608 64 64 64h448c35.392 0 64-28.608 64-64s-28.608-64-64-64zM736 640h-448c-35.392 0-64 28.608-64 64s28.608 64 64 64h448c35.392 0 64-28.608 64-64s-28.608-64-64-64z" />
<glyph unicode="" d="M938.752 32h-853.376c-47.168 0-85.376 37.248-85.376 83.264v665.472c0 46.016 38.208 83.264 85.376 83.264h853.376c47.104 0 85.312-37.248 85.312-83.264v-665.472c0-46.016-38.208-83.264-85.312-83.264zM896.064 736h-768.064v-511.808c0 0 64 64.064 128 128.064 64 64.064 128 0 128 0l64-64c0 0 118.72 120.768 192 192.128 66.88 66.944 128 0 128 0l128-128.128 0.064 383.744zM320 480c-35.328 0-64 28.672-64 63.936 0 35.392 28.672 64.064 64 64.064s64-28.672 64-64.064c0-35.264-28.672-63.936-64-63.936z" />
<glyph unicode="" d="M928-64h-832c-51.2 0-96 44.8-96 96v832c0 51.2 44.8 96 96 96h825.6c57.6 0 102.4-44.8 102.4-96v-825.6c0-57.6-44.8-102.4-96-102.4zM748.8 768c-121.6 0-172.8-83.2-172.8-166.4v-89.6h-64v-128h64v-384h128v384h128v128h-128v70.4c0 38.4 6.4 57.6 51.2 57.6h76.8v121.6s-38.4 6.4-83.2 6.4z" />
<glyph unicode="" d="M1017.6 646.4c0 83.2-64 147.2-147.2 147.2-115.2 6.4-236.8 6.4-358.4 6.4-121.6 0-243.2 0-358.4-6.4-83.2 0-147.2-64-147.2-147.2-6.4-70.4-6.4-134.4-6.4-198.4s0-128 6.4-198.4c0-83.2 64-147.2 147.2-147.2 115.2-6.4 236.8-6.4 358.4-6.4 121.6 0 243.2 0 358.4 6.4 83.2 0 147.2 64 147.2 147.2 6.4 64 6.4 128 6.4 198.4 0 64 0 128-6.4 198.4zM384 224v448l320-224-320-224z" />
<glyph unicode="" d="M876.8 896c-147.2 6.4-243.2-76.8-294.4-243.2 25.6 12.8 51.2 19.2 76.8 19.2 51.2 0 76.8-32 70.4-89.6 0-38.4-25.6-89.6-70.4-153.6-38.4-70.4-70.4-102.4-96-102.4-25.6 0-51.2 51.2-76.8 160-6.4 25.6-19.2 108.8-38.4 236.8-19.2 115.2-70.4 172.8-147.2 160-32 0-83.2-32-153.6-96-44.8-38.4-96-83.2-147.2-128l51.2-64c44.8 32 70.4 51.2 76.8 51.2 38.4 0 70.4-57.6 96-166.4 32-108.8 57.6-211.2 83.2-313.6 38.4-108.8 89.6-166.4 153.6-166.4 96 0 211.2 89.6 352 275.2 134.4 179.2 204.8 313.6 211.2 416 6.4 134.4-44.8 204.8-147.2 204.8z" />
<glyph unicode="" d="M1024 768c-38.4-19.2-76.8-25.6-121.6-32 44.8 25.6 76.8 64 89.6 115.2-38.4-25.6-83.2-38.4-134.4-51.2-38.4 38.4-96 64-153.6 64-108.8 0-204.8-96-204.8-211.2 0-19.2 0-32 6.4-44.8-172.8 6.4-332.8 89.6-435.2 217.6-19.2-32-25.6-64-25.6-102.4 0-70.4 38.4-134.4 96-172.8-32 0-64 12.8-96 25.6 0-102.4 70.4-185.6 166.4-204.8-19.2-12.8-38.4-12.8-57.6-12.8-12.8 0-25.6 0-38.4 6.4 25.6-83.2 102.4-147.2 198.4-147.2-70.4-57.6-160-89.6-262.4-89.6h-51.2c96-64 204.8-96 320-96 384 0 595.2 320 595.2 595.2v25.6c44.8 32 83.2 70.4 108.8 115.2z" />
<glyph unicode="" d="M179.2 57.6c76.8 115.2 211.2 185.6 358.4 185.6 134.4 0 256-64 339.2-160 89.6 96 147.2 224 147.2 364.8 0 281.6-230.4 512-512 512s-512-230.4-512-512c0-153.6 70.4-294.4 179.2-390.4zM787.2 294.4c-6.4-19.2-19.2-19.2-38.4-12.8-70.4 32-147.2 51.2-224 51.2-83.2 0-160-19.2-230.4-51.2-6.4-6.4-25.6-6.4-32 19.2-6.4 12.8 6.4 25.6 12.8 32 76.8 38.4 160 57.6 249.6 57.6s172.8-19.2 243.2-51.2c12.8-12.8 25.6-25.6 19.2-44.8zM832 422.4c-6.4-6.4-12.8-12.8-25.6-12.8h-6.4c-83.2 38.4-179.2 64-275.2 64s-185.6-19.2-268.8-57.6h-6.4c-12.8 0-19.2 6.4-25.6 12.8l-6.4 12.8c0 6.4 6.4 19.2 12.8 19.2 89.6 38.4 192 64 300.8 64 108.8 0 211.2-25.6 300.8-64v-38.4zM185.6 633.6c102.4 44.8 217.6 64 339.2 64 115.2 0 230.4-25.6 332.8-64 12.8-6.4 25.6-19.2 25.6-38.4 0-25.6-19.2-44.8-44.8-44.8h-6.4c-96 38.4-198.4 57.6-307.2 57.6s-211.2-19.2-307.2-51.2h-6.4c-25.6 0-44.8 19.2-44.8 44.8 0 6.4 6.4 25.6 19.2 32zM537.6 76.8c-89.6 0-166.4-44.8-211.2-108.8 57.6-19.2 121.6-32 185.6-32 83.2 0 160 19.2 224 51.2-44.8 57.6-115.2 89.6-198.4 89.6z" />
<glyph unicode="" d="M979.2 371.2c6.4 25.6 6.4 51.2 6.4 76.8 0 262.4-211.2 473.6-473.6 473.6-25.6 0-51.2 0-76.8-6.4-38.4 32-89.6 44.8-147.2 44.8-160 0-288-128-288-288 0-57.6 12.8-108.8 44.8-153.6-6.4-19.2-6.4-44.8-6.4-70.4 0-262.4 211.2-473.6 473.6-473.6 25.6 0 51.2 0 76.8 6.4 44.8-25.6 96-44.8 153.6-44.8 160 0 288 128 288 288-6.4 57.6-19.2 108.8-51.2 147.2zM736 230.4c-19.2-32-51.2-51.2-89.6-70.4-38.4-19.2-83.2-25.6-134.4-25.6-64 0-115.2 12.8-160 32-32 12.8-51.2 38.4-70.4 64-19.2 32-25.6 57.6-25.6 83.2 0 12.8 6.4 25.6 19.2 38.4 12.8 12.8 25.6 19.2 44.8 19.2 12.8 0 25.6-6.4 38.4-12.8 6.4-6.4 12.8-19.2 19.2-38.4 6.4-19.2 19.2-32 25.6-44.8 6.4-12.8 19.2-25.6 38.4-32 19.2-6.4 38.4-12.8 64-12.8 38.4 0 70.4 6.4 89.6 25.6 25.6 19.2 32 38.4 32 57.6 0 19.2-6.4 32-19.2 44.8-6.4 19.2-19.2 25.6-38.4 32-19.2 6.4-51.2 12.8-83.2 19.2-44.8 12.8-83.2 25.6-115.2 38.4-32 12.8-57.6 32-76.8 51.2-19.2 25.6-25.6 57.6-25.6 89.6 0 32 12.8 64 32 89.6 19.2 25.6 44.8 44.8 83.2 57.6 38.4 12.8 76.8 19.2 128 19.2 38.4 0 70.4-6.4 102.4-12.8 25.6-6.4 51.2-19.2 70.4-38.4 19.2-12.8 32-32 44.8-44.8s12.8-32 12.8-51.2c0-12.8-6.4-25.6-19.2-38.4-12.8-12.8-25.6-19.2-44.8-19.2-12.8 0-25.6 6.4-32 12.8-6.4 6.4-19.2 19.2-25.6 32-12.8 25.6-25.6 38.4-44.8 51.2-12.8 12.8-38.4 19.2-76.8 19.2-32 0-57.6-6.4-76.8-19.2-19.2-12.8-32-25.6-32-44.8 0-12.8 6.4-19.2 12.8-32l25.6-19.2c12.8-6.4 25.6-12.8 38.4-12.8 12.8-6.4 32-6.4 64-12.8 32-12.8 64-25.6 96-32 32-6.4 51.2-19.2 76.8-32 19.2-12.8 38.4-32 51.2-51.2 6.4-25.6 12.8-51.2 12.8-76.8 0-38.4-12.8-70.4-32-102.4z" />
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512 0-211.2 128-390.4 307.2-467.2 0 38.4 0 76.8 6.4 115.2 12.8 38.4 64 281.6 64 281.6s-12.8 32-12.8 76.8c0 76.8 44.8 134.4 96 134.4s70.4-32 70.4-76.8-32-115.2-44.8-179.2c-12.8-57.6 25.6-96 83.2-96 96 0 160 121.6 160 275.2 0 115.2-76.8 198.4-211.2 198.4-153.6 0-249.6-115.2-249.6-243.2 0-44.8 12.8-76.8 32-102.4 6.4-12.8 12.8-12.8 6.4-25.6 0-6.4-6.4-32-12.8-38.4-6.4-12.8-12.8-19.2-25.6-12.8-70.4 32-102.4 108.8-102.4 198.4 0 147.2 121.6 320 364.8 320 198.4 0 326.4-140.8 326.4-294.4 0-198.4-108.8-352-275.2-352-57.6 0-108.8 32-128 64 0 0-32-115.2-38.4-140.8-12.8-38.4-32-76.8-51.2-108.8 51.2-32 96-38.4 147.2-38.4 281.6 0 512 230.4 512 512s-230.4 512-512 512z" />
<glyph unicode="" d="M256 915.2c-134.4-51.2-224-147.2-249.6-288-12.8-83.2-6.4-172.8 32-249.6 6.4-19.2 19.2-32 32-51.2l19.2-19.2c12.8 6.4 25.6 6.4 32 12.8 44.8 25.6 76.8 64 115.2 96-128 153.6 6.4 332.8 172.8 377.6 160 38.4 371.2-25.6 416-192 19.2-64 6.4-140.8-44.8-192-25.6-25.6-64-44.8-102.4-51.2-25.6-6.4-44.8-6.4-70.4 0-12.8 6.4-25.6 6.4-38.4 6.4-19.2 6.4-38.4 6.4-38.4 25.6v268.8c0 19.2 0 12.8-12.8 19.2-12.8 0-25.6 0-38.4 6.4-38.4 0-83.2 0-121.6-6.4-12.8 0-19.2 0-19.2-19.2v-140.8l6.4-294.4c0-32 0-102.4-32-115.2-38.4-19.2-70.4 19.2-108.8 25.6 6.4-51.2-25.6-147.2 32-172.8 51.2-25.6 115.2-32 172.8-12.8 115.2 38.4 153.6 172.8 140.8 275.2 179.2-51.2 377.6 38.4 454.4 198.4 57.6 115.2 32 262.4-51.2 358.4-166.4 185.6-480 224-697.6 134.4z" />
<glyph unicode="" d="M928-64h-832c-51.2 0-96 44.8-96 96v832c0 51.2 44.8 96 96 96h825.6c57.6 0 102.4-44.8 102.4-96v-825.6c0-57.6-44.8-102.4-96-102.4zM262.4 768c-44.8 0-76.8-32-76.8-76.8 0-38.4 25.6-76.8 70.4-76.8 44.8 0 70.4 32 70.4 76.8 6.4 44.8-19.2 76.8-64 76.8zM339.2 569.6h-147.2v-441.6h147.2v441.6zM876.8 377.6c0 134.4-64 204.8-160 204.8-76.8 0-108.8-44.8-128-70.4v64h-153.6v-441.6h147.2v236.8c0 12.8 0 25.6 6.4 32 12.8 25.6 32 51.2 76.8 51.2 51.2 0 70.4-38.4 70.4-96v-230.4h147.2v249.6z" />
<glyph unicode="" d="M0 89.6v0zM236.8 396.8c89.6 0 153.6 96 140.8 211.2-19.2 121.6-108.8 217.6-198.4 217.6-89.6 6.4-153.6-89.6-140.8-211.2 19.2-115.2 108.8-217.6 198.4-217.6zM1024 704v83.2c0 96-76.8 172.8-166.4 172.8h-684.8c-96 0-172.8-76.8-172.8-166.4 57.6 51.2 140.8 96 224 96h358.4l-83.2-70.4h-108.8c70.4-25.6 115.2-115.2 115.2-204.8 0-76.8-44.8-140.8-102.4-185.6-57.6-44.8-70.4-64-70.4-102.4 0-32 64-89.6 96-108.8 96-64 128-128 128-230.4 0-19.2 0-32-6.4-51.2h307.2c96 0 172.8 76.8 172.8 172.8v531.2h-192v-192h-64v192h-198.4v64h192v192h64v-192h192zM185.6 192h64c-25.6 25.6-51.2 57.6-51.2 96 0 25.6 6.4 44.8 19.2 64h-32c-76.8 6.4-140.8 32-185.6 70.4v-275.2c51.2 32 115.2 44.8 185.6 44.8zM6.4 70.4v19.2c-6.4-6.4-6.4-12.8 0-19.2zM454.4 6.4c-12.8 57.6-70.4 89.6-140.8 140.8-25.6 6.4-57.6 12.8-89.6 12.8-89.6 0-172.8-32-217.6-89.6 12.8-76.8 83.2-134.4 166.4-134.4h288v32c0 12.8 0 25.6-6.4 38.4z" />
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512s230.4-512 512-512 512 230.4 512 512-230.4 512-512 512zM825.6 697.6c51.2-64 83.2-140.8 83.2-230.4-57.6 12.8-115.2 19.2-166.4 19.2-38.4 0-76.8-6.4-115.2-12.8l-25.6 64c83.2 32 160 83.2 224 160zM512 844.8c96 0 179.2-32 249.6-89.6-51.2-64-121.6-108.8-198.4-140.8-51.2 108.8-102.4 179.2-134.4 224 25.6 6.4 51.2 6.4 83.2 6.4zM332.8 806.4c32-32 83.2-102.4 147.2-217.6-121.6-38.4-243.2-44.8-320-44.8h-38.4c32 115.2 108.8 211.2 211.2 262.4zM115.2 448c12.8 6.4 25.6 6.4 44.8 6.4 83.2 0 217.6 6.4 364.8 51.2 6.4-19.2 12.8-32 25.6-51.2-102.4-32-179.2-83.2-230.4-134.4-51.2-51.2-89.6-96-108.8-128-64 70.4-96 160-96 256zM512 51.2c-89.6 0-172.8 32-236.8 76.8 12.8 25.6 44.8 70.4 89.6 115.2 51.2 44.8 115.2 96 204.8 128 32-83.2 57.6-185.6 76.8-294.4-38.4-19.2-83.2-25.6-134.4-25.6zM736 121.6c-19.2 102.4-44.8 185.6-76.8 268.8 25.6 6.4 51.2 6.4 83.2 6.4 44.8 0 102.4-6.4 153.6-19.2-12.8-108.8-70.4-198.4-160-256z" />
<glyph unicode="" d="M921.6 678.4h-256v64h256v-64zM499.2 416c12.8-25.6 25.6-57.6 25.6-96s-6.4-70.4-25.6-102.4l-51.2-51.2c-19.2-12.8-44.8-25.6-70.4-32s-57.6-6.4-89.6-6.4h-288v640h307.2c76.8 0 134.4-25.6 166.4-70.4 19.2-25.6 25.6-57.6 25.6-96s-12.8-70.4-32-96c-6.4-12.8-19.2-25.6-44.8-32 32-12.8 57.6-32 76.8-57.6zM147.2 518.4h134.4c25.6 0 51.2 6.4 70.4 12.8 19.2 12.8 25.6 32 25.6 57.6 0 32-12.8 51.2-32 57.6-25.6 6.4-51.2 12.8-83.2 12.8h-115.2v-140.8zM390.4 332.8c0 32-12.8 57.6-38.4 70.4-12.8 6.4-38.4 12.8-64 12.8h-140.8v-172.8h134.4c25.6 0 51.2 6.4 64 12.8 25.6 6.4 44.8 32 44.8 76.8zM1017.6 435.2c6.4-19.2 6.4-51.2 6.4-89.6h-332.8c0-44.8 19.2-76.8 44.8-96 19.2-12.8 38.4-19.2 64-19.2s51.2 6.4 64 19.2c19.2 6.4 25.6 19.2 32 32h121.6c0-25.6-19.2-57.6-44.8-83.2-38.4-44.8-96-64-172.8-64-57.6 0-115.2 19.2-160 57.6-44.8 32-70.4 96-70.4 179.2 0 76.8 19.2 140.8 64 185.6 44.8 44.8 96 64 166.4 64 38.4 0 76.8-6.4 108.8-19.2 32-12.8 57.6-38.4 76.8-70.4 19.2-32 25.6-64 32-96zM902.4 422.4c0 32-12.8 57.6-32 70.4-19.2 19.2-44.8 25.6-70.4 25.6-32 0-51.2-6.4-70.4-25.6-19.2-19.2-25.6-38.4-32-70.4h204.8z" />
<glyph unicode="" d="M565.888 547.328l69.824-33.728 105.408 33.728v61.184c0 126.080-102.784 228.608-229.12 228.608s-229.056-102.592-229.056-228.608v-321.024c0-29.632-24.192-53.696-53.824-53.696s-53.824 24.064-53.824 53.696v134.4h-175.296v-134.4c0-126.080 102.72-228.608 229.12-228.608 126.336 0 229.12 102.592 229.12 228.608v321.024c0 29.568 24.192 53.696 53.824 53.696 29.696 0 53.888-24.128 53.888-53.696l-0.064-61.184zM848.704 421.888v-134.4c0-29.632-24.128-53.696-53.824-53.696-29.696 0-53.888 24.064-53.888 53.696v137.088l-105.344-33.728-69.824 33.728v-137.088c0-126.080 102.784-228.608 229.12-228.608s229.056 102.592 229.056 228.608v134.4h-175.296z" />
<glyph unicode="" d="M608 307.2c-19.2-19.2 0-51.2 0-51.2l128-217.6s19.2-25.6 38.4-25.6 38.4 12.8 38.4 12.8l102.4 147.2s12.8 19.2 12.8 32c0 25.6-32 32-32 32l-243.2 76.8c-6.4 0-25.6 6.4-44.8-6.4zM595.2 416c12.8-19.2 44.8-12.8 44.8-12.8l243.2 70.4s32 12.8 38.4 32c6.4 19.2-6.4 38.4-6.4 38.4l-108.8 134.4s-12.8 19.2-32 19.2c-25.6 0-38.4-25.6-38.4-25.6l-140.8-217.6s-6.4-19.2 0-38.4zM480 499.2c32 6.4 38.4 51.2 38.4 51.2v345.6c-6.4 0-6.4 38.4-25.6 51.2-32 19.2-44.8 6.4-51.2 6.4l-198.4-70.4s-19.2-6.4-32-25.6c-12.8-25.6 12.8-57.6 12.8-57.6l211.2-288s19.2-19.2 44.8-12.8zM435.2 358.4c0 25.6-32 44.8-32 44.8l-217.6 108.8s-32 12.8-44.8 6.4c-19.2-12.8-25.6-25.6-32-32l-12.8-172.8s0-32 6.4-44.8c12.8-19.2 44.8-6.4 44.8-6.4l256 57.6c12.8 0 25.6 6.4 32 38.4zM492.8 262.4c-19.2 12.8-44.8-6.4-44.8-6.4l-172.8-185.6s-19.2-25.6-12.8-44.8c6.4-19.2 12.8-25.6 25.6-32l172.8-51.2s19.2-6.4 38.4 0c19.2 0 12.8 32 12.8 32l6.4 256s0 25.6-25.6 32z" />
<glyph unicode="" d="M518.4 416l115.2-313.6v-6.4c-38.4-12.8-83.2-19.2-128-19.2-38.4 0-76.8 6.4-108.8 12.8l121.6 326.4zM896 448c0-140.8-76.8-256-192-326.4l115.2 332.8c19.2 51.2 32 96 32 134.4v38.4c32-51.2 44.8-115.2 44.8-179.2zM128 448c0 51.2 12.8 108.8 32 153.6l185.6-486.4c-128 57.6-217.6 185.6-217.6 332.8zM192 652.8c70.4 102.4 185.6 166.4 320 166.4 102.4 0 192-38.4 262.4-96h-6.4c-38.4 0-64-32-64-64s19.2-57.6 38.4-89.6c12.8-25.6 32-57.6 32-102.4 0-32-12.8-70.4-32-121.6l-38.4-128-140.8 403.2c25.6 0 44.8 6.4 44.8 6.4 19.2 0 19.2 32 0 32 0 0-64-6.4-102.4-6.4-38.4 0-102.4 6.4-102.4 6.4-19.2 0-25.6-32 0-32 0 0 19.2 0 38.4-6.4l57.6-160-83.2-243.2-140.8 403.2c25.6 6.4 44.8 6.4 44.8 6.4 19.2 0 19.2 32 0 32 0 0-64-6.4-102.4-6.4h-25.6zM851.2 960h-678.4c-96 0-172.8-76.8-172.8-172.8v-678.4c0-96 76.8-172.8 172.8-172.8h678.4c96 0 172.8 76.8 172.8 172.8v678.4c0 96-76.8 172.8-172.8 172.8zM960 448c0-249.6-198.4-448-448-448s-448 198.4-448 448 198.4 448 448 448 448-198.4 448-448z" />
<glyph unicode="" d="M409.6 62.494v343.341h493.929v-439.718l-493.929 96.376zM409.6 839.529l493.929 90.353v-439.718h-493.929v349.365zM331.294 490.165h-331.294v271.059l331.294 60.235v-331.294zM331.294 80.565l-331.294 66.259v259.012h331.294v-325.271z" horiz-adv-x="904" />
<glyph unicode="" d="M64 768c19.2-128 128-659.2 377.6-812.8 38.4-25.6 83.2-19.2 115.2 6.4 121.6 102.4 243.2 275.2 275.2 358.4 64-6.4 108.8 12.8 108.8 12.8v128h-115.2c-140.8 0-236.8 166.4-179.2 313.6 38.4 102.4 108.8 25.6 121.6 0 12.8-32 6.4-115.2-6.4-172.8 19.2-51.2 140.8-76.8 166.4-38.4 32 96 44.8 262.4-38.4 352-57.6 38.4-198.4 70.4-300.8 6.4s-102.4-204.8-96-275.2c6.4-70.4 32-217.6 172.8-300.8 12.8-12.8-153.6-230.4-160-217.6-185.6 179.2-249.6 544-262.4 640h-179.2z" />
<glyph unicode="" d="M576 512v-236.8c0-57.6 0-96 6.4-108.8 6.4-19.2 19.2-32 38.4-44.8 25.6-12.8 51.2-19.2 76.8-19.2 51.2 0 83.2 6.4 134.4 38.4v-153.6c-44.8-19.2-83.2-32-115.2-38.4-38.4-12.8-76.8-12.8-115.2-12.8-44.8 0-76.8 6.4-108.8 19.2-38.4 12.8-64 32-89.6 51.2-25.6 19.2-44.8 44.8-51.2 70.4-12.8 25.6-12.8 57.6-12.8 108.8v352h-147.2v147.2c38.4 12.8 83.2 32 115.2 57.6 25.6 25.6 51.2 51.2 70.4 89.6 19.2 32 32 76.8 38.4 128h160v-256h256v-192h-256z" />
<glyph unicode="" d="M646.4 236.8h-192l-64-300.8h-262.4l25.6 108.8h-153.6l198.4 915.2h448c134.4 0 288-96 236.8-313.6-38.4-192-192-300.8-371.2-300.8h-185.6l-64-300.8h-44.8l-12.8-44.8h134.4l64 300.8h243.2c76.8 0 147.2 25.6 198.4 64l32 25.6c51.2 51.2 83.2 115.2 102.4 192 12.8 76.8 6.4 140.8-32 185.6-19.2 19.2-38.4 38.4-64 51.2 96-38.4 166.4-134.4 134.4-288-38.4-179.2-192-294.4-371.2-294.4zM492.8 524.8c70.4 0 134.4 57.6 153.6 128 19.2 70.4-25.6 128-89.6 128h-128l-64-256h128z" />
<glyph unicode="" d="M780.8 160c-204.8 0-275.2 89.6-313.6 204.8l-38.4 121.6c-25.6 89.6-64 153.6-166.4 153.6-70.4 0-147.2-51.2-147.2-198.4 0-115.2 57.6-185.6 140.8-185.6 89.6 0 153.6 70.4 153.6 70.4l44.8-102.4s-64-64-198.4-64c-166.4 0-256 96-256 275.2 0 192 89.6 300.8 262.4 300.8 153.6 0 236.8-57.6 281.6-211.2l38.4-121.6c25.6-89.6 76.8-147.2 198.4-147.2 76.8 0 121.6 19.2 121.6 64 0 32-19.2 57.6-76.8 76.8l-76.8 19.2c-96 25.6-134.4 76.8-134.4 153.6 0 128 102.4 172.8 211.2 172.8 121.6 0 192-44.8 204.8-153.6l-115.2-12.8c-6.4 51.2-38.4 70.4-89.6 70.4s-83.2-25.6-83.2-64 12.8-57.6 64-70.4l76.8-19.2c89.6-25.6 140.8-70.4 140.8-166.4 0-121.6-96-166.4-243.2-166.4z" />
<glyph unicode="" d="M928 960h-832c-51.2 0-96-44.8-96-96v-825.6c0-57.6 44.8-102.4 96-102.4h825.6c57.6 0 96 44.8 96 96v832c6.4 51.2-38.4 96-89.6 96zM512 646.4c108.8 0 198.4-89.6 198.4-198.4s-89.6-198.4-198.4-198.4-198.4 89.6-198.4 198.4 89.6 198.4 198.4 198.4zM896 102.4c0-19.2-19.2-38.4-38.4-38.4h-691.2c-19.2 0-38.4 19.2-38.4 38.4v409.6h89.6c-6.4-25.6-6.4-51.2-6.4-76.8 0-166.4 128-307.2 300.8-307.2s300.8 140.8 300.8 307.2c0 25.6-6.4 51.2-12.8 76.8h96v-409.6zM896 678.4c0-19.2-19.2-38.4-38.4-38.4h-115.2c-19.2 0-38.4 19.2-38.4 38.4v115.2c0 19.2 19.2 38.4 38.4 38.4h115.2c19.2 0 38.4-19.2 38.4-38.4v-115.2z" />
<glyph unicode="" d="M64 960l64-896 384-128 384 128 64 896h-896zM780.8 659.2h-428.8l12.8-115.2h409.6l-32-352-230.4-64-230.4 64-12.8 179.2h115.2v-89.6l128-32 128 32 12.8 147.2h-390.4l-32 345.6h563.2l-12.8-115.2z" />
<glyph unicode="" d="M0 435.2c0-44.8 6.4-89.6 12.8-128s19.2-70.4 38.4-96c12.8-25.6 32-51.2 57.6-70.4s51.2-38.4 76.8-51.2c25.6-12.8 57.6-25.6 96-32l108.8-19.2s76.8-6.4 121.6-6.4 83.2 0 121.6 6.4 70.4 6.4 108.8 19.2c38.4 6.4 70.4 19.2 96 32s51.2 32 76.8 51.2c25.6 19.2 44.8 44.8 57.6 70.4 12.8 25.6 25.6 57.6 38.4 96 12.8 38.4 12.8 83.2 12.8 128 0 83.2-25.6 153.6-83.2 217.6l6.4 25.6c0 12.8 6.4 25.6 6.4 44.8v64l-19.2 76.8h-32c-12.8 0-25.6-6.4-44.8-6.4-19.2-6.4-38.4-12.8-64-25.6l-76.8-51.2c-51.2 12.8-121.6 19.2-204.8 19.2s-153.6-6.4-198.4-19.2c-32 19.2-57.6 32-83.2 44.8-25.6 12.8-44.8 19.2-64 25.6l-38.4 12.8h-38.4l-19.2-76.8c-6.4-25.6-6.4-44.8 0-64 0-19.2 6.4-32 6.4-44.8 0-12.8 6.4-19.2 6.4-25.6-57.6-64-83.2-134.4-83.2-217.6zM128 307.2c0 44.8 19.2 89.6 64 134.4 12.8 12.8 25.6 19.2 44.8 25.6 19.2 6.4 38.4 12.8 57.6 12.8h64c19.2 0 44.8 0 76.8-6.4h153.6c25.6 0 51.2 6.4 70.4 6.4h64c19.2 0 44.8-6.4 57.6-12.8 19.2-6.4 32-12.8 44.8-25.6 44.8-38.4 64-83.2 64-134.4 0-25.6-6.4-51.2-12.8-76.8l-25.6-57.6c-12.8-12.8-25.6-25.6-44.8-38.4-19.2-12.8-38.4-19.2-57.6-25.6-19.2-6.4-44.8-12.8-70.4-12.8-32 0-57.6-6.4-76.8-6.4-25.6 6.4-57.6 6.4-89.6 6.4h-89.6c-25.6 0-51.2 0-76.8 6.4-32 0-51.2 6.4-70.4 12.8-19.2 6.4-38.4 12.8-57.6 25.6-25.6 12.8-44.8 19.2-51.2 38.4-12.8 12.8-19.2 32-25.6 57.6-12.8 19.2-12.8 44.8-12.8 70.4zM640 320c0-51.2 25.6-96 64-96s64 44.8 64 96-25.6 96-64 96c-32 0-64-44.8-64-96zM256 320c0-51.2 32-96 64-96s64 44.8 64 96-25.6 96-64 96-64-44.8-64-96z" />
<glyph unicode="" d="M985.6 364.8l-390.4-390.4c-44.8-44.8-121.6-44.8-166.4 0l-396.8 390.4c-44.8 44.8-44.8 121.6 0 166.4l390.4 390.4c51.2 51.2 128 51.2 172.8 6.4l179.2-179.2-262.4-268.8-102.4 102.4c-32 32-83.2 32-108.8 0l-83.2-83.2c-32-32-32-76.8 0-108.8l236.8-236.8c25.6-25.6 57.6-25.6 83.2-19.2 12.8 6.4 19.2 6.4 25.6 19.2l396.8 403.2 19.2-19.2c57.6-51.2 57.6-128 6.4-172.8zM550.4 224c-12.8-12.8-44.8-12.8-44.8-12.8s-32 0-38.4 12.8l-179.2 185.6c-12.8 12.8-12.8 38.4 0 57.6l51.2 51.2c12.8 12.8 44.8 12.8 57.6 0l115.2-121.6 352 352c12.8 12.8 44.8 12.8 57.6 0l51.2-51.2c12.8-12.8 12.8-44.8 0-57.6l-422.4-416z" />
<glyph unicode="" d="M512 748.8l211.2 179.2 300.8-198.4-204.8-166.4-307.2 185.6zM1024 396.8l-300.8-198.4-211.2 172.8 300.8 185.6 211.2-160zM300.8 198.4l-300.8 198.4 204.8 166.4 307.2-192-211.2-172.8zM0 729.6l300.8 198.4 211.2-179.2-300.8-192-211.2 172.8zM512 332.8l211.2-179.2 89.6 57.6v-64l-300.8-179.2-300.8 179.2v64l89.6-51.2 211.2 172.8z" />
<glyph unicode="" d="M864 249.6c-38.4 0-64 32-64 64v256c0 38.4 32 64 64 64 38.4 0 64-32 64-64v-256c0-32-25.6-64-64-64zM697.6 102.4h-38.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-70.4v-108.8c0-38.4-25.6-64-57.6-64s-57.6 25.6-57.6 64v108.8h-32c-19.2 0-38.4 19.2-38.4 44.8v428.8h448v-422.4c0-32-12.8-51.2-38.4-51.2zM736 633.6h-448c0 89.6 32 153.6 76.8 192l-70.4 83.2c-6.4 12.8-6.4 25.6 0 38.4 12.8 12.8 25.6 12.8 38.4 0l83.2-96c32 12.8 64 19.2 96 19.2s70.4-6.4 96-19.2l83.2 96c12.8 12.8 25.6 12.8 38.4 0s12.8-32 0-38.4l-70.4-83.2c44.8-32 76.8-102.4 76.8-192zM441.6 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 12.8 25.6 32-12.8 32-25.6 32zM582.4 761.6c-12.8 0-25.6-12.8-25.6-32s12.8-32 25.6-32 25.6 19.2 25.6 32-12.8 32-25.6 32zM160 249.6c-38.4 0-64 32-64 64v256c0 38.4 25.6 64 64 64s64-32 64-64v-256c0-32-25.6-64-64-64z" />
<glyph unicode="" d="M921.6 211.2c-32-153.6-115.2-211.2-147.2-249.6-32-25.6-121.6-25.6-153.6-6.4-38.4 25.6-134.4 25.6-166.4 0-44.8-32-115.2-19.2-128-12.8-256 179.2-352 716.8 12.8 774.4 64 12.8 134.4-32 134.4-32 51.2-25.6 70.4-12.8 115.2 6.4 96 44.8 243.2 44.8 313.6-76.8-147.2-96-153.6-294.4 19.2-403.2zM716.8 960c12.8-70.4-64-224-204.8-230.4-12.8 38.4 32 217.6 204.8 230.4z" />
</font></defs></svg>
After Width: | Height: | Size: 56 KiB |
BIN
themes/waifu_plugin/flat-ui-icons-regular.ttf
Normal file
Binary file not shown.
BIN
themes/waifu_plugin/flat-ui-icons-regular.woff
Normal file
Binary file not shown.
13
themes/waifu_plugin/jquery-ui.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff.