diff --git a/README.md b/README.md index 87362913..9d9fe055 100644 --- a/README.md +++ b/README.md @@ -113,9 +113,11 @@ cd gpt_academic 2. 配置API_KEY -在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。 +在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。 -(P.S. 程序运行时会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。因此,如果您能理解我们的配置读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。`config_private.py`不受git管控,可以让您的隐私信息更加安全。P.S.项目同样支持通过`环境变量`配置大多数选项,环境变量的书写格式参考`docker-compose`文件。读取优先级: `环境变量` > `config_private.py` > `config.py`) +「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 + +「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 3. 安装依赖 @@ -123,7 +125,7 @@ cd gpt_academic # (选择I: 如熟悉python)(python版本3.9以上,越新越好),备注:使用官方pip源或者阿里pip源,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ python -m pip install -r requirements.txt -# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): +# (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): conda create -n gptac_venv python=3.11 # 创建anaconda环境 conda activate gptac_venv # 激活anaconda环境 python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤 @@ -161,26 +163,25 @@ python main.py ### 安装方法II:使用Docker +0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个,建议使用方案1)(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) [![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) -1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1) +``` sh +# 修改docker-compose.yml,保留方案0并删除其他方案。修改docker-compose.yml中方案0的配置,参考其中注释即可 +docker-compose up +``` + +1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择) [![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) [![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) - ``` sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git # 下载项目 -cd gpt_academic # 进入路径 -nano config.py # 用任意文本编辑器编辑config.py, 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等 -docker build -t gpt-academic . 
# 安装 - -#(最后一步-Linux操作系统)用`--net=host`更方便快捷 -docker run --rm -it --net=host gpt-academic -#(最后一步-MacOS/Windows操作系统)只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic +# 修改docker-compose.yml,保留方案1并删除其他方案。修改docker-compose.yml中方案1的配置,参考其中注释即可 +docker-compose up ``` -P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用docker-compose获取Latex功能(修改docker-compose.yml,保留方案4并删除其他方案)。 + +P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。 2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) @@ -321,6 +322,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: - version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件 +- version 3.53: 支持动态选择不同界面主题,提高稳定性&解决多用户冲突问题 - version 3.50: 使用自然语言调用本项目的所有函数插件(虚空终端),支持插件分类,改进UI,设计新主题 - version 3.49: 支持百度千帆平台和文心一言 - version 3.48: 支持阿里达摩院通义千问,上海AI-Lab书生,讯飞星火 diff --git a/app.py b/app.py index 3b96a097..6fa41b3e 100644 --- a/app.py +++ b/app.py @@ -2,23 +2,35 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 def main(): import subprocess, sys - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio-stable-fork']) + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://github.com/binary-husky/gpt_academic/raw/master/docs/gradio-3.32.6-py3-none-any.whl']) import gradio as gr + if gr.__version__ not in ['3.32.6']: + raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") from request_llm.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') - ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING') + ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME') + DARK_MODE, = get_conf('DARK_MODE') # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT from check_proxy import get_current_version - from themes.theme import adjust_theme, advanced_css, theme_declaration + from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme + initial_prompt = "Serve me as a writing and programming assistant." title_html = f"

<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>

{theme_declaration}" - description = "代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic)," - description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)" + description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), " + description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)." + description += "

常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), " + description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)." + description += "

普通对话使用说明: 1. 输入问题; 2. 点击提交" + description += "

基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮" + description += "

函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮" + description += "

虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端" + description += "

如何保存对话: 点击保存当前的对话按钮" + description += "

如何语音对话: 请阅读Wiki" # 问询记录, python 版本建议3.9+(越新越好) import logging, uuid @@ -61,6 +73,7 @@ def main(): with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: gr.HTML(title_html) gr.HTML('''
Duplicate Space请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!使用时,先在输入框填入API-KEY然后回车。
切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!
支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。
''') + secret_css, dark_mode = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False) cookies = gr.State(load_chat_cookies()) with gr_L1(): with gr_L2(scale=2, elem_id="gpt-chat"): @@ -72,11 +85,11 @@ def main(): with gr.Row(): txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False) with gr.Row(): - submitBtn = gr.Button("提交", variant="primary") + submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary") with gr.Row(): - resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm") - stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") - clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm") + resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm") + stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm") + clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm") if ENABLE_AUDIO: with gr.Row(): audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False) @@ -87,7 +100,7 @@ def main(): for k in functional: if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" - functional[k]["Button"] = gr.Button(k, variant=variant) + functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}') functional[k]["Button"].style(size="sm") with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn: with gr.Row(): @@ -100,7 +113,9 @@ def main(): if not plugin.get("AsButton", True): continue visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False variant = plugins[k]["Color"] if "Color" in plugin else "secondary" - plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible).style(size="sm") + info = plugins[k].get("Info", k) + plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, + visible=visible, info_str=f'函数插件区: {info}').style(size="sm") with gr.Row(): with gr.Accordion("更多函数插件", open=True): dropdown_fn_list = [] @@ -117,15 +132,27 @@ def main(): switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm") with gr.Row(): with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up: - file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple") - with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"): - system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) + file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload") + + + with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"): + with gr.Row(): + with gr.Tab("上传文件", elem_id="interact-panel"): + gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。") + file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple") + + with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"): + md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, 
step=0.01, interactive=True, label="Temperature",) - max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") - md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) - dark_mode_btn = gr.Button("Toggle Dark Mode ☀", variant="secondary").style(size="sm") + max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",) + system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) + + with gr.Tab("界面外观", elem_id="interact-panel"): + theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False) + checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], + value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) + dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm") dark_mode_btn.click(None, None, None, _js="""() => { if (document.querySelectorAll('.dark').length) { document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark')); @@ -134,13 +161,17 @@ def main(): } }""", ) + with gr.Tab("帮助", elem_id="interact-panel"): gr.Markdown(description) - with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary: - with gr.Row(): - txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False) - with gr.Row(): - submitBtn2 = gr.Button("提交", variant="primary") - with gr.Row(): + + with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary: + with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"): + with gr.Row() as row: + row.style(equal_height=True) + with gr.Column(scale=10): + txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", lines=8, label="输入区2").style(container=False) + with gr.Column(scale=1, min_width=40): + submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm") resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm") @@ -150,12 +181,12 @@ def main(): ret = {} ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))}) - ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))}) - ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))}) + ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))}) + ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))}) ret.update({clearBtn: gr.update(visible=("输入清除键" in a))}) ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))}) ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))}) - if "底部输入区" in a: ret.update({txt: gr.update(value="")}) + if "浮动输入区" in a: ret.update({txt: gr.update(value="")}) return ret checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] ) # 整理反复出现的控件句柄组合 @@ -183,6 +214,7 @@ def main(): cancel_handles.append(click_handle) # 
文件上传区,接收文件后与chatbot的互动 file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]) + file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]) # 函数插件-固定按钮区 for k in plugins: if not plugins[k].get("AsButton", True): continue @@ -192,16 +224,45 @@ def main(): # 函数插件-下拉菜单与随变按钮的互动 def on_dropdown_changed(k): variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary" - ret = {switchy_bt: gr.update(value=k, variant=variant)} + info = plugins[k].get("Info", k) + ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')} if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区 ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))}) else: ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")}) return ret dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] ) + def on_md_dropdown_changed(k): return {chatbot: gr.update(label="当前模型:"+k)} md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] ) + + def on_theme_dropdown_changed(theme, secret_css): + adjust_theme, css_part1, _, adjust_dynamic_theme = load_dynamic_theme(theme) + if adjust_dynamic_theme: + css_part2 = adjust_dynamic_theme._get_theme_css() + else: + css_part2 = adjust_theme()._get_theme_css() + return css_part2 + css_part1 + + theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css]) + theme_handle.then( + None, + [secret_css], + None, + _js="""(css) => { + var existingStyles = document.querySelectorAll("style[data-loaded-css]"); + for (var i = 0; i < existingStyles.length; i++) { + var style = existingStyles[i]; + style.parentNode.removeChild(style); + } + var styleElement = document.createElement('style'); + styleElement.setAttribute('data-loaded-css', css); + styleElement.innerHTML = css; + document.head.appendChild(styleElement); + } + """ + ) # 随变按钮的回调函数注册 def route(request: gr.Request, k, *args, **kwargs): if k in [r"打开插件列表", r"请先从插件列表中选择"]: return @@ -237,19 +298,30 @@ def main(): cookies.update({'uuid': uuid.uuid4()}) return cookies demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies]) - demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}') + darkmode_js = """(dark) => { + dark = dark == "True"; + if (document.querySelectorAll('.dark').length) { + if (!dark){ + document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark')); + } + } else { + if (dark){ + document.querySelector('body').classList.add('dark'); + } + } + }""" + demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题 + demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}') # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 def auto_opentab_delay(): import threading, webbrowser, time print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - print(f"\t(亮色主题): http://localhost:{PORT}") - print(f"\t(暗色主题): http://localhost:{PORT}/?__theme=dark") + if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") + else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") def open(): time.sleep(2) # 打开浏览器 - DARK_MODE, = get_conf('DARK_MODE') - if DARK_MODE: webbrowser.open_new_tab(f"http://localhost:{PORT}/?__theme=dark") - else: webbrowser.open_new_tab(f"http://localhost:{PORT}") 
+ webbrowser.open_new_tab(f"http://localhost:{PORT}") threading.Thread(target=open, name="open-browser", daemon=True).start() threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start() threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start() diff --git a/check_proxy.py b/check_proxy.py index b6fe99f8..740eed23 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -155,11 +155,13 @@ def auto_update(raise_error=False): def warm_up_modules(): print('正在执行一些模块的预热...') + from toolbox import ProxyNetworkActivate from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - enc.encode("模块预热", disallowed_special=()) - enc = model_info["gpt-4"]['tokenizer'] - enc.encode("模块预热", disallowed_special=()) + with ProxyNetworkActivate("Warmup_Modules"): + enc = model_info["gpt-3.5-turbo"]['tokenizer'] + enc.encode("模块预热", disallowed_special=()) + enc = model_info["gpt-4"]['tokenizer'] + enc.encode("模块预热", disallowed_special=()) if __name__ == '__main__': import os diff --git a/config.py b/config.py index 46f8a988..fb4dab81 100644 --- a/config.py +++ b/config.py @@ -50,6 +50,7 @@ DEFAULT_WORKER_NUM = 3 # 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"] # 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...] THEME = "Chuanhu-Small-and-Beautiful" +AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"] # 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效) @@ -62,7 +63,10 @@ CODE_HIGHLIGHT = True # 窗口布局 LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) -DARK_MODE = True # 暗色模式 / 亮色模式 + + +# 暗色模式 / 亮色模式 +DARK_MODE = True # 发送请求到OpenAI后,等待多久判定为超时 @@ -81,13 +85,13 @@ LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm" AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo", "spark", "azure-gpt-3.5"] # 插件分类默认选项 -DEFAULT_FN_GROUPS = ['对话', '编程', '学术'] +DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", - "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"] + "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"] # P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", # "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] @@ -186,11 +190,20 @@ GROBID_URLS = [ # 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭 ALLOW_RESET_CONFIG = False + + # 临时的上传文件夹位置,请勿修改 PATH_PRIVATE_UPLOAD = "private_upload" + + # 日志文件夹的位置,请勿修改 PATH_LOGGING = "gpt_log" + +# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改 +WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"] + + """ 在线大模型配置关联关系示意图 │ diff --git a/core_functional.py b/core_functional.py index c4519ef8..22c2e457 100644 --- a/core_functional.py +++ b/core_functional.py @@ -11,7 +11,8 @@ def get_core_functions(): # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " + r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " + - r"Furthermore, list all modification and explain the reasons to do so in markdown table." 
+ "\n\n", + r"Firstly, you should provide the polished paragraph. " + r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n", # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": r"", # 按钮颜色 (默认 secondary) @@ -27,17 +28,18 @@ def get_core_functions(): "Suffix": r"", }, "查找语法错误": { - "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " + - r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." + - r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " + - r"put the original text the first column, " + - r"put the corrected text in the second column and highlight the key words you fixed.""\n" + "Prefix": r"Help me ensure that the grammar and the spelling is correct. " + r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. " + r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " + r"put the original text the first column, " + r"put the corrected text in the second column and highlight the key words you fixed. " + r"Finally, please provide the proofreaded text.""\n\n" r"Example:""\n" r"Paragraph: How is you? Do you knows what is it?""\n" r"| Original sentence | Corrected sentence |""\n" r"| :--- | :--- |""\n" r"| How **is** you? | How **are** you? |""\n" - r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n" + r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n\n" r"Below is a paragraph from an academic paper. " r"You need to report all grammar and spelling mistakes as the example before." + "\n\n", diff --git a/crazy_functional.py b/crazy_functional.py index 4df53f58..6a41600d 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -6,6 +6,7 @@ def get_crazy_functions(): from crazy_functions.生成函数注释 import 批量生成函数注释 from crazy_functions.解析项目源代码 import 解析项目本身 from crazy_functions.解析项目源代码 import 解析一个Python项目 + from crazy_functions.解析项目源代码 import 解析一个Matlab项目 from crazy_functions.解析项目源代码 import 解析一个C项目的头文件 from crazy_functions.解析项目源代码 import 解析一个C项目 from crazy_functions.解析项目源代码 import 解析一个Golang项目 @@ -38,7 +39,7 @@ def get_crazy_functions(): function_plugins = { "虚空终端": { - "Group": "对话|编程|学术", + "Group": "对话|编程|学术|智能体", "Color": "stop", "AsButton": True, "Function": HotReload(虚空终端) @@ -77,6 +78,13 @@ def get_crazy_functions(): "Info": "批量总结word文档 | 输入参数为路径", "Function": HotReload(总结word文档) }, + "解析整个Matlab项目": { + "Group": "编程", + "Color": "stop", + "AsButton": False, + "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径", + "Function": HotReload(解析一个Matlab项目) + }, "解析整个C++项目头文件": { "Group": "编程", "Color": "stop", @@ -243,20 +251,23 @@ def get_crazy_functions(): "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包", "Function": HotReload(Latex中文润色) }, - "Latex项目全文中译英(输入路径或上传压缩包)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包", - "Function": HotReload(Latex中译英) - }, - "Latex项目全文英译中(输入路径或上传压缩包)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包", - "Function": HotReload(Latex英译中) - }, + + # 被新插件取代 + # "Latex项目全文中译英(输入路径或上传压缩包)": { + # "Group": "学术", + # "Color": "stop", + # "AsButton": False, # 加入下拉菜单中 + # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包", + # "Function": HotReload(Latex中译英) + # }, + # "Latex项目全文英译中(输入路径或上传压缩包)": { + # "Group": "学术", + 
# "Color": "stop", + # "AsButton": False, # 加入下拉菜单中 + # "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包", + # "Function": HotReload(Latex英译中) + # }, + "批量Markdown中译英(输入路径或上传压缩包)": { "Group": "编程", "Color": "stop", @@ -513,6 +524,18 @@ def get_crazy_functions(): except: print('Load function plugin failed') + try: + from crazy_functions.函数动态生成 import 函数动态生成 + function_plugins.update({ + "动态代码解释器(CodeInterpreter)": { + "Group": "智能体", + "Color": "stop", + "AsButton": False, + "Function": HotReload(函数动态生成) + } + }) + except: + print('Load function plugin failed') # try: # from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter diff --git a/crazy_functions/Langchain知识库.py b/crazy_functions/Langchain知识库.py index 741a3d06..8433895f 100644 --- a/crazy_functions/Langchain知识库.py +++ b/crazy_functions/Langchain知识库.py @@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 print('Checking Text2vec ...') from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate(): # 临时地激活代理网络 + with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") # < -------------------构建知识库--------------- > chatbot.append(['
'.join(file_manifest), "正在构建知识库..."]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 print('Establishing knowledge archive ...') - with ProxyNetworkActivate(): # 临时地激活代理网络 + with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 kai = knowledge_archive_interface() kai.feed_archive(file_manifest=file_manifest, id=kai_id) kai_files = kai.get_loaded_file() diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 56ca1c6c..7ec5a4b2 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -79,7 +79,7 @@ def move_project(project_folder, arxiv_id=None): shutil.copytree(src=project_folder, dst=new_workfolder) return new_workfolder -def arxiv_download(chatbot, history, txt): +def arxiv_download(chatbot, history, txt, allow_cache=True): def check_cached_translation_pdf(arxiv_id): translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation') if not os.path.exists(translation_dir): @@ -116,7 +116,7 @@ def arxiv_download(chatbot, history, txt): arxiv_id = url_.split('/abs/')[-1] if 'v' in arxiv_id: arxiv_id = arxiv_id[:10] cached_translation_pdf = check_cached_translation_pdf(arxiv_id) - if cached_translation_pdf: return cached_translation_pdf, arxiv_id + if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id url_tar = url_.replace('/abs/', '/e-print/') translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print') @@ -228,6 +228,9 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, # <-------------- more requirements -------------> if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") more_req = plugin_kwargs.get("advanced_arg", "") + no_cache = more_req.startswith("--no-cache") + if no_cache: more_req.lstrip("--no-cache") + allow_cache = not no_cache _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) # <-------------- check deps -------------> @@ -244,7 +247,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, # <-------------- clear history and read input -------------> history = [] - txt, arxiv_id = yield from arxiv_download(chatbot, history, txt) + txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) if txt.endswith('.pdf'): report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index ee1ab907..b7a18196 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -651,7 +651,7 @@ class knowledge_archive_interface(): from toolbox import ProxyNetworkActivate print('Checking Text2vec ...') from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate(): # 临时地激活代理网络 + with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") return self.text2vec_large_chinese @@ -807,3 +807,10 @@ class construct_html(): with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f: f.write(self.html_string.encode('utf-8', 'ignore').decode()) return os.path.join(get_log_folder(), file_name) + + +def get_plugin_arg(plugin_kwargs, key, default): + # 如果参数是空的 + if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key) + # 正常情况 + return plugin_kwargs.get(key, default) diff --git a/crazy_functions/gen_fns/gen_fns_shared.py 
b/crazy_functions/gen_fns/gen_fns_shared.py new file mode 100644 index 00000000..8e73794e --- /dev/null +++ b/crazy_functions/gen_fns/gen_fns_shared.py @@ -0,0 +1,70 @@ +import time +import importlib +from toolbox import trimmed_format_exc, gen_time_str, get_log_folder +from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder +from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg +import multiprocessing + +def get_class_name(class_string): + import re + # Use regex to extract the class name + class_name = re.search(r'class (\w+)\(', class_string).group(1) + return class_name + +def try_make_module(code, chatbot): + module_file = 'gpt_fn_' + gen_time_str().replace('-','_') + fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py' + with open(fn_path, 'w', encoding='utf8') as f: f.write(code) + promote_file_to_downloadzone(fn_path, chatbot=chatbot) + class_name = get_class_name(code) + manager = multiprocessing.Manager() + return_dict = manager.dict() + p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict)) + # only has 10 seconds to run + p.start(); p.join(timeout=10) + if p.is_alive(): p.terminate(); p.join() + p.close() + return return_dict["success"], return_dict['traceback'] + +# check is_function_successfully_generated +def is_function_successfully_generated(fn_path, class_name, return_dict): + return_dict['success'] = False + return_dict['traceback'] = "" + try: + # Create a spec for the module + module_spec = importlib.util.spec_from_file_location('example_module', fn_path) + # Load the module + example_module = importlib.util.module_from_spec(module_spec) + module_spec.loader.exec_module(example_module) + # Now you can use the module + some_class = getattr(example_module, class_name) + # Now you can create an instance of the class + instance = some_class() + return_dict['success'] = True + return + except: + return_dict['traceback'] = trimmed_format_exc() + return + +def subprocess_worker(code, file_path, return_dict): + return_dict['result'] = None + return_dict['success'] = False + return_dict['traceback'] = "" + try: + module_file = 'gpt_fn_' + gen_time_str().replace('-','_') + fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py' + with open(fn_path, 'w', encoding='utf8') as f: f.write(code) + class_name = get_class_name(code) + # Create a spec for the module + module_spec = importlib.util.spec_from_file_location('example_module', fn_path) + # Load the module + example_module = importlib.util.module_from_spec(module_spec) + module_spec.loader.exec_module(example_module) + # Now you can use the module + some_class = getattr(example_module, class_name) + # Now you can create an instance of the class + instance = some_class() + return_dict['result'] = instance.run(file_path) + return_dict['success'] = True + except: + return_dict['traceback'] = trimmed_format_exc() diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 8a7117ad..a047efce 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -1,16 +1,26 @@ +from functools import lru_cache +from toolbox import gen_time_str +from toolbox import promote_file_to_downloadzone +from toolbox import write_history_to_file, promote_file_to_downloadzone +from toolbox import get_conf +from toolbox import ProxyNetworkActivate +from colorful import * import requests import random -from 
functools import lru_cache +import copy +import os +import math + class GROBID_OFFLINE_EXCEPTION(Exception): pass def get_avail_grobid_url(): - from toolbox import get_conf GROBID_URLS, = get_conf('GROBID_URLS') if len(GROBID_URLS) == 0: return None try: _grobid_url = random.choice(GROBID_URLS) # 随机负载均衡 if _grobid_url.endswith('/'): _grobid_url = _grobid_url.rstrip('/') - res = requests.get(_grobid_url+'/api/isalive') + with ProxyNetworkActivate('Connect_Grobid'): + res = requests.get(_grobid_url+'/api/isalive') if res.text=='true': return _grobid_url else: return None except: @@ -21,10 +31,141 @@ def parse_pdf(pdf_path, grobid_url): import scipdf # pip install scipdf_parser if grobid_url.endswith('/'): grobid_url = grobid_url.rstrip('/') try: - article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) + with ProxyNetworkActivate('Connect_Grobid'): + article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) except GROBID_OFFLINE_EXCEPTION: raise GROBID_OFFLINE_EXCEPTION("GROBID服务不可用,请修改config中的GROBID_URL,可修改成本地GROBID服务。") except: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") return article_dict + +def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files): + # -=-=-=-=-=-=-=-= 写出第1个文件:翻译前后混合 -=-=-=-=-=-=-=-= + res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + gpt_response_collection, file_basename=f"{gen_time_str()}translated_and_original.md", file_fullname=None) + promote_file_to_downloadzone(res_path, rename_file=os.path.basename(res_path)+'.md', chatbot=chatbot) + generated_conclusion_files.append(res_path) + + # -=-=-=-=-=-=-=-= 写出第2个文件:仅翻译后的文本 -=-=-=-=-=-=-=-= + translated_res_array = [] + # 记录当前的大章节标题: + last_section_name = "" + for index, value in enumerate(gpt_response_collection): + # 先挑选偶数序列号: + if index % 2 != 0: + # 先提取当前英文标题: + cur_section_name = gpt_response_collection[index-1].split('\n')[0].split(" Part")[0] + # 如果index是1的话,则直接使用first section name: + if cur_section_name != last_section_name: + cur_value = cur_section_name + '\n' + last_section_name = copy.deepcopy(cur_section_name) + else: + cur_value = "" + # 再做一个小修改:重新修改当前part的标题,默认用英文的 + cur_value += value + translated_res_array.append(cur_value) + res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + translated_res_array, + file_basename = f"{gen_time_str()}-translated_only.md", + file_fullname = None, + auto_caption = False) + promote_file_to_downloadzone(res_path, rename_file=os.path.basename(res_path)+'.md', chatbot=chatbot) + generated_conclusion_files.append(res_path) + return res_path + +def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG): + from crazy_functions.crazy_utils import construct_html + from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf + from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive + from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency + + prompt = "以下是一篇学术论文的基本信息:\n" + # title + title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' + # authors + authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n' + # abstract + abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n' + # command + prompt += f"请将题目和摘要翻译为{DST_LANG}。" + meta = [f'# Title:\n\n', title, f'# 
Abstract:\n\n', abstract ] + + # 单线,获取文章meta信息 + paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=prompt, + inputs_show_user=prompt, + llm_kwargs=llm_kwargs, + chatbot=chatbot, history=[], + sys_prompt="You are an academic paper reader。", + ) + + # 多线,翻译 + inputs_array = [] + inputs_show_user_array = [] + + # get_token_num + from request_llm.bridge_all import model_info + enc = model_info[llm_kwargs['llm_model']]['tokenizer'] + def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) + + def break_down(txt): + raw_token_num = get_token_num(txt) + if raw_token_num <= TOKEN_LIMIT_PER_FRAGMENT: + return [txt] + else: + # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT + # find a smooth token limit to achieve even seperation + count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT)) + token_limit_smooth = raw_token_num // count + count + return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth) + + for section in article_dict.get('sections'): + if len(section['text']) == 0: continue + section_frags = break_down(section['text']) + for i, fragment in enumerate(section_frags): + heading = section['heading'] + if len(section_frags) > 1: heading += f' Part-{i+1}' + inputs_array.append( + f"你需要翻译{heading}章节,内容如下: \n\n{fragment}" + ) + inputs_show_user_array.append( + f"# {heading}\n\n{fragment}" + ) + + gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( + inputs_array=inputs_array, + inputs_show_user_array=inputs_show_user_array, + llm_kwargs=llm_kwargs, + chatbot=chatbot, + history_array=[meta for _ in inputs_array], + sys_prompt_array=[ + "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array], + ) + # -=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-= + produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files) + + # -=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-= + ch = construct_html() + orig = "" + trans = "" + gpt_response_collection_html = copy.deepcopy(gpt_response_collection) + for i,k in enumerate(gpt_response_collection_html): + if i%2==0: + gpt_response_collection_html[i] = inputs_show_user_array[i//2] + else: + # 先提取当前英文标题: + cur_section_name = gpt_response_collection[i-1].split('\n')[0].split(" Part")[0] + cur_value = cur_section_name + "\n" + gpt_response_collection_html[i] + gpt_response_collection_html[i] = cur_value + + final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] + final.extend(gpt_response_collection_html) + for i, k in enumerate(final): + if i%2==0: + orig = k + if i%2==1: + trans = k + ch.add_row(a=orig, b=trans) + create_report_file_name = f"{os.path.basename(fp)}.trans.html" + html_file = ch.save_file(create_report_file_name) + generated_conclusion_files.append(html_file) + promote_file_to_downloadzone(html_file, rename_file=os.path.basename(html_file), chatbot=chatbot) diff --git a/crazy_functions/函数动态生成.py b/crazy_functions/函数动态生成.py new file mode 100644 index 00000000..d16ef88d --- /dev/null +++ b/crazy_functions/函数动态生成.py @@ -0,0 +1,252 @@ +# 本源代码中, ⭐ = 关键步骤 +""" +测试: + - 裁剪图像,保留下半部分 + - 交换图像的蓝色通道和红色通道 + - 将图像转为灰度图像 + - 将csv文件转excel表格 + +Testing: + - Crop the image, keeping the bottom half. + - Swap the blue channel and red channel of the image. + - Convert the image to grayscale. + - Convert the CSV file to an Excel spreadsheet. 
+""" + + +from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder +from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg +from .crazy_utils import input_clipping, try_install_deps +from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated +from crazy_functions.gen_fns.gen_fns_shared import get_class_name +from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker +from crazy_functions.gen_fns.gen_fns_shared import try_make_module +import os +import time +import glob +import multiprocessing + +templete = """ +```python +import ... # Put dependencies here, e.g. import numpy as np. + +class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction` + + def run(self, path): # The name of the function must be `run`, it takes only a positional argument. + # rewrite the function you have just written here + ... + return generated_file_path +``` +""" + +def inspect_dependency(chatbot, history): + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return True + +def get_code_block(reply): + import re + pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks + matches = re.findall(pattern, reply) # find all code blocks in text + if len(matches) == 1: + return matches[0].strip('python') # code block + for match in matches: + if 'class TerminalFunction' in match: + return match.strip('python') # code block + raise RuntimeError("GPT is not generating proper code.") + +def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): + # 输入 + prompt_compose = [ + f'Your job:\n' + f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n', + f"2. You should write this function to perform following task: " + txt + "\n", + f"3. Wrap the output python function with markdown codeblock." + ] + i_say = "".join(prompt_compose) + demo = [] + + # 第一步 + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, + sys_prompt= r"You are a world-class programmer." + ) + history.extend([i_say, gpt_say]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + + # 第二步 + prompt_compose = [ + "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n", + templete + ] + i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. " + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, inputs_show_user=inputs_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + sys_prompt= r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!" + ) + code_to_return = gpt_say + history.extend([i_say, gpt_say]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + + # # 第三步 + # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them." + # i_say += 'For instance. 
`try_install_deps(["opencv-python", "scipy", "numpy"])`' + # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( + # inputs=i_say, inputs_show_user=inputs_show_user, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # sys_prompt= r"You are a programmer." + # ) + + # # # 第三步 + # i_say = "Show me how to use `pip` to install packages to run the code above. " + # i_say += 'For instance. `pip install -r opencv-python scipy numpy`' + # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( + # inputs=i_say, inputs_show_user=i_say, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # sys_prompt= r"You are a programmer." + # ) + installation_advance = "" + + return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history + + + + +def for_immediate_show_off_when_possible(file_type, fp, chatbot): + if file_type in ['png', 'jpg']: + image_path = os.path.abspath(fp) + chatbot.append(['这是一张图片, 展示如下:', + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' + ]) + return chatbot + + + +def have_any_recent_upload_files(chatbot): + _5min = 5 * 60 + if not chatbot: return False # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new + else: return False # most_recent_uploaded is too old + +def get_recent_file_prompt_support(chatbot): + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + path = most_recent_uploaded['path'] + return path + +@CatchException +def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + """ + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,暂时没有用武之地 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 + system_prompt 给gpt的静默提醒 + web_port 当前软件运行的端口号 + """ + + # 清空历史 + history = [] + + # 基本信息:功能、贡献者 + chatbot.append(["正在启动: 插件动态生成插件", "插件动态生成, 执行开始, 作者Binary-Husky."]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # ⭐ 文件上传区是否有东西 + # 1. 如果有文件: 作为函数参数 + # 2. 如果没有文件:需要用GPT提取参数 (太懒了,以后再写,虚空终端已经实现了类似的代码) + file_list = [] + if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False): + file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None) + file_list.append(file_path) + yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1) + elif have_any_recent_upload_files(chatbot): + file_dir = get_recent_file_prompt_support(chatbot) + file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True) + yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1) + else: + chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) + yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1) + return # 2. 如果没有文件 + if len(file_list) == 0: + chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) + yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1) + return # 2. 如果没有文件 + + # 读取文件 + file_type = file_list[0].split('.')[-1] + + # 粗心检查 + if is_the_upload_folder(txt): + yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1) + return + + # 开始干正事 + MAX_TRY = 3 + for j in range(MAX_TRY): # 最多重试5次 + traceback = "" + try: + # ⭐ 开始啦 ! + code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \ + yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history) + chatbot.append(["代码生成阶段结束", ""]) + yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1) + # ⭐ 分离代码块 + code = get_code_block(code) + # ⭐ 检查模块 + ok, traceback = try_make_module(code, chatbot) + # 搞定代码生成 + if ok: break + except Exception as e: + if not traceback: traceback = trimmed_format_exc() + # 处理异常 + if not traceback: traceback = trimmed_format_exc() + yield from update_ui_lastest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5) + + # 代码生成结束, 开始执行 + TIME_LIMIT = 15 + yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... 
", chatbot, history, 1) + manager = multiprocessing.Manager() + return_dict = manager.dict() + + # ⭐ 到最后一步了,开始逐个文件进行处理 + for file_path in file_list: + if os.path.exists(file_path): + chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."]) + chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + else: + continue + + # ⭐⭐⭐ subprocess_worker ⭐⭐⭐ + p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict)) + # ⭐ 开始执行,时间限制TIME_LIMIT + p.start(); p.join(timeout=TIME_LIMIT) + if p.is_alive(): p.terminate(); p.join() + p.close() + res = return_dict['result'] + success = return_dict['success'] + traceback = return_dict['traceback'] + if not success: + if not traceback: traceback = trimmed_format_exc() + chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) + # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 顺利完成,收尾 + res = str(res) + if os.path.exists(res): + chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res]) + new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot) + chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + else: + chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 2dc15f79..c0961c14 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -1,11 +1,12 @@ -from toolbox import CatchException, report_execption, gen_time_str +from toolbox import CatchException, report_execption, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion -from toolbox import write_history_to_file, get_log_folder +from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency from .crazy_utils import read_and_clean_pdf_text -from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url +from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf from colorful import * +import copy import os import math import logging @@ -92,7 +93,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): import copy import tiktoken - TOKEN_LIMIT_PER_FRAGMENT = 1280 + TOKEN_LIMIT_PER_FRAGMENT = 1024 generated_conclusion_files = [] generated_html_files = [] DST_LANG = "中文" @@ -101,101 +102,12 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa for index, fp in enumerate(file_manifest): chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) - + promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) with open(fpp, 'r', encoding='utf8') as f: article_content = f.readlines() article_dict = markdown_to_dict(article_content) logging.info(article_dict) - - prompt = "以下是一篇学术论文的基本信息:\n" - # title - 
title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' - # authors - authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n' - # abstract - abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n' - # command - prompt += f"请将题目和摘要翻译为{DST_LANG}。" - meta = [f'# Title:\n\n', title, f'# Abstract:\n\n', abstract ] - - # 单线,获取文章meta信息 - paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, - inputs_show_user=prompt, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="You are an academic paper reader。", - ) - - # 多线,翻译 - inputs_array = [] - inputs_show_user_array = [] - - # get_token_num - from request_llm.bridge_all import model_info - enc = model_info[llm_kwargs['llm_model']]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - - def break_down(txt): - raw_token_num = get_token_num(txt) - if raw_token_num <= TOKEN_LIMIT_PER_FRAGMENT: - return [txt] - else: - # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT - # find a smooth token limit to achieve even seperation - count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT)) - token_limit_smooth = raw_token_num // count + count - return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth) - - for section in article_dict.get('sections'): - if len(section['text']) == 0: continue - section_frags = break_down(section['text']) - for i, fragment in enumerate(section_frags): - heading = section['heading'] - if len(section_frags) > 1: heading += f' Part-{i+1}' - inputs_array.append( - f"你需要翻译{heading}章节,内容如下: \n\n{fragment}" - ) - inputs_show_user_array.append( - f"# {heading}\n\n{fragment}" - ) - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[meta for _ in inputs_array], - sys_prompt_array=[ - "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array], - ) - res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + gpt_response_collection, file_basename=None, file_fullname=None) - promote_file_to_downloadzone(res_path, rename_file=os.path.basename(fp)+'.md', chatbot=chatbot) - generated_conclusion_files.append(res_path) - - ch = construct_html() - orig = "" - trans = "" - gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): - if i%2==0: - gpt_response_collection_html[i] = inputs_show_user_array[i//2] - else: - gpt_response_collection_html[i] = gpt_response_collection_html[i] - - final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] - final.extend(gpt_response_collection_html) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{os.path.basename(fp)}.trans.html" - html_file = ch.save_file(create_report_file_name) - generated_html_files.append(html_file) - promote_file_to_downloadzone(html_file, rename_file=os.path.basename(html_file), chatbot=chatbot) + yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG) chatbot.append(("给出输出文件清单", 
str(generated_conclusion_files + generated_html_files))) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index d620715b..0f60a90a 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -1,12 +1,12 @@ -from toolbox import CatchException, report_execption, get_log_folder +from toolbox import CatchException, report_execption, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency from .crazy_utils import read_and_clean_pdf_text -from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url +from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf from colorful import * -import glob +import copy import os import math @@ -58,8 +58,8 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url): - import copy - TOKEN_LIMIT_PER_FRAGMENT = 1280 + import copy, json + TOKEN_LIMIT_PER_FRAGMENT = 1024 generated_conclusion_files = [] generated_html_files = [] DST_LANG = "中文" @@ -67,104 +67,23 @@ def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa for index, fp in enumerate(file_manifest): chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 article_dict = parse_pdf(fp, grobid_url) + grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json") + with open(grobid_json_res, 'w+', encoding='utf8') as f: + f.write(json.dumps(article_dict, indent=4, ensure_ascii=False)) + promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot) + if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") - prompt = "以下是一篇学术论文的基本信息:\n" - # title - title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' - # authors - authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n' - # abstract - abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n' - # command - prompt += f"请将题目和摘要翻译为{DST_LANG}。" - meta = [f'# Title:\n\n', title, f'# Abstract:\n\n', abstract ] - - # 单线,获取文章meta信息 - paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, - inputs_show_user=prompt, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="You are an academic paper reader。", - ) - - # 多线,翻译 - inputs_array = [] - inputs_show_user_array = [] - - # get_token_num - from request_llm.bridge_all import model_info - enc = model_info[llm_kwargs['llm_model']]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - - def break_down(txt): - raw_token_num = get_token_num(txt) - if raw_token_num <= TOKEN_LIMIT_PER_FRAGMENT: - return [txt] - else: - # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT - # find a smooth token limit to achieve even seperation - count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT)) - token_limit_smooth = raw_token_num // 
count + count - return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth) - - for section in article_dict.get('sections'): - if len(section['text']) == 0: continue - section_frags = break_down(section['text']) - for i, fragment in enumerate(section_frags): - heading = section['heading'] - if len(section_frags) > 1: heading += f' Part-{i+1}' - inputs_array.append( - f"你需要翻译{heading}章节,内容如下: \n\n{fragment}" - ) - inputs_show_user_array.append( - f"# {heading}\n\n{fragment}" - ) - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[meta for _ in inputs_array], - sys_prompt_array=[ - "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array], - ) - res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + gpt_response_collection, file_basename=None, file_fullname=None) - promote_file_to_downloadzone(res_path, rename_file=os.path.basename(fp)+'.md', chatbot=chatbot) - generated_conclusion_files.append(res_path) - - ch = construct_html() - orig = "" - trans = "" - gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): - if i%2==0: - gpt_response_collection_html[i] = inputs_show_user_array[i//2] - else: - gpt_response_collection_html[i] = gpt_response_collection_html[i] - - final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] - final.extend(gpt_response_collection_html) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{os.path.basename(fp)}.trans.html" - html_file = ch.save_file(create_report_file_name) - generated_html_files.append(html_file) - promote_file_to_downloadzone(html_file, rename_file=os.path.basename(html_file), chatbot=chatbot) - + yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG) chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): + """ + 此函数已经弃用 + """ import copy - TOKEN_LIMIT_PER_FRAGMENT = 1280 + TOKEN_LIMIT_PER_FRAGMENT = 1024 generated_conclusion_files = [] generated_html_files = [] from crazy_functions.crazy_utils import construct_html diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index 10333629..f17a584d 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -136,6 +136,23 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) +@CatchException +def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + history = [] # 清空历史,以免输入溢出 + import glob, os + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] + if len(file_manifest) == 0: + report_execption(chatbot, 
history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @CatchException def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): diff --git a/docker-compose.yml b/docker-compose.yml index 2387527f..2bf9dfe4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,81 @@ -#【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line +## =================================================== +# docker-compose.yml +## =================================================== +# 1. 请在以下方案中选择任意一种,然后删除其他的方案 +# 2. 修改你选择的方案中的environment环境变量,详情请见github wiki或者config.py +# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改: + # 【方法1: 适用于Linux,很方便,可惜windows不支持】与宿主的网络融合为一体,这个是默认配置 + # network_mode: "host" + # 【方法2: 适用于所有系统包括Windows和MacOS】端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容) + # ports: + # - "12345:12345" # 注意!12345必须与WEB_PORT环境变量相互对应 +# 4. 最后`docker-compose up`运行 +# 5. 如果希望使用显卡,请关注 LOCAL_MODEL_DEVICE 和 英伟达显卡运行时 选项 +## =================================================== +# 1. Please choose one of the following options and delete the others. +# 2. Modify the environment variables in the selected option, see GitHub wiki or config.py for more details. +# 3. Choose a method to expose the server port and make the corresponding configuration changes: + # [Method 1: Suitable for Linux, convenient, but not supported for Windows] Fusion with the host network, this is the default configuration + # network_mode: "host" + # [Method 2: Suitable for all systems including Windows and MacOS] Port mapping, mapping the container port to the host port (note that you need to delete network_mode: "host" first, and then add the following content) + # ports: + # - "12345: 12345" # Note! 12345 must correspond to the WEB_PORT environment variable. +# 4. Finally, run `docker-compose up`. +# 5. If you want to use a graphics card, pay attention to the LOCAL_MODEL_DEVICE and Nvidia GPU runtime options. 
+## =================================================== + +## =================================================== +## 【方案零】 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个) +## =================================================== +version: '3' +services: + gpt_academic_full_capability: + image: ghcr.io/binary-husky/gpt_academic_with_all_capacity:master + environment: + # 请查阅 `config.py`或者 github wiki 以查看所有的配置信息 + API_KEY: ' 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 ' + # USE_PROXY: ' True ' + # proxies: ' { "http": "http://localhost:10881", "https": "http://localhost:10881", } ' + LLM_MODEL: ' gpt-3.5-turbo ' + AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4", "qianfan", "sparkv2", "spark", "chatglm"] ' + BAIDU_CLOUD_API_KEY : ' bTUtwEAveBrQipEowUvDwYWq ' + BAIDU_CLOUD_SECRET_KEY : ' jqXtLvXiVw6UNdjliATTS61rllG8Iuni ' + XFYUN_APPID: ' 53a8d816 ' + XFYUN_API_SECRET: ' MjMxNDQ4NDE4MzM0OSNlNjQ2NTlhMTkx ' + XFYUN_API_KEY: ' 95ccdec285364869d17b33e75ee96447 ' + ENABLE_AUDIO: ' False ' + DEFAULT_WORKER_NUM: ' 20 ' + WEB_PORT: ' 12345 ' + ADD_WAIFU: ' False ' + ALIYUN_APPKEY: ' RxPlZrM88DnAFkZK ' + THEME: ' Chuanhu-Small-and-Beautiful ' + ALIYUN_ACCESSKEY: ' LTAI5t6BrFUzxRXVGUWnekh1 ' + ALIYUN_SECRET: ' eHmI20SVWIwQZxCiTD2bGQVspP9i68 ' + # LOCAL_MODEL_DEVICE: ' cuda ' + + # 加载英伟达显卡运行时 + # runtime: nvidia + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: [gpu] + + # 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合 + network_mode: "host" + + # 【WEB_PORT暴露方法2: 适用于所有系统】端口映射 + # ports: + # - "12345:12345" # 12345必须与WEB_PORT相互对应 + + # 启动容器后,运行main.py主程序 + command: > + bash -c "python3 -u main.py" + + + ## =================================================== ## 【方案一】 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务) diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity index 2ed9f4c9..5e50b401 100644 --- a/docs/GithubAction+AllCapacity +++ b/docs/GithubAction+AllCapacity @@ -13,21 +13,20 @@ RUN python3 -m pip install openai numpy arxiv rich RUN python3 -m pip install colorama Markdown pygments pymupdf RUN python3 -m pip install python-docx moviepy pdfminer RUN python3 -m pip install zh_langchain==0.2.1 pypinyin -RUN python3 -m pip install nougat-ocr RUN python3 -m pip install rarfile py7zr RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git # 下载分支 WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic -RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss RUN python3 -m pip install -r requirements.txt RUN python3 -m pip install -r request_llm/requirements_moss.txt RUN python3 -m pip install -r request_llm/requirements_qwen.txt RUN python3 -m pip install -r request_llm/requirements_chatglm.txt RUN python3 -m pip install -r request_llm/requirements_newbing.txt - +RUN python3 -m pip install nougat-ocr # 预热Tiktoken模块 diff --git a/docs/GithubAction+NoLocal+Latex b/docs/GithubAction+NoLocal+Latex index 2f2608cc..516eb8fd 100644 --- a/docs/GithubAction+NoLocal+Latex +++ b/docs/GithubAction+NoLocal+Latex @@ -5,6 +5,9 @@ FROM fuqingxu/python311_texlive_ctex:latest +# 删除文档文件以节约空间 +RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc + # 指定路径 WORKDIR /gpt diff --git a/docs/translate_english.json b/docs/translate_english.json index 91eba9d3..f3ec0c48 100644 --- a/docs/translate_english.json +++ 
b/docs/translate_english.json @@ -322,7 +322,7 @@ "任何文件": "Any file", "但推荐上传压缩文件": "But it is recommended to upload compressed files", "更换模型 & SysPrompt & 交互界面布局": "Change model & SysPrompt & interactive interface layout", - "底部输入区": "Bottom input area", + "浮动输入区": "Floating input area", "输入清除键": "Input clear key", "插件参数区": "Plugin parameter area", "显示/隐藏功能区": "Show/hide function area", @@ -2513,5 +2513,141 @@ "此处待注入的知识库名称id": "The knowledge base name ID to be injected here", "您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin", "判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law", - "构建知识库后": "After building the knowledge base" + "构建知识库后": "After building the knowledge base", + "找不到本地项目或无法处理": "Unable to find local project or unable to process", + "再做一个小修改": "Make another small modification", + "解析整个Matlab项目": "Parse the entire Matlab project", + "需要用GPT提取参数": "Need to extract parameters using GPT", + "文件路径": "File path", + "正在排队": "In queue", + "-=-=-=-=-=-=-=-= 写出第1个文件": "-=-=-=-=-=-=-=-= Write the first file", + "仅翻译后的文本 -=-=-=-=-=-=-=-=": "Translated text only -=-=-=-=-=-=-=-=", + "对话通道": "Conversation channel", + "找不到任何": "Unable to find any", + "正在启动": "Starting", + "开始创建新进程并执行代码! 时间限制": "Start creating a new process and executing the code! Time limit", + "解析Matlab项目": "Parse Matlab project", + "更换UI主题": "Change UI theme", + "⭐ 开始啦 !": "⭐ Let's start!", + "先提取当前英文标题": "First extract the current English title", + "睡一会防止触发google反爬虫": "Sleep for a while to prevent triggering Google anti-crawler", + "测试": "Test", + "-=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out Markdown file", + "如果index是1的话": "If the index is 1", + "VoidTerminal已经实现了类似的代码": "VoidTerminal has already implemented similar code", + "等待线程锁": "Waiting for thread lock", + "那么我们默认代理生效": "Then we default to proxy", + "结果是一个有效文件": "The result is a valid file", + "⭐ 检查模块": "⭐ Check module", + "备份一份History作为记录": "Backup a copy of History as a record", + "作者Binary-Husky": "Author Binary-Husky", + "将csv文件转excel表格": "Convert CSV file to Excel table", + "获取文章摘要": "Get article summary", + "次代码生成尝试": "Attempt to generate code", + "如果参数是空的": "If the parameter is empty", + "请配置讯飞星火大模型的XFYUN_APPID": "Please configure XFYUN_APPID for the Xunfei Starfire model", + "-=-=-=-=-=-=-=-= 写出第2个文件": "Write the second file", + "代码生成阶段结束": "Code generation phase completed", + "则进行提醒": "Then remind", + "处理异常": "Handle exception", + "可能触发了google反爬虫机制": "May have triggered Google anti-crawler mechanism", + "AnalyzeAMatlabProject的所有源文件": "All source files of AnalyzeAMatlabProject", + "写入": "Write", + "我们5秒后再试一次...": "Let's try again in 5 seconds...", + "判断一下用户是否错误地通过对话通道进入": "Check if the user entered through the dialogue channel by mistake", + "结果": "Result", + "2. 如果没有文件": "2. If there is no file", + "由 test_on_sentence_end": "By test_on_sentence_end", + "则直接使用first section name": "Then directly use the first section name", + "太懒了": "Too lazy", + "记录当前的大章节标题": "Record the current chapter title", + "然后再次点击该插件! 至于您的文件": "Then click the plugin again! 
As for your file", + "此次我们的错误追踪是": "This time our error tracking is", + "首先在arxiv上搜索": "First search on arxiv", + "被新插件取代": "Replaced by a new plugin", + "正在处理文件": "Processing file", + "除了连接OpenAI之外": "In addition to connecting OpenAI", + "我们检查一下": "Let's check", + "进度": "Progress", + "处理少数情况下的特殊插件的锁定状态": "Handle the locked state of special plugins in a few cases", + "⭐ 开始执行": "⭐ Start execution", + "正常情况": "Normal situation", + "下个句子中已经说完的部分": "The part that has already been said in the next sentence", + "首次运行需要花费较长时间下载NOUGAT参数": "The first run takes a long time to download NOUGAT parameters", + "使用tex格式公式 测试2 给出柯西不等式": "Use the tex format formula to test 2 and give the Cauchy inequality", + "无法从bing获取信息!": "Unable to retrieve information from Bing!", + "秒. 请等待任务完成": "Wait for the task to complete", + "开始干正事": "Start doing real work", + "需要花费较长时间下载NOUGAT参数": "It takes a long time to download NOUGAT parameters", + "然后再次点击该插件": "Then click the plugin again", + "受到bing限制": "Restricted by Bing", + "检索文章的历史版本的题目": "Retrieve the titles of historical versions of the article", + "收尾": "Wrap up", + "给定了task": "Given a task", + "某段话的整个句子": "The whole sentence of a paragraph", + "-=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out HTML file -=-=-=-=-=-=-=-=", + "当前文件": "Current file", + "请在输入框内填写需求": "Please fill in the requirements in the input box", + "结果是一个字符串": "The result is a string", + "用插件实现」": "Implemented with a plugin", + "⭐ 到最后一步了": "⭐ Reached the final step", + "重新修改当前part的标题": "Modify the title of the current part again", + "请勿点击“提交”按钮或者“基础功能区”按钮": "Do not click the 'Submit' button or the 'Basic Function Area' button", + "正在执行命令": "Executing command", + "检测到**滞留的缓存文档**": "Detected **stuck cache document**", + "第三步": "Step three", + "失败了~ 别担心": "Failed~ Don't worry", + "动态代码解释器": "Dynamic code interpreter", + "开始执行": "Start executing", + "不给定task": "No task given", + "正在加载NOUGAT...": "Loading NOUGAT...", + "精准翻译PDF文档": "Accurate translation of PDF documents", + "时间限制TIME_LIMIT": "Time limit TIME_LIMIT", + "翻译前后混合 -=-=-=-=-=-=-=-=": "Mixed translation before and after -=-=-=-=-=-=-=-=", + "搞定代码生成": "Code generation is done", + "插件通道": "Plugin channel", + "智能体": "Intelligent agent", + "切换界面明暗 ☀": "Switch interface brightness ☀", + "交换图像的蓝色通道和红色通道": "Swap blue channel and red channel of the image", + "作为函数参数": "As a function parameter", + "先挑选偶数序列号": "First select even serial numbers", + "仅供测试": "For testing only", + "执行成功了": "Execution succeeded", + "开始逐个文件进行处理": "Start processing files one by one", + "当前文件处理列表": "Current file processing list", + "执行失败了": "Execution failed", + "请及时处理": "Please handle it in time", + "源文件": "Source file", + "裁剪图像": "Crop image", + "插件动态生成插件": "Dynamic generation of plugins", + "正在验证上述代码的有效性": "Validating the above code", + "⭐ = 关键步骤": "⭐ = Key step", + "!= 0 代表“提交”键对话通道": "!= 0 represents the 'Submit' key dialogue channel", + "解析python源代码项目": "Parsing Python source code project", + "请检查PDF是否损坏": "Please check if the PDF is damaged", + "插件动态生成": "Dynamic generation of plugins", + "⭐ 分离代码块": "⭐ Separating code blocks", + "已经被记忆": "Already memorized", + "默认用英文的": "Default to English", + "错误追踪": "Error tracking", + "对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent", + "请检查": "Please check", + "检测到被滞留的缓存文档": "Detected cached documents being left behind", + "还有哪些场合允许使用代理": "What other occasions allow the use of proxies", + "1. 如果有文件": "1. 
If there is a file", + "执行开始": "Execution starts", + "代码生成结束": "Code generation ends", + "请及时点击“**保存当前对话**”获取所有滞留文档": "Please click '**Save Current Dialogue**' in time to obtain all cached documents", + "需点击“**函数插件区**”按钮进行处理": "Click the '**Function Plugin Area**' button for processing", + "此函数已经弃用": "This function has been deprecated", + "以后再写": "Write it later", + "返回给定的url解析出的arxiv_id": "Return the arxiv_id parsed from the given URL", + "⭐ 文件上传区是否有东西": "⭐ Is there anything in the file upload area", + "Nougat解析论文失败": "Nougat failed to parse the paper", + "本源代码中": "In this source code", + "或者基础功能通道": "Or the basic function channel", + "使用zip压缩格式": "Using zip compression format", + "受到google限制": "Restricted by Google", + "如果是": "If it is", + "不用担心": "don't worry" } \ No newline at end of file diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index ecd370e9..fa3af4e0 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -1007,7 +1007,6 @@ "第一部分": "第1部分", "的分析如下": "の分析は以下の通りです", "解决一个mdx_math的bug": "mdx_mathのバグを解決する", - "底部输入区": "下部の入力エリア", "函数插件输入输出接驳区": "関数プラグインの入出力接続エリア", "打开浏览器": "ブラウザを開く", "免费用户填3": "無料ユーザーは3を入力してください", diff --git a/docs/translate_std.json b/docs/translate_std.json index 84690c2e..827dcdb3 100644 --- a/docs/translate_std.json +++ b/docs/translate_std.json @@ -90,5 +90,7 @@ "解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID", "虚空终端主路由": "VoidTerminalMainRoute", "批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT", - "解析PDF_基于NOUGAT": "ParsePDF_NOUGAT" + "解析PDF_基于NOUGAT": "ParsePDF_NOUGAT", + "解析一个Matlab项目": "AnalyzeAMatlabProject", + "函数动态生成": "DynamicFunctionGeneration" } \ No newline at end of file diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index efaa62fc..53570aea 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -346,7 +346,6 @@ "情况会好转": "情況會好轉", "超过512个": "超過512個", "多线": "多線", - "底部输入区": "底部輸入區", "合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格", "暗色主题": "暗色主題", "提高限制请查询": "提高限制請查詢", diff --git a/docs/use_azure.md b/docs/use_azure.md index f7e7b77e..4c43a7ef 100644 --- a/docs/use_azure.md +++ b/docs/use_azure.md @@ -107,6 +107,12 @@ AZURE_API_KEY = "填入azure openai api的密钥" AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改 AZURE_ENGINE = "填入部署名" # 见上述图片 + +# 例如 +API_KEY = '6424e9d19e674092815cea1cb35e67a5' +AZURE_ENDPOINT = 'https://rhtjjjjjj.openai.azure.com/' +AZURE_ENGINE = 'qqwe' +LLM_MODEL = "azure-gpt-3.5" # 可选 ↓↓↓ ``` diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index bb325e46..44e0ae4b 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -52,6 +52,7 @@ API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "A openai_endpoint = "https://api.openai.com/v1/chat/completions" api2d_endpoint = "https://openai.api2d.net/v1/chat/completions" newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub" +if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/' azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15' # 兼容旧版的配置 try: @@ -125,6 +126,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-32k": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 32768, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + # azure openai "azure-gpt-3.5":{ "fn_with_ui": chatgpt_ui, @@ -135,6 +145,15 @@ model_info = { "token_cnt": 
get_token_num_gpt35, }, + "azure-gpt-4":{ + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": azure_endpoint, + "max_token": 8192, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + # api_2d "api2d-gpt-3.5-turbo": { "fn_with_ui": chatgpt_ui, diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py index 6dac8639..387b3e21 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llm/bridge_chatglm.py @@ -3,7 +3,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" @@ -48,16 +48,17 @@ class GetGLMHandle(Process): while True: try: - if self.chatglm_model is None: - self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True) - if device=='cpu': - self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float() + with ProxyNetworkActivate('Download_LLM'): + if self.chatglm_model is None: + self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True) + if device=='cpu': + self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float() + else: + self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda() + self.chatglm_model = self.chatglm_model.eval() + break else: - self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda() - self.chatglm_model = self.chatglm_model.eval() - break - else: - break + break except: retry += 1 if retry > 3: diff --git a/request_llm/bridge_llama2.py b/request_llm/bridge_llama2.py index e236c942..d1be4463 100644 --- a/request_llm/bridge_llama2.py +++ b/request_llm/bridge_llama2.py @@ -30,7 +30,7 @@ class GetONNXGLMHandle(LocalLLMHandle): with open(os.path.expanduser('~/.cache/huggingface/token'), 'w') as f: f.write(huggingface_token) model_id = 'meta-llama/Llama-2-7b-chat-hf' - with ProxyNetworkActivate(): + with ProxyNetworkActivate('Download_LLM'): self._tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=huggingface_token) # use fp16 model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=huggingface_token).eval() diff --git a/request_llm/requirements_chatglm.txt b/request_llm/requirements_chatglm.txt index b2629f83..cd53cd73 100644 --- a/request_llm/requirements_chatglm.txt +++ b/request_llm/requirements_chatglm.txt @@ -1,5 +1,4 @@ protobuf -transformers>=4.27.1 cpm_kernels torch>=1.10 mdtex2html diff --git a/request_llm/requirements_chatglm_onnx.txt b/request_llm/requirements_chatglm_onnx.txt index 70ab6684..54811472 100644 --- a/request_llm/requirements_chatglm_onnx.txt +++ b/request_llm/requirements_chatglm_onnx.txt @@ -1,5 +1,4 @@ protobuf -transformers>=4.27.1 cpm_kernels torch>=1.10 mdtex2html diff --git a/request_llm/requirements_jittorllms.txt b/request_llm/requirements_jittorllms.txt index 1d86ff81..ddb61955 100644 --- a/request_llm/requirements_jittorllms.txt +++ b/request_llm/requirements_jittorllms.txt @@ -2,6 +2,5 @@ jittor >= 1.3.7.9 jtorch >= 0.1.3 torch torchvision -transformers==4.26.1 pandas jieba \ No newline at end of file diff --git a/request_llm/requirements_moss.txt b/request_llm/requirements_moss.txt index 8dd75bff..c27907c2 100644 --- 
a/request_llm/requirements_moss.txt +++ b/request_llm/requirements_moss.txt @@ -1,5 +1,4 @@ torch -transformers==4.25.1 sentencepiece datasets accelerate diff --git a/requirements.txt b/requirements.txt index 0a9a4c83..2ca2f5cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ pydantic==1.10.11 tiktoken>=0.3.3 requests[socks] -transformers +transformers>=4.27.1 python-markdown-math beautifulsoup4 prompt_toolkit diff --git a/tests/test_plugins.py b/tests/test_plugins.py index ec28af1e..d9f78d6d 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -6,11 +6,14 @@ import os, sys def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume) validate_path() # 返回项目根路径 -from tests.test_utils import plugin_test if __name__ == "__main__": + from tests.test_utils import plugin_test + plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"}) + # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep') - plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf') + + # plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf') # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') diff --git a/tests/test_utils.py b/tests/test_utils.py index f3a45aa8..1fdca1eb 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -74,7 +74,7 @@ def plugin_test(main_input, plugin, advanced_arg=None): plugin_kwargs['plugin_kwargs'] = advanced_arg my_working_plugin = silence_stdout(plugin)(**plugin_kwargs) - with Live(Markdown(""), auto_refresh=False) as live: + with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live: for cookies, chat, hist, msg in my_working_plugin: md_str = vt.chat_to_markdown_str(chat) md = Markdown(md_str) diff --git a/themes/common.css b/themes/common.css index 5880d00e..f4bb6e48 100644 --- a/themes/common.css +++ b/themes/common.css @@ -9,6 +9,11 @@ box-shadow: none; } +#input-plugin-group .secondary-wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + border: none; + min-width: 0; +} + /* hide selector label */ #input-plugin-group .svelte-1gfkn6j { visibility: hidden; @@ -19,3 +24,91 @@ .wrap.svelte-xwlu1w { min-height: var(--size-32); } + +/* status bar height */ +.min.svelte-1yrv54 { + min-height: var(--size-12); +} + +/* copy btn */ +.message-btn-row { + width: 19px; + height: 19px; + position: absolute; + left: calc(100% + 3px); + top: 0; + display: flex; + justify-content: space-between; +} +/* .message-btn-row-leading, .message-btn-row-trailing { + display: inline-flex; + gap: 4px; +} */ +.message-btn-row button { + font-size: 18px; + align-self: center; + align-items: center; + flex-wrap: nowrap; + white-space: nowrap; + display: inline-flex; + flex-direction: row; + gap: 4px; + padding-block: 2px !important; +} + + +/* Scrollbar Width */ +::-webkit-scrollbar { + width: 12px; +} + +/* Scrollbar Track */ +::-webkit-scrollbar-track { + background: #f1f1f1; + border-radius: 12px; +} + +/* Scrollbar Handle */ +::-webkit-scrollbar-thumb { + background: #888; + border-radius: 12px; +} + +/* Scrollbar Handle on hover */ +::-webkit-scrollbar-thumb:hover { + background: #555; 
+} + +/* input btns: clear, reset, stop */ +#input-panel button { + min-width: min(80px, 100%); +} + +/* input btns: clear, reset, stop */ +#input-panel2 button { + min-width: min(80px, 100%); +} + + +#cbs { + background-color: var(--block-background-fill) !important; +} + +#interact-panel .form { + border: hidden +} + +.drag-area { + border: solid; + border-width: thin; + user-select: none; + padding-left: 2%; +} + +.floating-component #input-panel2 { + border-top-left-radius: 0px; + border-top-right-radius: 0px; + border: solid; + border-width: thin; + border-top-width: 0; +} \ No newline at end of file diff --git a/themes/common.js b/themes/common.js index 7733c7b1..4e7a75e2 100644 --- a/themes/common.js +++ b/themes/common.js @@ -1,4 +1,81 @@ -function ChatBotHeight() { +function gradioApp() { + // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript + const elems = document.getElementsByTagName('gradio-app'); + const elem = elems.length == 0 ? document : elems[0]; + if (elem !== document) { + elem.getElementById = function(id) { + return document.getElementById(id); + }; + } + return elem.shadowRoot ? elem.shadowRoot : elem; +} + + + + +function addCopyButton(botElement) { + // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript + // Copy bot button + const copiedIcon = ''; + const copyIcon = ''; + + const messageBtnColumnElement = botElement.querySelector('.message-btn-row'); + if (messageBtnColumnElement) { + // Do something if .message-btn-column exists, for example, remove it + // messageBtnColumnElement.remove(); + return; + } + + var copyButton = document.createElement('button'); + copyButton.classList.add('copy-bot-btn'); + copyButton.setAttribute('aria-label', 'Copy'); + copyButton.innerHTML = copyIcon; + copyButton.addEventListener('click', async () => { + const textToCopy = botElement.innerText; + try { + if ("clipboard" in navigator) { + await navigator.clipboard.writeText(textToCopy); + copyButton.innerHTML = copiedIcon; + setTimeout(() => { + copyButton.innerHTML = copyIcon; + }, 1500); + } else { + const textArea = document.createElement("textarea"); + textArea.value = textToCopy; + document.body.appendChild(textArea); + textArea.select(); + try { + document.execCommand('copy'); + copyButton.innerHTML = copiedIcon; + setTimeout(() => { + copyButton.innerHTML = copyIcon; + }, 1500); + } catch (error) { + console.error("Copy failed: ", error); + } + document.body.removeChild(textArea); + } + } catch (error) { + console.error("Copy failed: ", error); + } + }); + var messageBtnColumn = document.createElement('div'); + messageBtnColumn.classList.add('message-btn-row'); + messageBtnColumn.appendChild(copyButton); + botElement.appendChild(messageBtnColumn); +} + +function chatbotContentChanged(attempt = 1, force = false) { + // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript + for (var i = 0; i < attempt; i++) { + setTimeout(() => { + gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton); + }, i === 0 ? 
0 : 200); + } +} + +function chatbotAutoHeight(){ + // 自动调整高度 function update_height(){ var { panel_height_target, chatbot_height, chatbot } = get_elements(true); if (panel_height_target!=chatbot_height) @@ -28,6 +105,15 @@ function ChatBotHeight() { }, 50); // 每100毫秒执行一次 } +function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") { + chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap'); + var chatbotObserver = new MutationObserver(() => { + chatbotContentChanged(1); + }); + chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true }); + if (LAYOUT === "LEFT-RIGHT") {chatbotAutoHeight();} +} + function get_elements(consider_state_panel=false) { var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq'); if (!chatbot) { @@ -36,14 +122,14 @@ function get_elements(consider_state_panel=false) { const panel1 = document.querySelector('#input-panel').getBoundingClientRect(); const panel2 = document.querySelector('#basic-panel').getBoundingClientRect() const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect(); - const panel4 = document.querySelector('#interact-panel').getBoundingClientRect(); + // const panel4 = document.querySelector('#interact-panel').getBoundingClientRect(); const panel5 = document.querySelector('#input-panel2').getBoundingClientRect(); const panel_active = document.querySelector('#state-panel').getBoundingClientRect(); if (consider_state_panel || panel_active.height < 25){ document.state_panel_height = panel_active.height; } // 25 是chatbot的label高度, 16 是右侧的gap - var panel_height_target = panel1.height + panel2.height + panel3.height + panel4.height + panel5.height - 25 + 16*3; + var panel_height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16*2; // 禁止动态的state-panel高度影响 panel_height_target = panel_height_target + (document.state_panel_height-panel_active.height) var panel_height_target = parseInt(panel_height_target); diff --git a/themes/contrast.css b/themes/contrast.css index 54a1b2b4..22d5d480 100644 --- a/themes/contrast.css +++ b/themes/contrast.css @@ -198,7 +198,7 @@ } /* 小按钮 */ -.sm.svelte-1ipelgc { +.sm { font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; --button-small-text-weight: 600; --button-small-text-size: 16px; @@ -208,7 +208,7 @@ border-top-left-radius: 0px; } -#plugin-panel .sm.svelte-1ipelgc { +#plugin-panel .sm { font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; --button-small-text-weight: 400; --button-small-text-size: 14px; diff --git a/themes/contrast.py b/themes/contrast.py index fd4ef046..d407d92a 100644 --- a/themes/contrast.py +++ b/themes/contrast.py @@ -57,12 +57,9 @@ def adjust_theme(): button_cancel_text_color_dark="white", ) - if LAYOUT=="TOP-DOWN": - js = "" - else: - with open('themes/common.js', 'r', encoding='utf8') as f: - js = f"" - + with open('themes/common.js', 'r', encoding='utf8') as f: + js = f"" + # 添加一个萌萌的看板娘 if ADD_WAIFU: js += """ diff --git a/themes/default.css b/themes/default.css index a35cd1d4..65d5940b 100644 --- a/themes/default.css +++ b/themes/default.css @@ -9,15 +9,15 @@ border-radius: 4px; } -#plugin-panel .dropdown-arrow.svelte-p5edak { - width: 50px; +#plugin-panel .dropdown-arrow { + width: 25px; } #plugin-panel input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { padding-left: 5px; } /* 小按钮 */ -.sm.svelte-1ipelgc { +#basic-panel .sm { font-family: "Microsoft YaHei UI", "Helvetica", 
"Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; --button-small-text-weight: 600; --button-small-text-size: 16px; @@ -27,7 +27,7 @@ border-top-left-radius: 6px; } -#plugin-panel .sm.svelte-1ipelgc { +#plugin-panel .sm { font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; --button-small-text-weight: 400; --button-small-text-size: 14px; diff --git a/themes/default.py b/themes/default.py index 2611e7aa..da1f1874 100644 --- a/themes/default.py +++ b/themes/default.py @@ -57,11 +57,8 @@ def adjust_theme(): button_cancel_text_color_dark="white", ) - if LAYOUT=="TOP-DOWN": - js = "" - else: - with open('themes/common.js', 'r', encoding='utf8') as f: - js = f"" + with open('themes/common.js', 'r', encoding='utf8') as f: + js = f"" # 添加一个萌萌的看板娘 if ADD_WAIFU: diff --git a/themes/gradios.py b/themes/gradios.py index 8b661a56..7693a238 100644 --- a/themes/gradios.py +++ b/themes/gradios.py @@ -3,23 +3,29 @@ import logging from toolbox import get_conf, ProxyNetworkActivate CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT') +def dynamic_set_theme(THEME): + set_theme = gr.themes.ThemeClass() + with ProxyNetworkActivate('Download_Gradio_Theme'): + logging.info('正在下载Gradio主题,请稍等。') + if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-') + if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-') + set_theme = set_theme.from_hub(THEME.lower()) + return set_theme + def adjust_theme(): try: set_theme = gr.themes.ThemeClass() - with ProxyNetworkActivate(): + with ProxyNetworkActivate('Download_Gradio_Theme'): logging.info('正在下载Gradio主题,请稍等。') THEME, = get_conf('THEME') if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-') if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-') set_theme = set_theme.from_hub(THEME.lower()) - if LAYOUT=="TOP-DOWN": - js = "" - else: - with open('themes/common.js', 'r', encoding='utf8') as f: - js = f"" - + with open('themes/common.js', 'r', encoding='utf8') as f: + js = f"" + # 添加一个萌萌的看板娘 if ADD_WAIFU: js += """ diff --git a/themes/green.py b/themes/green.py index 5aa9e8b0..a29a0fa1 100644 --- a/themes/green.py +++ b/themes/green.py @@ -73,12 +73,8 @@ def adjust_theme(): chatbot_code_background_color_dark="*neutral_950", ) - js = '' - if LAYOUT=="TOP-DOWN": - js = "" - else: - with open('themes/common.js', 'r', encoding='utf8') as f: - js = f"" + with open('themes/common.js', 'r', encoding='utf8') as f: + js = f"" # 添加一个萌萌的看板娘 if ADD_WAIFU: diff --git a/themes/theme.py b/themes/theme.py index dbb8f1e2..42ee7500 100644 --- a/themes/theme.py +++ b/themes/theme.py @@ -2,17 +2,22 @@ import gradio as gr from toolbox import get_conf THEME, = get_conf('THEME') -if THEME == 'Chuanhu-Small-and-Beautiful': - from .green import adjust_theme, advanced_css - theme_declaration = "

[Chuanhu-Small-and-Beautiful主题]

" -elif THEME == 'High-Contrast': - from .contrast import adjust_theme, advanced_css - theme_declaration = "" -elif '/' in THEME: - from .gradios import adjust_theme, advanced_css - theme_declaration = "" -else: - from .default import adjust_theme, advanced_css - theme_declaration = "" - +def load_dynamic_theme(THEME): + adjust_dynamic_theme = None + if THEME == 'Chuanhu-Small-and-Beautiful': + from .green import adjust_theme, advanced_css + theme_declaration = "

[Chuanhu-Small-and-Beautiful主题]

" + elif THEME == 'High-Contrast': + from .contrast import adjust_theme, advanced_css + theme_declaration = "" + elif '/' in THEME: + from .gradios import adjust_theme, advanced_css + from .gradios import dynamic_set_theme + adjust_dynamic_theme = dynamic_set_theme(THEME) + theme_declaration = "" + else: + from .default import adjust_theme, advanced_css + theme_declaration = "" + return adjust_theme, advanced_css, theme_declaration, adjust_dynamic_theme +adjust_theme, advanced_css, theme_declaration, _ = load_dynamic_theme(THEME) \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index 1452c13a..36acbd5f 100644 --- a/toolbox.py +++ b/toolbox.py @@ -216,7 +216,7 @@ def get_reduce_token_percent(text): return 0.5, '不详' -def write_history_to_file(history, file_basename=None, file_fullname=None): +def write_history_to_file(history, file_basename=None, file_fullname=None, auto_caption=True): """ 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 """ @@ -235,7 +235,7 @@ def write_history_to_file(history, file_basename=None, file_fullname=None): if type(content) != str: content = str(content) except: continue - if i % 2 == 0: + if i % 2 == 0 and auto_caption: f.write('## ') try: f.write(content) @@ -472,7 +472,7 @@ def extract_archive(file_path, dest_dir): print("Successfully extracted rar archive to {}".format(dest_dir)) except: print("Rar format requires additional dependencies to install") - return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件' + return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议:使用zip压缩格式。' # 第三方库,需要预先pip install py7zr elif file_extension == '.7z': @@ -523,10 +523,11 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): # 把文件复制过去 if not os.path.exists(new_path): shutil.copyfile(file, new_path) # 将文件添加到chatbot cookie中,避免多用户干扰 - if chatbot: + if chatbot is not None: if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote'] else: current = [] chatbot._cookies.update({'files_to_promote': [new_path] + current}) + return new_path def disable_auto_promotion(chatbot): chatbot._cookies.update({'files_to_promote': []}) @@ -580,7 +581,7 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo # 整理文件集合 moved_files = [fp for fp in glob.glob(f'{target_path_base}/**/*', recursive=True)] - if "底部输入区" in checkboxes: + if "浮动输入区" in checkboxes: txt, txt2 = "", target_path_base else: txt, txt2 = target_path_base, "" @@ -955,7 +956,19 @@ class ProxyNetworkActivate(): """ 这段代码定义了一个名为TempProxy的空上下文管理器, 用于给一小段代码上代理 """ + def __init__(self, task=None) -> None: + self.task = task + if not task: + # 不给定task, 那么我们默认代理生效 + self.valid = True + else: + # 给定了task, 我们检查一下 + from toolbox import get_conf + WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY') + self.valid = (task in WHEN_TO_USE_PROXY) + def __enter__(self): + if not self.valid: return self from toolbox import get_conf proxies, = get_conf('proxies') if 'no_proxy' in os.environ: os.environ.pop('no_proxy') diff --git a/version b/version index 897b0428..d5c2012e 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.52, + "version": 3.55, "show_feature": true, - "new_feature": "提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率" + "new_feature": "重新编译Gradio优化使用体验 <-> 新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! 
<-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率" }
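For reference, the `request_llm/bridge_all.py` change above normalizes `AZURE_ENDPOINT` with a trailing slash before building the deployment URL, so the endpoint works whether or not the slash is included in the config. A small sketch using the example values from `docs/use_azure.md` above (both values are that doc's placeholders):

```python
# placeholder values copied from the use_azure.md example
AZURE_ENDPOINT = "https://rhtjjjjjj.openai.azure.com/"
AZURE_ENGINE = "qqwe"  # the Azure deployment name

# same normalization that bridge_all.py now applies
if not AZURE_ENDPOINT.endswith('/'):
    AZURE_ENDPOINT += '/'

azure_endpoint = AZURE_ENDPOINT + f"openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15"
print(azure_endpoint)
# https://rhtjjjjjj.openai.azure.com/openai/deployments/qqwe/chat/completions?api-version=2023-05-15
```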
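The `toolbox.py` change above makes `ProxyNetworkActivate` task-aware: callers such as `bridge_chatglm.py`, `bridge_llama2.py` and `themes/gradios.py` now pass a label (`'Download_LLM'`, `'Download_Gradio_Theme'`), and the proxy is only applied when that label appears in the `WHEN_TO_USE_PROXY` config. A self-contained sketch of the same idea; the class name, proxy URL and whitelist contents here are illustrative, not the project's actual values:

```python
import os

# labels that are allowed to use the proxy; the real list comes from
# get_conf('WHEN_TO_USE_PROXY') in config.py
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]

class TaskScopedProxy:
    """Export proxy env vars only while a whitelisted task is running."""
    def __init__(self, task=None, proxies=None):
        # placeholder proxy address for illustration
        self.proxies = proxies or {"http": "http://localhost:10881",
                                   "https": "http://localhost:10881"}
        # no task given -> behave like the old unconditional version
        self.valid = True if task is None else (task in WHEN_TO_USE_PROXY)

    def __enter__(self):
        if not self.valid:
            return self
        self.backup = {k: os.environ.get(k) for k in ("HTTP_PROXY", "HTTPS_PROXY")}
        os.environ["HTTP_PROXY"] = self.proxies["http"]
        os.environ["HTTPS_PROXY"] = self.proxies["https"]
        return self

    def __exit__(self, exc_type, exc, tb):
        if not self.valid:
            return
        # restore whatever was set before entering the block
        for k, v in self.backup.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

# usage: only whitelisted tasks actually see the proxy
with TaskScopedProxy(task="Download_LLM"):
    pass  # e.g. model downloads placed here would go through the proxy
```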