From 37f9b94deeaf38eb03966e3247f5ba5dff0bf02f Mon Sep 17 00:00:00 2001 From: binary-husky Date: Fri, 7 Feb 2025 00:17:36 +0800 Subject: [PATCH 1/8] add options to hide ui components --- main.py | 4 +- themes/gui_toolbar.py | 4 +- themes/init.js | 139 +++++++++++++++++++++++++++++++++++++++++- themes/theme.py | 20 ------ 4 files changed, 140 insertions(+), 27 deletions(-) diff --git a/main.py b/main.py index 310fa6a3..c250ab29 100644 --- a/main.py +++ b/main.py @@ -57,7 +57,7 @@ def main(): # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT from check_proxy import get_current_version - from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_show_or_hide, js_code_show_or_hide_group2 + from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_show_or_hide from themes.theme import js_code_for_toggle_darkmode from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid title_html = f"
<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>
{theme_declaration}" @@ -210,7 +210,7 @@ def main(): ret.update({area_customize: gr.update(visible=("自定义菜单" in a))}) return ret checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] ) - checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2) + checkboxes_2.select(None, [checkboxes_2], None, _js="""apply_checkbox_change_for_group2""") # 整理反复出现的控件句柄组合 input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg] diff --git a/themes/gui_toolbar.py b/themes/gui_toolbar.py index 703edcfb..072fbfe3 100644 --- a/themes/gui_toolbar.py +++ b/themes/gui_toolbar.py @@ -26,8 +26,8 @@ def define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAI fontfamily_dropdown = gr.Dropdown(AVAIL_FONTS, value=get_conf("FONT"), elem_id="elem_fontfamily", label="更换字体类型").style(container=False) fontsize_slider = gr.Slider(minimum=5, maximum=25, value=15, step=1, interactive=True, label="字体大小(默认15)", elem_id="elem_fontsize") checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) - opt = ["自定义菜单"] - value=[] + opt = ["自定义菜单", "主标题", "副标题", "显示logo"] + value=["主标题", "副标题", "显示logo"] if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"] checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False) dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm") diff --git a/themes/init.js b/themes/init.js index 5d7de63b..f240d80e 100644 --- a/themes/init.js +++ b/themes/init.js @@ -128,6 +128,14 @@ function gpt_academic_change_chatbot_font(fontfamily, fontsize, fontcolor) { } } +function footer_show_hide(show) { + if (show) { + document.querySelector('footer').style.display = ''; + } else { + document.querySelector('footer').style.display = 'none'; + } +} + async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { // 第一部分,布局初始化 remove_legacy_cookie(); @@ -179,6 +187,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { } } } + // 字体 gpt_academic_gradio_saveload("load", "elem_fontfamily", "js_fontfamily", null, "str"); gpt_academic_change_chatbot_font(getCookie("js_fontfamily"), null, null); @@ -205,7 +214,87 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { } - + if (getCookie("js_show_title")) { + // have cookie + bool_value = getCookie("js_show_title") + bool_value = bool_value == "True"; + searchString = "主标题"; + tool_bar_group = "cbsc"; + const true_function = function () { + document.querySelector('.prose.svelte-1ybaih5 h1').style.display = ''; + } + const false_function = function () { + document.querySelector('.prose.svelte-1ybaih5 h1').style.display = 'none'; + } + if (bool_value) { + // make btns appear + true_function(); + // deal with checkboxes + let arr_with_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "add" + ) + push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion"); + } else { + false_function(); + // deal with checkboxes + let arr_without_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "remove" + ) + push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion"); + } + } + if (getCookie("js_show_subtitle")) { + // have cookie + bool_value = getCookie("js_show_subtitle") + 
bool_value = bool_value == "True"; + searchString = "副标题"; + tool_bar_group = "cbsc"; + const true_function = function () { + document.querySelector('.prose.svelte-1ybaih5 h2').style.display = ''; + } + const false_function = function () { + document.querySelector('.prose.svelte-1ybaih5 h2').style.display = 'none'; + } + if (bool_value) { + // make btns appear + true_function(); + // deal with checkboxes + let arr_with_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "add" + ) + push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion"); + } else { + false_function(); + // deal with checkboxes + let arr_without_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "remove" + ) + push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion"); + } + } + if (getCookie("js_show_footer")) { + // have cookie + bool_value = getCookie("js_show_footer") + searchString = "显示logo"; + tool_bar_group = "cbsc"; + bool_value = bool_value == "True"; + if (bool_value) { + // make btns appear + footer_show_hide(true); + // deal with checkboxes + let arr_with_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "add" + ) + push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion"); + } else { + footer_show_hide(false); + // deal with checkboxes + let arr_without_clear_btn = update_array( + await get_data_from_gradio_component(tool_bar_group), searchString, "remove" + ) + push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion"); + } + } // clearButton 自动清除按钮 if (getCookie("js_clearbtn_show_cookie")) { // have cookie @@ -219,7 +308,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block"; // deal with checkboxes let arr_with_clear_btn = update_array( - await get_data_from_gradio_component('cbs'), "输入清除键", "add" + await get_data_from_gradio_component("cbs"), "输入清除键", "add" ) push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion"); } else { @@ -228,7 +317,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none"; // deal with checkboxes let arr_without_clear_btn = update_array( - await get_data_from_gradio_component('cbs'), "输入清除键", "remove" + await get_data_from_gradio_component("cbs"), "输入清除键", "remove" ) push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion"); } @@ -268,3 +357,47 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { change_theme("", "") } + + + +function apply_checkbox_change_for_group2(display_panel_arr) { + setTimeout(() => { + display_panel_arr = get_checkbox_selected_items("cbsc"); + + let searchString = "添加Live2D形象"; + if (display_panel_arr.includes(searchString)) { + setCookie("js_live2d_show_cookie", "True", 365); + loadLive2D(); + } else { + try { + setCookie("js_live2d_show_cookie", "False", 365); + $('.waifu').hide(); + } catch (e) { + } + } + + + function handleDisplay(searchString, key, displayElement, showFn, hideFn) { + if (display_panel_arr.includes(searchString)) { + setCookie(key, "True", 365); + if (showFn) showFn(); + if (displayElement) displayElement.style.display = ''; + } else { + setCookie(key, "False", 365); + if (hideFn) hideFn(); + if 
(displayElement) displayElement.style.display = 'none'; + } + } + + // 主标题 + const mainTitle = document.querySelector('.prose.svelte-1ybaih5 h1'); + handleDisplay("主标题", "js_show_title", mainTitle, null, null); + + // 副标题 + const subTitle = document.querySelector('.prose.svelte-1ybaih5 h2'); + handleDisplay("副标题", "js_show_subtitle", subTitle, null, null); + + // 显示logo + handleDisplay("显示logo", "js_show_footer", null, () => footer_show_hide(true), () => footer_show_hide(false)); + }, 50); +} \ No newline at end of file diff --git a/themes/theme.py b/themes/theme.py index 4ad83e8d..fb217d47 100644 --- a/themes/theme.py +++ b/themes/theme.py @@ -141,23 +141,3 @@ setTimeout(() => { } """ - - -js_code_show_or_hide_group2 = """ -(display_panel_arr)=>{ -setTimeout(() => { - display_panel_arr = get_checkbox_selected_items("cbsc"); - - let searchString = "添加Live2D形象"; - let ele = "none"; - if (display_panel_arr.includes(searchString)) { - setCookie("js_live2d_show_cookie", "True", 365); - loadLive2D(); - } else { - setCookie("js_live2d_show_cookie", "False", 365); - $('.waifu').hide(); - } - -}, 50); -} -""" From 8a0d96afd3c825930e6677961e2e60065be4f663 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Fri, 7 Feb 2025 01:21:21 +0800 Subject: [PATCH 2/8] consider element missing cases in js --- themes/init.js | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/themes/init.js b/themes/init.js index f240d80e..06cb5616 100644 --- a/themes/init.js +++ b/themes/init.js @@ -250,10 +250,16 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) { searchString = "副标题"; tool_bar_group = "cbsc"; const true_function = function () { - document.querySelector('.prose.svelte-1ybaih5 h2').style.display = ''; + element = document.querySelector('.prose.svelte-1ybaih5 h2'); + if (element) element.style.display = ''; + element = document.querySelector('.prose.svelte-1ybaih5 h6'); + if (element) element.style.display = ''; } const false_function = function () { - document.querySelector('.prose.svelte-1ybaih5 h2').style.display = 'none'; + element = document.querySelector('.prose.svelte-1ybaih5 h2'); + if (element) element.style.display = 'none'; + element = document.querySelector('.prose.svelte-1ybaih5 h6'); + if (element) element.style.display = 'none'; } if (bool_value) { // make btns appear From 6dda2061dd682f004303b747ef773e9ccf25b939 Mon Sep 17 00:00:00 2001 From: barry <56376794+oovvxxss@users.noreply.github.com> Date: Fri, 7 Feb 2025 21:28:05 +0800 Subject: [PATCH 3/8] Update bridge_openrouter.py (#2132) fix openrouter api 400 post bug Co-authored-by: lan <56376794+lostatnight@users.noreply.github.com> --- request_llms/bridge_openrouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/request_llms/bridge_openrouter.py b/request_llms/bridge_openrouter.py index 71a53a9c..374c982b 100644 --- a/request_llms/bridge_openrouter.py +++ b/request_llms/bridge_openrouter.py @@ -512,7 +512,7 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st model, _ = read_one_api_model_name(model) if llm_kwargs['llm_model'].startswith('openrouter-'): model = llm_kwargs['llm_model'][len('openrouter-'):] - model= read_one_api_model_name(model) + model, _= read_one_api_model_name(model) if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制 model = random.choice([ "gpt-3.5-turbo", From cf7c81170c8660cc00357bea4ff91707eae23090 Mon Sep 17 00:00:00 2001 From: Steven Moder Date: Fri, 7 Feb 2025 21:33:06 +0800 Subject: [PATCH 4/8] 
=?UTF-8?q?fix:=20=20return=20=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E6=95=B0=E9=87=8F=20=E5=8F=8A=20=E8=BF=94=E5=9B=9E=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E8=80=83=E8=99=91=20(#2129)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/oai_std_model_template.py | 63 ++++++++++++++------------ 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index 973eb10d..a5da92ea 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -1,16 +1,13 @@ import json import time import traceback + import requests from loguru import logger # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import ( - get_conf, - update_ui, - is_the_upload_folder, -) +from toolbox import get_conf, is_the_upload_folder, update_ui proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf( "proxies", "TIMEOUT_SECONDS", "MAX_RETRY" @@ -39,27 +36,35 @@ def decode_chunk(chunk): 用于解读"content"和"finish_reason"的内容(如果支持思维链也会返回"reasoning_content"内容) """ chunk = chunk.decode() - respose = "" + response = "" reasoning_content = "" finish_reason = "False" + + # 考虑返回类型是 text/json 和 text/event-stream 两种 + if chunk.startswith("data: "): + chunk = chunk[6:] + else: + chunk = chunk + try: - chunk = json.loads(chunk[6:]) + chunk = json.loads(chunk) except: - respose = "" + response = "" finish_reason = chunk + # 错误处理部分 if "error" in chunk: - respose = "API_ERROR" + response = "API_ERROR" try: chunk = json.loads(chunk) finish_reason = chunk["error"]["code"] except: finish_reason = "API_ERROR" - return respose, finish_reason + return response, reasoning_content, finish_reason try: if chunk["choices"][0]["delta"]["content"] is not None: - respose = chunk["choices"][0]["delta"]["content"] + response = chunk["choices"][0]["delta"]["content"] except: pass try: @@ -71,7 +76,7 @@ def decode_chunk(chunk): finish_reason = chunk["choices"][0]["finish_reason"] except: pass - return respose, reasoning_content, finish_reason + return response, reasoning_content, finish_reason def generate_message(input, model, key, history, max_output_token, system_prompt, temperature): @@ -106,7 +111,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt what_i_ask_now["role"] = "user" what_i_ask_now["content"] = input messages.append(what_i_ask_now) - playload = { + payload = { "model": model, "messages": messages, "temperature": temperature, @@ -114,7 +119,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt "max_tokens": max_output_token, } - return headers, playload + return headers, payload def get_predict_function( @@ -141,7 +146,7 @@ def get_predict_function( history=[], sys_prompt="", observe_window=None, - console_slience=False, + console_silence=False, ): """ 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 @@ -162,7 +167,7 @@ def get_predict_function( raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") if inputs == "": inputs = "你好👋" - headers, playload = generate_message( + headers, payload = generate_message( input=inputs, model=llm_kwargs["llm_model"], key=APIKEY, @@ -182,7 +187,7 @@ def get_predict_function( endpoint, headers=headers, proxies=None if disable_proxy else proxies, - json=playload, + json=payload, stream=True, timeout=TIMEOUT_SECONDS, ) @@ -198,7 +203,7 @@ def get_predict_function( result = "" finish_reason = "" if reasoning: - resoning_buffer = "" + 
reasoning_buffer = "" stream_response = response.iter_lines() while True: @@ -226,12 +231,12 @@ def get_predict_function( if chunk: try: if finish_reason == "stop": - if not console_slience: + if not console_silence: print(f"[response] {result}") break result += response_text if reasoning: - resoning_buffer += reasoning_content + reasoning_buffer += reasoning_content if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -247,9 +252,9 @@ def get_predict_function( logger.error(error_msg) raise RuntimeError("Json解析不合常规") if reasoning: - # reasoning 的部分加上框 (>) - return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + \ - '\n\n' + result + return f'''
+                {''.join([f'<p>{line}</p>' for line in reasoning_buffer.split('\n')])}
+                </div>
\n\n''' + result return result def predict( @@ -268,7 +273,7 @@ def get_predict_function( inputs 是本次问询的输入 top_p, temperature是chatGPT的内部调优参数 history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 + chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容 additional_fn代表点击的哪个按钮,按钮见functional.py """ from .bridge_all import model_info @@ -299,7 +304,7 @@ def get_predict_function( ) # 刷新界面 time.sleep(2) - headers, playload = generate_message( + headers, payload = generate_message( input=inputs, model=llm_kwargs["llm_model"], key=APIKEY, @@ -321,7 +326,7 @@ def get_predict_function( endpoint, headers=headers, proxies=None if disable_proxy else proxies, - json=playload, + json=payload, stream=True, timeout=TIMEOUT_SECONDS, ) @@ -367,7 +372,7 @@ def get_predict_function( chunk_decoded = chunk.decode() chatbot[-1] = ( chatbot[-1][0], - "[Local Message] {finish_reason},获得以下报错信息:\n" + f"[Local Message] {finish_reason},获得以下报错信息:\n" + chunk_decoded, ) yield from update_ui( @@ -385,7 +390,9 @@ def get_predict_function( if reasoning: gpt_replying_buffer += response_text gpt_reasoning_buffer += reasoning_content - history[-1] = '\n'.join(map(lambda x: '> ' + x, gpt_reasoning_buffer.split('\n'))) + '\n\n' + gpt_replying_buffer + history[-1] = f'''
+                        {''.join([f'<p>{line}</p>' for line in gpt_reasoning_buffer.split('\n')])}
+                        </div>
\n\n''' + gpt_replying_buffer else: gpt_replying_buffer += response_text # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 From 991a903fa957bc1bd92c3e7e5d915d0577243d33 Mon Sep 17 00:00:00 2001 From: Steven Moder Date: Sat, 8 Feb 2025 20:50:54 +0800 Subject: [PATCH 5/8] fix: f-string expression part cannot include a backslash (#2139) --- request_llms/oai_std_model_template.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index a5da92ea..9b6a1c4c 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -252,9 +252,9 @@ def get_predict_function( logger.error(error_msg) raise RuntimeError("Json解析不合常规") if reasoning: - return f'''
-                {''.join([f'<p>{line}</p>' for line in reasoning_buffer.split('\n')])}
-                </div>
\n\n''' + result + style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8' + paragraphs = ''.join([f'
<p>{line}</p>' for line in reasoning_buffer.split('\n')])
+            return f'''<div style="{style}">{paragraphs}</div>
\n\n''' + result return result def predict( @@ -390,9 +390,9 @@ def get_predict_function( if reasoning: gpt_replying_buffer += response_text gpt_reasoning_buffer += reasoning_content - history[-1] = f'''
-                        {''.join([f'<p>{line}</p>' for line in gpt_reasoning_buffer.split('\n')])}
-                        </div>
\n\n''' + gpt_replying_buffer + style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8' + paragraphs = ''.join([f'
<p>{line}</p>' for line in gpt_reasoning_buffer.split('\n')])
+                    history[-1] = f'<div style="{style}">{paragraphs}</div>
\n\n' + gpt_replying_buffer else: gpt_replying_buffer += response_text # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 From 07ece29c7ce429d6df1c92f74261975c4faedb6a Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Sat, 8 Feb 2025 20:54:01 +0800 Subject: [PATCH 6/8] raise error when the uploaded tar contain hard/soft link (#2136) --- shared_utils/handle_upload.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/shared_utils/handle_upload.py b/shared_utils/handle_upload.py index 14974ef0..89ad50a4 100644 --- a/shared_utils/handle_upload.py +++ b/shared_utils/handle_upload.py @@ -111,6 +111,8 @@ def extract_archive(file_path, dest_dir): member_path = os.path.normpath(member.name) full_path = os.path.join(dest_dir, member_path) full_path = os.path.abspath(full_path) + if member.islnk() or member.issym(): + raise Exception(f"Attempted Symlink in {member.name}") if not full_path.startswith(os.path.abspath(dest_dir) + os.sep): raise Exception(f"Attempted Path Traversal in {member.name}") From 163e59c0f309c731e8c8475466ad4c3597487f08 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 9 Feb 2025 19:33:02 +0800 Subject: [PATCH 7/8] minor bug fix --- config.py | 3 ++- request_llms/bridge_deepseekcoder.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/config.py b/config.py index 11ee666c..297ccbf2 100644 --- a/config.py +++ b/config.py @@ -81,7 +81,7 @@ API_URL_REDIRECT = {} # 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次 # 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview -DEFAULT_WORKER_NUM = 3 +DEFAULT_WORKER_NUM = 8 # 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"] @@ -103,6 +103,7 @@ AVAIL_FONTS = [ "华文中宋(STZhongsong)", "华文新魏(STXinwei)", "华文隶书(STLiti)", + # 备注:以下字体需要网络支持,您可以自定义任意您喜欢的字体,如下所示,需要满足的格式为 "字体昵称(字体英文真名@字体css下载链接)" "思源宋体(Source Han Serif CN VF@https://chinese-fonts-cdn.deno.dev/packages/syst/dist/SourceHanSerifCN/result.css)", "月星楷(Moon Stars Kai HW@https://chinese-fonts-cdn.deno.dev/packages/moon-stars-kai/dist/MoonStarsKaiHW-Regular/result.css)", "珠圆体(MaokenZhuyuanTi@https://chinese-fonts-cdn.deno.dev/packages/mkzyt/dist/猫啃珠圆体/result.css)", diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py index 9f73375a..706de6eb 100644 --- a/request_llms/bridge_deepseekcoder.py +++ b/request_llms/bridge_deepseekcoder.py @@ -6,7 +6,6 @@ from toolbox import get_conf from request_llms.local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread from loguru import logger -import torch import os def download_huggingface_model(model_name, max_retry, local_dir): @@ -29,6 +28,7 @@ class GetCoderLMHandle(LocalLLMHandle): self.cmd_to_install = cmd_to_install def load_model_and_tokenizer(self): + import torch # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 with ProxyNetworkActivate('Download_LLM'): from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer From add29eba08eacc6fc01cf6712eb7ca8c2cb2c5f2 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 9 Feb 2025 20:26:52 +0800 Subject: [PATCH 8/8] fine tune reasoning css --- main.py | 2 +- request_llms/bridge_all.py | 2 +- request_llms/oai_std_model_template.py | 30 ++++++++++++++------------ themes/common.css | 12 +++++++++++ 4 files changed, 30 insertions(+), 16 deletions(-) diff --git a/main.py b/main.py index c250ab29..02f10b13 100644 --- a/main.py +++ b/main.py @@ -217,7 +217,7 @@ def main(): 
input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"] output_combo = [cookies, chatbot, history, status] predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo) - + # 提交按钮、重置按钮 multiplex_submit_btn.click( None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""") diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 0679000a..cc5c1bde 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -1072,7 +1072,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容 }) except: logger.error(trimmed_format_exc()) -# -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=- +# -=-=-=-=-=-=- 幻方-深度求索本地大模型 -=-=-=-=-=-=- if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder try: from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index 9b6a1c4c..c652a0ef 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -1,13 +1,10 @@ import json import time import traceback - import requests -from loguru import logger -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, is_the_upload_folder, update_ui +from loguru import logger +from toolbox import get_conf, is_the_upload_folder, update_ui, update_ui_lastest_msg proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf( "proxies", "TIMEOUT_SECONDS", "MAX_RETRY" @@ -76,7 +73,7 @@ def decode_chunk(chunk): finish_reason = chunk["choices"][0]["finish_reason"] except: pass - return response, reasoning_content, finish_reason + return response, reasoning_content, finish_reason, str(chunk) def generate_message(input, model, key, history, max_output_token, system_prompt, temperature): @@ -162,7 +159,7 @@ def get_predict_function( 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 """ from .bridge_all import model_info - watch_dog_patience = 5 # 看门狗的耐心,设置5秒不准咬人(咬的也不是人 + watch_dog_patience = 5 # 看门狗的耐心,设置5秒不准咬人 (咬的也不是人) if len(APIKEY) == 0: raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") if inputs == "": @@ -215,7 +212,7 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, reasoning_content, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": continue @@ -252,9 +249,8 @@ def get_predict_function( logger.error(error_msg) raise RuntimeError("Json解析不合常规") if reasoning: - style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8' paragraphs = ''.join([f'
<p>{line}</p>' for line in reasoning_buffer.split('\n')])
-            return f'''<div style="{style}">{paragraphs}</div>\n\n''' + result
+            return f'''<div class="reasoning_process">{paragraphs}</div>
\n\n''' + result return result def predict( @@ -348,14 +344,21 @@ def get_predict_function( gpt_reasoning_buffer = "" stream_response = response.iter_lines() + wait_counter = 0 while True: try: chunk = next(stream_response) except StopIteration: + if wait_counter != 0 and gpt_replying_buffer == "": + yield from update_ui_lastest_msg(lastmsg="模型调用失败 ...", chatbot=chatbot, history=history, msg="failed") break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, reasoning_content, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk) + if decoded_chunk == ': keep-alive': + wait_counter += 1 + yield from update_ui_lastest_msg(lastmsg="等待中 " + "".join(["."] * (wait_counter%10)), chatbot=chatbot, history=history, msg="waiting ...") + continue # 返回的数据流第一次为空,继续等待 if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": status_text = f"finish_reason: {finish_reason}" @@ -372,7 +375,7 @@ def get_predict_function( chunk_decoded = chunk.decode() chatbot[-1] = ( chatbot[-1][0], - f"[Local Message] {finish_reason},获得以下报错信息:\n" + f"[Local Message] {finish_reason}, 获得以下报错信息:\n" + chunk_decoded, ) yield from update_ui( @@ -390,9 +393,8 @@ def get_predict_function( if reasoning: gpt_replying_buffer += response_text gpt_reasoning_buffer += reasoning_content - style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8' paragraphs = ''.join([f'
<p>{line}</p>' for line in gpt_reasoning_buffer.split('\n')])
-                    history[-1] = f'<div style="{style}">{paragraphs}</div>\n\n' + gpt_replying_buffer
+                    history[-1] = f'<div class="reasoning_process">{paragraphs}</div>
\n\n---\n\n' + gpt_replying_buffer else: gpt_replying_buffer += response_text # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 diff --git a/themes/common.css b/themes/common.css index f2cbd32a..20bf3e8a 100644 --- a/themes/common.css +++ b/themes/common.css @@ -311,3 +311,15 @@ backdrop-filter: blur(10px); background-color: rgba(var(--block-background-fill), 0.5); } + + +.reasoning_process { + font-size: smaller; + font-style: italic; + margin: 0px; + padding: 1em; + line-height: 1.5; + text-wrap: wrap; + opacity: 0.8; +} +