More model switching

Your Name
2023-04-17 21:23:03 +08:00
parent 03ba072c16
commit 9bd8511ba4
5 changed files with 166 additions and 141 deletions


@@ -21,38 +21,42 @@ from .bridge_chatglm import predict as chatglm_ui
from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
from .bridge_tgui import predict as tgui_ui
methods = {
"openai-no-ui": chatgpt_noui,
"openai-ui": chatgpt_ui,
"chatglm-no-ui": chatglm_noui,
"chatglm-ui": chatglm_ui,
"tgui-no-ui": tgui_noui,
"tgui-ui": tgui_ui,
}
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
model_info = {
# openai
"gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": "https://api.openai.com/v1/chat/completions",
"max_token": 4096,
"tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
"token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
},
"gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": "https://api.openai.com/v1/chat/completions",
"max_token": 4096,
"tokenizer": tiktoken.encoding_for_model("gpt-4"),
"token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
},
# api_2d
"gpt-3.5-turbo-api2d": {
"api2d-gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": "https://openai.api2d.net/v1/chat/completions",
"max_token": 4096,
"tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
"token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
},
"gpt-4-api2d": {
"api2d-gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": "https://openai.api2d.net/v1/chat/completions",
"max_token": 4096,
"tokenizer": tiktoken.encoding_for_model("gpt-4"),
"token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
@@ -60,18 +64,20 @@ model_info = {
# chatglm
"chatglm": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
"token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
},
}
def LLM_CATCH_EXCEPTION(f):
"""
Decorator function that makes errors visible in the output
"""
def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
try:
@@ -85,21 +91,20 @@ def LLM_CATCH_EXCEPTION(f):
return tb_str
return decorated
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
"""
Send the query to the LLM and wait for the reply, returned in one piece without showing intermediate output.
Internally it still uses streaming, so the connection is not cut off midway.
inputs:
    the input of this query
sys_prompt:
    the silent system prompt
llm_kwargs:
    internal tuning parameters of the LLM
history:
    the list of previous dialogue turns
observe_window = None:
    used to pass the partial output across threads; most of the time it only serves a fancy visual effect and can be left empty.
    observe_window[0]: observation window. observe_window[1]: watchdog
"""
import threading, time, copy
@@ -109,12 +114,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"
# 如果只询问1个大语言模型
if model.startswith('gpt'):
method = methods['openai-no-ui']
elif model == 'chatglm':
method = methods['chatglm-no-ui']
elif model.startswith('tgui'):
method = methods['tgui-no-ui']
method = model_info[model]["fn_without_ui"]
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
else:
# If several LLMs are queried at the same time:
@@ -129,12 +129,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
futures = []
for i in range(n_model):
model = models[i]
if model.startswith('gpt'):
method = methods['openai-no-ui']
elif model == 'chatglm':
method = methods['chatglm-no-ui']
elif model.startswith('tgui'):
method = methods['tgui-no-ui']
method = model_info[model]["fn_without_ui"]
llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
llm_kwargs_feedin['llm_model'] = model
future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
@@ -176,20 +171,15 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
def predict(inputs, llm_kwargs, *args, **kwargs):
"""
Send the query to the LLM and fetch the output as a stream.
Used for the basic chat feature.
inputs: the input of this query
top_p, temperature: internal tuning parameters of the LLM
history: the list of previous dialogue turns (note that overly long inputs or history will trigger a token-overflow error)
chatbot: the dialogue list shown in the WebUI; modify it and yield it back to update the chat interface directly
additional_fn: which button was clicked (the buttons are defined in functional.py)
"""
if llm_kwargs['llm_model'].startswith('gpt'):
method = methods['openai-ui']
elif llm_kwargs['llm_model'] == 'chatglm':
method = methods['chatglm-ui']
elif llm_kwargs['llm_model'].startswith('tgui'):
method = methods['tgui-ui']
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
yield from method(inputs, llm_kwargs, *args, **kwargs)
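For context, here is a rough sketch of how a function plugin might call the refactored no-UI entry point. The signature comes from the definition above; the import path, the prompt strings, and the exact contents of llm_kwargs and observe_window are assumptions (real callers pass additional tuning fields such as top_p and temperature).

# Illustrative sketch, not part of this commit.
import time
from request_llm.bridge_all import predict_no_ui_long_connection  # assumed import path

llm_kwargs = {"llm_model": "gpt-3.5-turbo"}   # dispatch key into model_info; real callers add more fields
observe_window = ["", time.time()]            # [0]: partial output window, [1]: watchdog (assumed to be a timestamp)

answer = predict_no_ui_long_connection(
    inputs="Summarize the latest changes.",
    llm_kwargs=llm_kwargs,
    history=[],                               # no previous dialogue turns
    sys_prompt="You are a helpful assistant.",
    observe_window=observe_window,
    console_slience=True,                     # parameter name is spelled this way in the source
)
print(answer)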