Version 3.74: Merge latest updates on dev branch (frontier) (#1621)
* Update version to 3.74
* Add support for Yi Model API (#1635)
  * Update to support the Yi (零一万物) models
  * Remove newbing
  * Update config
  Co-authored-by: binary-husky <qingxu.fu@outlook.com>
* Refactor function signatures in bridge files
* Fix Qwen API change
* Rename and refactor functions
* Rename and move some cookie functions
* Add the haiku model and document the new endpoint configuration (#1626)
  * Add haiku and the endpoint configuration notes
  * Sync the notes with the latest endpoints
  Co-authored-by: binary-husky <qingxu.fu@outlook.com>
* Authenticate file access under the private_upload directory (#1596)
  * Minor fastapi adjustment
  * Add logging functionality to enable saving conversation records
  * Waiting to fix username retrieval
  * Support a second web path
  * Allow accessing the default user dir
  Co-authored-by: binary-husky <qingxu.fu@outlook.com>
* Remove yaml deps
* Fix favicon
* Fix abs path auth problem
* Add a missing return statement
* Add `dashscope` to deps
* Fix GHSA-v9q9-xj86-953p
* Patch unauthorized access caused by overlapping usernames (#1681)
  * Add Cohere model API access
  * Mark Cohere as can_multi_thread
  * Fix blocking of user access (failed attempt)
  * Fix fastapi bug
  * Change Cohere API endpoint
  * Explain version

Co-authored-by: Menghuan1918 <menghuan2003@outlook.com>
Co-authored-by: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Co-authored-by: XIao <46100050+Kilig947@users.noreply.github.com>
@@ -34,6 +34,9 @@ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
 from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
 from .bridge_zhipu import predict as zhipu_ui
 
+from .bridge_cohere import predict as cohere_ui
+from .bridge_cohere import predict_no_ui_long_connection as cohere_noui
+
 colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 class LazyloadTiktoken(object):

@@ -64,6 +67,7 @@ newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
 gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models"
 claude_endpoint = "https://api.anthropic.com/v1/messages"
 yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
+cohere_endpoint = 'https://api.cohere.ai/v1/chat'
 
 if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
 azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'

@@ -82,6 +86,7 @@ if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[new
 if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemini_endpoint]
 if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint]
 if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
+if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
 
 # 获取tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")

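These redirect checks follow the project's established pattern: every built-in endpoint may be remapped through the API_URL_REDIRECT dict from config.py before any request is made. A minimal sketch in plain Python (not part of the diff; the dict value here is an illustrative assumption):

    API_URL_REDIRECT = {"https://api.cohere.ai/v1/chat": "https://my-proxy.example.com/v1/chat"}  # hypothetical config value

    cohere_endpoint = 'https://api.cohere.ai/v1/chat'
    # Same one-liner pattern as the hunk above: swap in the redirected URL if one is configured.
    if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
    print(cohere_endpoint)  # -> https://my-proxy.example.com/v1/chat
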
@@ -310,6 +315,18 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+
+    # cohere
+    "cohere-command-r-plus": {
+        "fn_with_ui": cohere_ui,
+        "fn_without_ui": cohere_noui,
+        "can_multi_thread": True,
+        "endpoint": cohere_endpoint,
+        "max_token": 1024 * 4,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
 }
 # -=-=-=-=-=-=- 月之暗面 -=-=-=-=-=-=-
 from request_llms.bridge_moonshot import predict as moonshot_ui

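For orientation, model_info is the dispatch table the rest of the framework consults by model name; registering an entry like the one above is all it takes to make a model callable. A self-contained sketch of the lookup pattern (the stub function is illustrative, not project code):

    def cohere_noui(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
        # Stands in for request_llms.bridge_cohere.predict_no_ui_long_connection.
        return f"(stub reply to: {inputs})"

    model_info = {
        "cohere-command-r-plus": {
            "fn_without_ui": cohere_noui,          # non-UI callable, used by plugins and worker threads
            "can_multi_thread": True,              # marks the bridge as safe for concurrent calls
            "endpoint": "https://api.cohere.ai/v1/chat",
            "max_token": 1024 * 4,
        },
    }

    # The dispatch pattern used throughout bridge_all.py:
    method = model_info["cohere-command-r-plus"]["fn_without_ui"]
    print(method("hello", llm_kwargs={}, history=[], sys_prompt=""))
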
@@ -359,7 +376,7 @@ for model in AVAIL_LLM_MODELS:
 
 # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
 # claude家族
-claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-sonnet-20240229","claude-3-opus-20240229"]
+claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229"]
 if any(item in claude_models for item in AVAIL_LLM_MODELS):
     from .bridge_claude import predict_no_ui_long_connection as claude_noui
     from .bridge_claude import predict as claude_ui

@@ -393,6 +410,16 @@ if any(item in claude_models for item in AVAIL_LLM_MODELS):
             "token_cnt": get_token_num_gpt35,
         },
     })
+    model_info.update({
+        "claude-3-haiku-20240307": {
+            "fn_with_ui": claude_ui,
+            "fn_without_ui": claude_noui,
+            "endpoint": claude_endpoint,
+            "max_token": 200000,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
     model_info.update({
         "claude-3-sonnet-20240229": {
             "fn_with_ui": claude_ui,

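With this registration, claude-3-haiku-20240307 becomes selectable like any other Claude variant; what activates the guarded import above is listing the model in AVAIL_LLM_MODELS. A sketch of the opt-in (the config excerpt is an assumption based on the pattern the file already uses):

    # config_private.py (hypothetical excerpt):
    AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "claude-3-haiku-20240307"]

    # bridge_all.py then only imports the Claude bridge when a Claude model is requested:
    claude_models = ["claude-instant-1.2", "claude-2.0", "claude-2.1",
                     "claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
    if any(item in claude_models for item in AVAIL_LLM_MODELS):
        pass  # from .bridge_claude import ...  (deferred so unused dependencies never load)
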
@@ -789,7 +816,7 @@ def LLM_CATCH_EXCEPTION(f):
     """
     装饰器函数,将错误显示出来
     """
-    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+    def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
         try:
             return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
         except Exception as e:

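The hunk only shows the inner function being annotated; for readers without the full file, here is a self-contained sketch of the wrap-and-surface-errors pattern that LLM_CATCH_EXCEPTION implements (the real body in bridge_all.py may differ in details):

    import traceback

    def LLM_CATCH_EXCEPTION(f):
        """装饰器函数,将错误显示出来 (sketch)."""
        def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
            try:
                return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
            except Exception:
                tb_str = traceback.format_exc()
                if observe_window: observe_window[0] = tb_str  # surface the error through the observation window
                return tb_str
        return decorated
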
@@ -799,9 +826,9 @@ def LLM_CATCH_EXCEPTION(f):
     return decorated
 
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_slience:bool=False):
     """
-    发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
+    发送至LLM,等待回复,一次性完成,不显示中间过程。但内部(尽可能地)用stream的方法避免中途网线被掐。
     inputs:
         是本次问询的输入
     sys_prompt:

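The observe_window protocol this docstring describes deserves a concrete illustration: slot 0 accumulates streamed text, slot 1 is a watchdog timestamp that the caller must keep refreshing or the worker aborts. A hedged usage sketch (assumes predict_no_ui_long_connection and a populated llm_kwargs are in scope):

    import threading, time

    observe_window = ["", time.time()]  # [0]: text streamed so far, [1]: watchdog timestamp

    def worker():
        result = predict_no_ui_long_connection("你好", llm_kwargs, history=[],
                                               sys_prompt="", observe_window=observe_window)

    t = threading.Thread(target=worker, daemon=True); t.start()
    while t.is_alive():
        observe_window[1] = time.time()   # feed the watchdog; stop feeding it to cancel the worker
        print(observe_window[0])          # partial output, for a live progress display
        time.sleep(1)
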
@@ -819,7 +846,6 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
     model = llm_kwargs['llm_model']
     n_model = 1
     if '&' not in model:
         assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"
-
         # 如果只询问1个大语言模型:
         method = model_info[model]["fn_without_ui"]

@@ -880,15 +906,22 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         return res
 
 
-def predict(inputs, llm_kwargs, *args, **kwargs):
+def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
     """
     发送至LLM,流式获取输出。
     用于基础的对话功能。
-    inputs 是本次问询的输入
-    top_p, temperature是LLM的内部调优参数
-    history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
-    chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
-    additional_fn代表点击的哪个按钮,按钮见functional.py
+
+    完整参数列表:
+        predict(
+            inputs:str,                 # 是本次问询的输入
+            llm_kwargs:dict,            # 是LLM的内部调优参数
+            plugin_kwargs:dict,         # 是插件的内部参数
+            chatbot:ChatBotWithCookies, # 原样传递,负责向用户前端展示对话,兼顾前端状态的功能
+            history:list=[],            # 是之前的对话列表
+            system_prompt:str='',       # 系统静默prompt
+            stream:bool=True,           # 是否流式输出(已弃用)
+            additional_fn:str=None      # 基础功能区按钮的附加功能
+        )
     """
 
     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")

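Since predict is a generator that refreshes the WebUI on every yield, a caller drives it by exhausting it. A minimal driver sketch matching the parameter list above (llm_kwargs and chatbot are assumed to be constructed elsewhere):

    for _ in predict(inputs="你好", llm_kwargs=llm_kwargs, plugin_kwargs={},
                     chatbot=chatbot, history=[], system_prompt=""):
        pass  # each yield has already pushed the partial reply to the chat interface
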
@@ -137,7 +137,8 @@ class GetGLMFTHandle(Process):
     global glmft_handle
     glmft_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -23,6 +23,7 @@ import random
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
 from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
 from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
+from toolbox import ChatBotWithCookies
 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
     get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')

@@ -69,7 +70,7 @@ def verify_endpoint(endpoint):
         raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
     return endpoint
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
     """
     发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
     inputs:

@@ -147,7 +148,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     return result
 
 
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     """
     发送至chatGPT,流式获取输出。
     用于基础的对话功能。

@@ -13,11 +13,11 @@ import logging
 import os
 import time
 import traceback
-from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path
 import json
 import requests
+from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat
 picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
-Claude_3_Models = ["claude-3-sonnet-20240229", "claude-3-opus-20240229"]
+Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
 
 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件

@@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
             # make a POST request to the API endpoint, stream=False
             from .bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-            response = requests.post(endpoint, headers=headers, json=message,
+            response = requests.post(endpoint, headers=headers, json=message,
                                      proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
         except requests.exceptions.ReadTimeout as e:
             retry += 1

@@ -116,7 +116,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
             if need_to_pass:
                 pass
             elif is_last_chunk:
-                logging.info(f'[response] {result}')
+                # logging.info(f'[response] {result}')
                 break
             else:
                 if chunkjson and chunkjson['type'] == 'content_block_delta':

@@ -194,7 +194,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             # make a POST request to the API endpoint, stream=True
             from .bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-            response = requests.post(endpoint, headers=headers, json=message,
+            response = requests.post(endpoint, headers=headers, json=message,
                                      proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
         except requests.exceptions.ReadTimeout as e:
             retry += 1

@@ -216,7 +216,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if need_to_pass:
                 pass
             elif is_last_chunk:
-                logging.info(f'[response] {gpt_replying_buffer}')
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                # logging.info(f'[response] {gpt_replying_buffer}')
                 break
             else:
                 if chunkjson and chunkjson['type'] == 'content_block_delta':

@@ -305,4 +306,4 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):
         'stream': True,
         'system': system_prompt
     }
-    return headers, payload
+    return headers, payload

request_llms/bridge_cohere.py (new file, 328 lines)
@@ -0,0 +1,328 @@
# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目

"""
该文件中主要包含三个函数

不具备多线程能力的函数:
1. predict: 正常对话时使用,具备完备的交互功能,不可多线程

具备多线程调用能力的函数
2. predict_no_ui_long_connection:支持多线程
"""

import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
import random

# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
from toolbox import ChatBotWithCookies
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
    get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')

timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'

def get_full_error(chunk, stream_response):
    """
    获取完整的从Cohere返回的报错
    """
    while True:
        try:
            chunk += next(stream_response)
        except:
            break
    return chunk

def decode_chunk(chunk):
    # 提前读取一些信息 (用于判断异常)
    chunk_decoded = chunk.decode()
    chunkjson = None
    has_choices = False
    choice_valid = False
    has_content = False
    has_role = False
    try:
        chunkjson = json.loads(chunk_decoded)
        has_choices = 'choices' in chunkjson
        if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
        if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
        if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
        if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
    except:
        pass
    return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role

from functools import lru_cache
@lru_cache(maxsize=32)
def verify_endpoint(endpoint):
    """
    检查endpoint是否可用
    """
    if "你亲手写的api名称" in endpoint:
        raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
    return endpoint

def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
    """
    发送,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
    inputs:
        是本次问询的输入
    sys_prompt:
        系统静默prompt
    llm_kwargs:
        内部调优参数
    history:
        是之前的对话列表
    observe_window = None:
        用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
    """
    watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=False
            from .bridge_all import model_info
            endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
        except requests.exceptions.ReadTimeout as e:
            retry += 1
            traceback.print_exc()
            if retry > MAX_RETRY: raise TimeoutError
            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')

    stream_response = response.iter_lines()
    result = ''
    json_data = None
    while True:
        try: chunk = next(stream_response)
        except StopIteration:
            break
        except requests.exceptions.ConnectionError:
            chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
        chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
        if chunkjson['event_type'] == 'stream-start': continue
        if chunkjson['event_type'] == 'text-generation':
            result += chunkjson["text"]
            if not console_slience: print(chunkjson["text"], end='')
            if observe_window is not None:
                # 观测窗,把已经获取的数据显示出去
                if len(observe_window) >= 1:
                    observe_window[0] += chunkjson["text"]
                # 看门狗,如果超过期限没有喂狗,则终止
                if len(observe_window) >= 2:
                    if (time.time()-observe_window[1]) > watch_dog_patience:
                        raise RuntimeError("用户取消了程序。")
        if chunkjson['event_type'] == 'stream-end': break
    return result

def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
    """
    发送至chatGPT,流式获取输出。
    用于基础的对话功能。
    inputs 是本次问询的输入
    top_p, temperature是chatGPT的内部调优参数
    history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
    chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
    additional_fn代表点击的哪个按钮,按钮见functional.py
    """
    # if is_any_api_key(inputs):
    #     chatbot._cookies['api_key'] = inputs
    #     chatbot.append(("输入已识别为Cohere的api_key", what_keys(inputs)))
    #     yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
    #     return
    # elif not is_any_api_key(chatbot._cookies['api_key']):
    #     chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
    #     yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
    #     return

    user_input = inputs
    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = inputs
    # logging.info(f'[raw_input] {raw_input}')
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

    # check mis-behavior
    if is_the_upload_folder(user_input):
        chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
        yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
        time.sleep(2)

    try:
        headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
    except RuntimeError as e:
        chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
        yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
        return

    # 检查endpoint是否合法
    try:
        from .bridge_all import model_info
        endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
    except:
        tb_str = '```\n' + trimmed_format_exc() + '```'
        chatbot[-1] = (inputs, tb_str)
        yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面
        return

    history.append(inputs); history.append("")

    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=True
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
        except:
            retry += 1
            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
            if retry > MAX_RETRY: raise TimeoutError

    gpt_replying_buffer = ""

    is_head_of_the_stream = True
    if stream:
        stream_response = response.iter_lines()
        while True:
            try:
                chunk = next(stream_response)
            except StopIteration:
                # 非Cohere官方接口的出现这样的报错,Cohere和API2D不会走这里
                chunk_decoded = chunk.decode()
                error_msg = chunk_decoded
                # 其他情况,直接返回报错
                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                yield from update_ui(chatbot=chatbot, history=history, msg="非Cohere官方接口返回了错误:" + chunk.decode()) # 刷新界面
                return

            # 提前读取一些信息 (用于判断异常)
            chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)

            if chunkjson:
                try:
                    if chunkjson['event_type'] == 'stream-start':
                        continue
                    if chunkjson['event_type'] == 'text-generation':
                        gpt_replying_buffer = gpt_replying_buffer + chunkjson["text"]
                        history[-1] = gpt_replying_buffer
                        chatbot[-1] = (history[-2], history[-1])
                        yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
                    if chunkjson['event_type'] == 'stream-end':
                        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
                        history[-1] = gpt_replying_buffer
                        chatbot[-1] = (history[-2], history[-1])
                        yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
                        break
                except Exception as e:
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
                    chunk = get_full_error(chunk, stream_response)
                    chunk_decoded = chunk.decode()
                    error_msg = chunk_decoded
                    chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
                    print(error_msg)
                    return

def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
    from .bridge_all import model_info
    Cohere_website = ' 请登录Cohere查看详情 https://platform.Cohere.com/signup'
    if "reduce the length" in error_msg:
        if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
    elif "does not exist" in error_msg:
        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
    elif "Incorrect API key" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. Cohere以提供了不正确的API_KEY为由, 拒绝服务. " + Cohere_website)
    elif "exceeded your current quota" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. Cohere以账户额度不足为由, 拒绝服务." + Cohere_website)
    elif "account is not active" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
    elif "associated with a deactivated account" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
    elif "API key has been deactivated" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
    elif "bad forward key" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
    elif "Not enough point" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
    else:
        from toolbox import regular_txt_to_markdown
        tb_str = '```\n' + trimmed_format_exc() + '```'
        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
    return chatbot, history

def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
    """
    整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
    """
    # if not is_any_api_key(llm_kwargs['api_key']):
    #     raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")

    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    if API_ORG.startswith('org-'): headers.update({"Cohere-Organization": API_ORG})
    if llm_kwargs['llm_model'].startswith('azure-'):
        headers.update({"api-key": api_key})
        if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys():
            azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]
            headers.update({"api-key": azure_api_key_unshared})

    conversation_cnt = len(history) // 2

    messages = [{"role": "SYSTEM", "message": system_prompt}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "USER"
            what_i_have_asked["message"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "CHATBOT"
            what_gpt_answer["message"] = history[index+1]
            if what_i_have_asked["message"] != "":
                if what_gpt_answer["message"] == "": continue
                if what_gpt_answer["message"] == timeout_bot_msg: continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['message'] = what_gpt_answer['message']

    model = llm_kwargs['llm_model']
    if model.startswith('cohere-'): model = model[len('cohere-'):]
    payload = {
        "model": model,
        "message": inputs,
        "chat_history": messages,
        "temperature": llm_kwargs['temperature'],    # 1.0,
        "top_p": llm_kwargs['top_p'],    # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    return headers,payload

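All of the stream handling in this new file keys off the event_type field of each JSON line. For orientation, here is a runnable miniature of the same loop, with chunk shapes inferred from the code above (illustrative samples, not captured API output):

    import json

    raw_chunks = [
        b'{"event_type": "stream-start"}',
        b'{"event_type": "text-generation", "text": "Hel"}',
        b'{"event_type": "text-generation", "text": "lo"}',
        b'{"event_type": "stream-end"}',
    ]

    result = ""
    for chunk in raw_chunks:
        chunkjson = json.loads(chunk.decode())
        if chunkjson['event_type'] == 'stream-start': continue
        if chunkjson['event_type'] == 'text-generation': result += chunkjson["text"]
        if chunkjson['event_type'] == 'stream-end': break
    print(result)  # -> Hello
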
@@ -7,6 +7,7 @@ import re
 import os
 import time
 from request_llms.com_google import GoogleChatInit
+from toolbox import ChatBotWithCookies
 from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
 
 proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')

@@ -44,7 +45,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     return gpt_replying_buffer
 
 
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     # 检查API_KEY
     if get_conf("GEMINI_API_KEY") == "":
         yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)

@@ -1,10 +1,10 @@
 
-from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
+from transformers import AutoModel, AutoTokenizer
 
 load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
     global llama_glm_handle
     llama_glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -1,10 +1,10 @@
 
-from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
+from transformers import AutoModel, AutoTokenizer
 
 load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
     global pangu_glm_handle
     pangu_glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
     global rwkv_glm_handle
     rwkv_glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -8,6 +8,7 @@ import time
 import logging
 
 from toolbox import get_conf, update_ui, log_chat
+from toolbox import ChatBotWithCookies
 
 import requests

@@ -146,7 +147,8 @@ def msg_handle_error(llm_kwargs, chunk_decoded):
     return error_msg
 
 
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     chatbot.append([inputs, ""])
 
     if additional_fn is not None:

@@ -171,7 +171,8 @@ class GetGLMHandle(Process):
     global moss_handle
     moss_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -117,7 +117,8 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
         raise RuntimeError(dec['error_msg'])
 
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     ⭐多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -5,7 +5,8 @@ from toolbox import check_packages, report_exception
 
 model_name = 'Qwen'
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     ⭐多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -47,6 +48,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     if additional_fn is not None:
         from core_functional import handle_core_functionality
         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+    chatbot[-1] = (inputs, "")
+    yield from update_ui(chatbot=chatbot, history=history)
 
     # 开始接收回复
     from .com_qwenapi import QwenRequestInstance

@@ -9,7 +9,8 @@ def validate_key():
     if YUNQUE_SECRET_KEY == '': return False
     return True
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     ⭐ 多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -13,7 +13,8 @@ def validate_key():
         return False
     return True
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     ⭐多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -1,7 +1,8 @@
 import time
 import os
-from toolbox import update_ui, get_conf, update_ui_lastest_msg
+from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
 from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
+from toolbox import ChatBotWithCookies
 
 model_name = '智谱AI大模型'
 zhipuai_default_model = 'glm-4'

@@ -16,7 +17,8 @@ def make_media_input(inputs, image_paths):
         inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
     return inputs
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
     """
     ⭐多线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -42,7 +44,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     return response
 
 
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     """
     ⭐单线程方法
     函数的说明请见 request_llms/bridge_all.py

@@ -90,4 +93,5 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = [inputs, response]
         yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
     yield from update_ui(chatbot=chatbot, history=history)

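Several hunks in this commit thread log_chat through the bridges so conversation records can be saved (one of the changelog items above). Its call signature is visible in the diff; the following stand-in only mirrors that signature for illustration, the real implementation lives in toolbox.py and is not shown here:

    import logging

    def log_chat(llm_model: str, input_str: str, output_str: str):
        # Toy stand-in: the real toolbox.log_chat persists per-user conversation records.
        logging.info("[%s] Q: %s | A: %s", llm_model, input_str, output_str)

    log_chat(llm_model="glm-4", input_str="你好", output_str="你好,有什么可以帮您?")
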
@@ -1,6 +1,7 @@
 import time
 import threading
 from toolbox import update_ui, Singleton
+from toolbox import ChatBotWithCookies
 from multiprocessing import Process, Pipe
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe

@@ -214,7 +215,7 @@ class LocalLLMHandle(Process):
 def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'):
     load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
 
-    def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[], console_slience:bool=False):
         """
         refer to request_llms/bridge_all.py
         """

@@ -260,7 +261,8 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
             raise RuntimeError("程序终止。")
         return response
 
-    def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+    def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+                history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
         """
         refer to request_llms/bridge_all.py
         """