diff --git a/request_llms/bridge_openrouter.py b/request_llms/bridge_openrouter.py
index 10dfe57f..71a53a9c 100644
--- a/request_llms/bridge_openrouter.py
+++ b/request_llms/bridge_openrouter.py
@@ -170,7 +170,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
         except requests.exceptions.ConnectionError:
             chunk = next(stream_response) # Failed; retry once? If that fails too, there is nothing more we can do.
         chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
-        if len(chunk_decoded)==0: continue
+        if len(chunk_decoded)==0 or chunk_decoded.startswith(':'): continue
         if not chunk_decoded.startswith('data:'):
             error_msg = get_full_error(chunk, stream_response).decode()
             if "reduce the length" in error_msg:
@@ -181,9 +181,6 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
                 raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
         if ('data: [DONE]' in chunk_decoded): break # api2d finished normally
         # Read a few fields ahead of time (used to detect anomalies)
-        if (has_choices and not choice_valid) or ('OPENROUTER PROCESSING' in chunk_decoded):
-            # Some junk third-party APIs produce this kind of error; special handling for openrouter
-            continue
         json_data = chunkjson['choices'][0]
         delta = json_data["delta"]
         if len(delta) == 0: break
@@ -328,8 +325,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 
         if chunk:
             try:
-                if (has_choices and not choice_valid) or ('OPENROUTER PROCESSING' in chunk_decoded):
-                    # Some junk third-party APIs produce this kind of error; also OPENROUTER's special case: its stream emits OPENROUTER PROCESSING before it is connected to the model
+                if (has_choices and not choice_valid) or chunk_decoded.startswith(':'):
                     continue
                 if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
                     # Something strange was passed in
diff --git a/request_llms/com_google.py b/request_llms/com_google.py
index 88e094f5..afb81097 100644
--- a/request_llms/com_google.py
+++ b/request_llms/com_google.py
@@ -202,16 +202,29 @@ class GoogleChatInit:
         )  # process history
         messages.append(self.__conversation_user(inputs, llm_kwargs, enable_multimodal_capacity))  # process the user message
-        payload = {
-            "contents": messages,
-            "generationConfig": {
-                # "maxOutputTokens": llm_kwargs.get("max_token", 1024),
-                "stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
-                "temperature": llm_kwargs.get("temperature", 1),
-                "topP": llm_kwargs.get("top_p", 0.8),
-                "topK": 10,
-            },
-        }
+        stop_sequences = str(llm_kwargs.get("stop", "")).split(" ")
+        # Filter out empty strings; if none remain, omit stopSequences from the payload
+        stop_sequences = [s for s in stop_sequences if s]
+        if not stop_sequences:
+            payload = {
+                "contents": messages,
+                "generationConfig": {
+                    "temperature": llm_kwargs.get("temperature", 1),
+                    "topP": llm_kwargs.get("top_p", 0.8),
+                    "topK": 10,
+                },
+            }
+        else:
+            payload = {
+                "contents": messages,
+                "generationConfig": {
+                    # "maxOutputTokens": llm_kwargs.get("max_token", 1024),
+                    "stopSequences": stop_sequences,
+                    "temperature": llm_kwargs.get("temperature", 1),
+                    "topP": llm_kwargs.get("top_p", 0.8),
+                    "topK": 10,
+                },
+            }
 
         return header, payload
diff --git a/themes/common.js b/themes/common.js
index d22d5a6e..04628d1b 100644
--- a/themes/common.js
+++ b/themes/common.js
@@ -1070,6 +1070,14 @@ function restore_chat_from_local_storage(event) {
 }
 
+function clear_conversation(a, b, c) {
+    update_conversation_metadata();
+    let stopButton = document.getElementById("elem_stop");
+    stopButton.click();
+    // console.log("clear_conversation");
+    return reset_conversation(a, b);
+}
+
 function reset_conversation(a, b) {
     // console.log("js_code_reset");
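
Background for the `bridge_openrouter.py` change: in the Server-Sent Events wire format, any line whose first character is `:` is a comment, and OpenRouter emits `: OPENROUTER PROCESSING` as a keep-alive while the upstream model is still connecting. Skipping all comment lines therefore subsumes the old substring check and also covers other keep-alive text. A minimal sketch of the rule the patch relies on; `is_sse_comment` is a standalone illustration, not a helper from the repository:

```python
def is_sse_comment(line: str) -> bool:
    """True for SSE comment/keep-alive lines such as ': OPENROUTER PROCESSING'."""
    # Per the SSE spec, a line beginning with ':' carries no event data.
    return line.startswith(':')

assert is_sse_comment(': OPENROUTER PROCESSING')       # OpenRouter keep-alive
assert not is_sse_comment('data: {"choices": []}')     # real payload frame
assert not is_sse_comment('data: [DONE]')              # end-of-stream marker
```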
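Why the `com_google.py` change is needed: when no stop words are configured, `str(llm_kwargs.get("stop", "")).split(" ")` evaluates to `[""]`, and a `stopSequences` list containing an empty string is apparently rejected by the Gemini endpoint. The patch filters out empties and omits the field entirely when nothing remains. A sketch of an equivalent construction that avoids duplicating the payload literal; `build_generation_config` is a hypothetical helper, not part of the repository:

```python
def build_generation_config(llm_kwargs: dict) -> dict:
    config = {
        "temperature": llm_kwargs.get("temperature", 1),
        "topP": llm_kwargs.get("top_p", 0.8),
        "topK": 10,
    }
    # "".split(" ") yields [""] -- an empty stop sequence -- so drop empties
    # and attach the key only when at least one real sequence remains.
    stop_sequences = [s for s in str(llm_kwargs.get("stop", "")).split(" ") if s]
    if stop_sequences:
        config["stopSequences"] = stop_sequences
    return config

assert "stopSequences" not in build_generation_config({})
assert build_generation_config({"stop": "END"})["stopSequences"] == ["END"]
```

Attaching `stopSequences` conditionally keeps one `generationConfig` literal instead of two near-identical branches, while producing the same payloads as the patch.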