fix cookie overflow bug

binary-husky
2024-12-25 00:33:20 +08:00
parent 060af0d2e6
commit 1dd1d0ed6c
3 changed files with 63 additions and 23 deletions


@@ -180,14 +180,20 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
raise ConnectionAbortedError("正常结束但显示Token不足导致输出不完整请削减单次输入的文本量。")
else:
raise RuntimeError("OpenAI拒绝了请求" + error_msg)
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
if ('data: [DONE]' in chunk_decoded): break # api2d & one-api 正常完成
# 提前读取一些信息 (用于判断异常)
if has_choices and not choice_valid:
# 一些垃圾第三方接口的出现这样的错误
continue
json_data = chunkjson['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if len(delta) == 0:
is_termination_certain = False
if (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True
if is_termination_certain: break
else: continue # for non-compliant garbage interfaces, we must continue here
if (not has_content) and has_role: continue
if (not has_content) and (not has_role): continue # raise RuntimeError("Found a non-standard third-party interface: " + delta)
if has_content: # has_role = True/False
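The hunk above replaces the bare `if len(delta) == 0: break` with a certainty check: an empty delta terminates the stream only when `finish_reason == 'stop'` confirms it, because some non-compliant proxies emit empty deltas mid-stream. A minimal standalone sketch of that decision, assuming the same chunk shape as the diff (the helper name `classify_chunk` is hypothetical):

def classify_chunk(chunk_decoded: str, chunkjson: dict) -> str:
    # 'data: [DONE]' is the api2d / one-api sentinel: always a certain stop.
    if 'data: [DONE]' in chunk_decoded:
        return "stop"
    delta = chunkjson['choices'][0].get("delta", {})
    if len(delta) == 0:
        # An empty delta is a certain stop only when finish_reason says so;
        # otherwise keep reading: broken endpoints send empty deltas mid-stream.
        if chunkjson['choices'][0].get('finish_reason', 'null') == 'stop':
            return "stop"
        return "skip"
    return "consume"  # a normal content delta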
@@ -285,6 +291,8 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
history.extend([inputs, ""])
retry = 0
previous_ui_reflesh_time = 0
ui_reflesh_min_interval = 0.1
while True:
try:
# make a POST request to the API endpoint, stream=True
@@ -297,13 +305,13 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
if retry > MAX_RETRY: raise TimeoutError
if not stream:
# this branch only applies to o1 models, which do not support stream; it must not be used in any other case
yield from handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history)
return
if stream:
reach_termination = False # handle some bizarre new-api anomalies
gpt_replying_buffer = ""
is_head_of_the_stream = True
stream_response = response.iter_lines()
@@ -317,6 +325,9 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
# first, rule out a third-party one-api bug where no done packet is sent
if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口建议选择更稳定的接口。")
if not reach_termination:
reach_termination = True
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
break
# in any other case, return the error directly
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
@@ -338,14 +349,25 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
# something strange was passed in
raise ValueError(f'Cannot parse the following data; please check your configuration.\n\n{chunk_decoded}')
# the former is API2D's termination condition, the latter is OPENAI's
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
# judged to be the end of the data stream; gpt_replying_buffer is fully written too
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
break
# the former is the API2D & One-API termination condition, the latter is OPENAI's
one_api_terminate = ('data: [DONE]' in chunk_decoded)
openai_terminate = (len(chunkjson['choices'][0]["delta"]) == 0)
if one_api_terminate or openai_terminate:
is_termination_certain = False
if one_api_terminate: is_termination_certain = True # catch the spec-compliant termination condition
if (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True # catch the spec-compliant termination condition
if is_termination_certain:
reach_termination = True
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
break # for spec-compliant interfaces, we can break here
else:
continue # for non-compliant garbage interfaces, we must continue here
# from this point on, we can assume the payload must contain choices
try:
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
except:
logger.error(f"一些垃圾第三方接口出现这样的错误,兼容一下吧: {chunk_decoded}")
# 处理数据流的主体
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
# 如果这里抛出异常一般是文本过长详情见get_full_error的输出
if has_content:
# 正常情况
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
@@ -354,21 +376,26 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
continue
else:
# at this point we are already beyond what a normal interface should reach; some garbage third-party interfaces produce errors like this
if chunkjson['choices'][0]["delta"]["content"] is None: continue # some garbage third-party interfaces produce errors like this; tolerate it
if chunkjson['choices'][0]["delta"].get("content", None) is None:
logger.error(f"Some garbage third-party interface produced this error; tolerating it: {chunk_decoded}")
continue
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
if time.time() - previous_ui_reflesh_time > ui_reflesh_min_interval:
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
previous_ui_reflesh_time = time.time()
except Exception as e:
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
error_msg = chunk_decoded
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析异常" + error_msg) # 刷新界面
logger.error(error_msg)
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析异常" + error_msg) # 刷新界面
return
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
return # return from stream-branch
def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
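The other change in this file throttles UI refreshes: `update_ui` now runs only when more than `ui_reflesh_min_interval` (0.1 s) has elapsed since the previous refresh, so long streams no longer flood the frontend with re-renders. A minimal sketch of the same pattern, with a generic `render` callback standing in for `update_ui` (the names here are illustrative, not from the diff):

import time

def consume_stream(chunks, render, min_interval=0.1):
    # Re-render at most once per min_interval while consuming the stream.
    buffer, last_render = "", 0.0
    for chunk in chunks:
        buffer += chunk
        if time.time() - last_render > min_interval:
            render(buffer)
            last_render = time.time()
    render(buffer)  # one final refresh so the last chunks are never dropped

The diff keeps the same safeguard: after the stream loop it still calls `update_ui` once unconditionally before returning, so a refresh skipped by the throttle can never drop the tail of the reply.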


@@ -1070,26 +1070,31 @@ function restore_chat_from_local_storage(event) {
}
function clear_conversation(a, b, c) {
update_conversation_metadata();
let stopButton = document.getElementById("elem_stop");
stopButton.click();
// console.log("clear_conversation");
return reset_conversation(a, b);
}
function reset_conversation(a, b) {
// console.log("js_code_reset");
a = btoa(unescape(encodeURIComponent(JSON.stringify(a))));
setCookie("js_previous_chat_cookie", a, 1);
localStorage.setItem("js_previous_chat_cookie", a);
b = btoa(unescape(encodeURIComponent(JSON.stringify(b))));
setCookie("js_previous_history_cookie", b, 1);
localStorage.setItem("js_previous_history_cookie", b);
// gen_restore_btn();
return [[], [], "Conversation reset"];
}
// clear -> cache history into history_cache -> click restore -> restore_previous_chat() -> trigger elem_update_history -> read history_cache
function restore_previous_chat() {
// console.log("restore_previous_chat");
let chat = localStorage.getItem("js_previous_chat_cookie");
chat = JSON.parse(decodeURIComponent(escape(atob(chat))));
push_data_to_gradio_component(chat, "gpt-chatbot", "obj");
let history = localStorage.getItem("js_previous_history_cookie");
history = JSON.parse(decodeURIComponent(escape(atob(history))));
push_data_to_gradio_component(history, "history-ng", "obj");
// document.querySelector("#elem_update_history").click(); // in order to call set_history_gr_state, and send history state to server
}
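This hunk is the overflow the commit title refers to: browsers cap each cookie at roughly 4 KB, and base64 inflates payloads by about 4/3, so caching a serialized chat transcript via `setCookie` fails once the conversation grows; `localStorage` (typically around 5 MB per origin) does not hit this limit. A rough back-of-the-envelope check of why the cookie overflowed, using 4096 bytes as the common per-cookie default rather than a value taken from the diff:

import base64, json

chat = [["a question " * 40, "a long model answer " * 200]]  # one modest exchange
encoded = base64.b64encode(json.dumps(chat).encode("utf-8"))
print(len(encoded))         # already several KB
print(len(encoded) > 4096)  # True: over the ~4 KB per-cookie limit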


@@ -1,5 +1,13 @@
function remove_legacy_cookie() {
setCookie("web_cookie_cache", "", -1);
setCookie("js_previous_chat_cookie", "", -1);
setCookie("js_previous_history_cookie", "", -1);
}
async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
// part 1: layout initialization
remove_legacy_cookie();
audio_fn_init();
minor_ui_adjustment();
ButtonWithDropdown_init();