fix cookie overflow bug

@@ -180,14 +180,20 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
                 raise ConnectionAbortedError("Ended normally, but the response indicates insufficient tokens, leaving the output incomplete; please reduce the amount of text per request.")
             else:
                 raise RuntimeError("OpenAI rejected the request: " + error_msg)
-        if ('data: [DONE]' in chunk_decoded): break # api2d finished normally
+        if ('data: [DONE]' in chunk_decoded): break # api2d & one-api finished normally
         # Read some information ahead of time (used to detect anomalies)
         if has_choices and not choice_valid:
             # Some garbage third-party endpoints produce this kind of error
             continue
         json_data = chunkjson['choices'][0]
         delta = json_data["delta"]
-        if len(delta) == 0: break
+        if len(delta) == 0:
+            is_termination_certain = False
+            if (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True
+            if is_termination_certain: break
+            else: continue # non-compliant garbage endpoints force us to keep going here

         if (not has_content) and has_role: continue
         if (not has_content) and (not has_role): continue # raise RuntimeError("Non-standard third-party endpoint detected: "+delta)
         if has_content: # has_role = True/False
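
The crux of this hunk is how an empty `delta` is treated: the old code took it as the end of the stream unconditionally, while the new code stops only when `finish_reason` confirms the ending. A minimal sketch of the new rule, outside the project's code (`should_stop` is a hypothetical helper, and `chunkjson` is assumed to be the parsed SSE chunk):

    def should_stop(chunkjson: dict) -> bool:
        # Hypothetical helper mirroring the new branch above.
        choice = chunkjson['choices'][0]
        if len(choice['delta']) != 0:
            return False  # still streaming content
        # Trust an empty delta as the end of the stream only when
        # finish_reason says 'stop'; some non-compliant relays emit
        # empty deltas mid-stream, and for those we must keep reading.
        return choice.get('finish_reason', 'null') == 'stop'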

@@ -285,6 +291,8 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     history.extend([inputs, ""])

     retry = 0
+    previous_ui_reflesh_time = 0
+    ui_reflesh_min_interval = 0.1
     while True:
         try:
             # make a POST request to the API endpoint, stream=True

@@ -297,13 +305,13 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             yield from update_ui(chatbot=chatbot, history=history, msg="Request timed out"+retry_msg) # refresh the UI
             if retry > MAX_RETRY: raise TimeoutError

     if not stream:
         # This branch serves only o1 models that do not support streaming; it applies to no other case
         yield from handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history)
         return

     if stream:
+        reach_termination = False # handle some bizarre new-api anomalies
         gpt_replying_buffer = ""
         is_head_of_the_stream = True
         stream_response = response.iter_lines()

@@ -317,6 +325,9 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                     # First rule out a third-party bug where one-api sends no DONE packet
                     if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
                         yield from update_ui(chatbot=chatbot, history=history, msg="Detected a defective non-official OpenAI endpoint; a more stable endpoint is recommended.")
+                        if not reach_termination:
+                            reach_termination = True
+                            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
                         break
                     # Otherwise, return the error directly
                     chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)

@@ -338,14 +349,25 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
                     # Something strange was passed in
                     raise ValueError(f'Unable to read the following data; please check the configuration.\n\n{chunk_decoded}')
-                # The former is API2D's termination condition, the latter is OPENAI's termination condition
-                if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
-                    # Judged to be the end of the stream; gpt_replying_buffer has been fully written
+                # The former is the API2D & One-API termination condition, the latter is OPENAI's termination condition
+                one_api_terminate = ('data: [DONE]' in chunk_decoded)
+                openai_terminate = (len(chunkjson['choices'][0]["delta"]) == 0)
+                if one_api_terminate or openai_terminate:
+                    is_termination_certain = False
+                    if one_api_terminate: is_termination_certain = True # catch the spec-compliant termination condition
+                    if (chunkjson['choices'][0].get('finish_reason', 'null') == 'stop'): is_termination_certain = True # catch the spec-compliant termination condition
+                    if is_termination_certain:
+                        reach_termination = True
                         log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
-                    break
+                        break # for spec-compliant endpoints we can break here
+                    else:
+                        continue # non-compliant garbage endpoints force us to keep going here
+                # By this point we can already assume a choice must be present
+                try:
                     status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
-                # If an exception is raised here, the text is usually too long; see get_full_error's output for details
+                except:
+                    logger.error(f"A garbage third-party endpoint produced this error; tolerating it: {chunk_decoded}")
+                # Process the main body of the stream
                 if has_content:
                     # normal case
                     gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
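
Compared with the `predict_no_ui_long_connection` hunk above, the stop test here is split into two explicit signals plus a certainty flag. A sketch of the decision, assuming `chunk_decoded` is the raw SSE line and `chunkjson` its parsed form (`stop_signals` is an invented name for illustration):

    def stop_signals(chunk_decoded: str, chunkjson: dict):
        # Hypothetical helper condensing the branch above.
        one_api_terminate = 'data: [DONE]' in chunk_decoded            # API2D / One-API style ending
        openai_terminate = len(chunkjson['choices'][0]["delta"]) == 0  # OpenAI style ending
        finished = one_api_terminate or openai_terminate
        # Break only when the ending is certain: an explicit DONE packet,
        # or finish_reason == 'stop'; otherwise keep reading the stream.
        certain = one_api_terminate or (
            chunkjson['choices'][0].get('finish_reason', 'null') == 'stop')
        return finished, certain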

@@ -354,21 +376,26 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                         continue
                     else:
                         # Past this point no well-behaved endpoint should go; some garbage third-party endpoints produce this error
-                        if chunkjson['choices'][0]["delta"]["content"] is None: continue # a garbage third-party endpoint produced this error; tolerating it
+                        if chunkjson['choices'][0]["delta"].get("content", None) is None:
+                            logger.error(f"A garbage third-party endpoint produced this error; tolerating it: {chunk_decoded}")
+                            continue
                         gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]

                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
+                    if time.time() - previous_ui_reflesh_time > ui_reflesh_min_interval:
                         yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
+                        previous_ui_reflesh_time = time.time()
                 except Exception as e:
                     yield from update_ui(chatbot=chatbot, history=history, msg="Irregular JSON parse") # refresh the UI
                     chunk = get_full_error(chunk, stream_response)
                     chunk_decoded = chunk.decode()
                     error_msg = chunk_decoded
                     chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-                    yield from update_ui(chatbot=chatbot, history=history, msg="JSON parse exception" + error_msg) # refresh the UI
                     logger.error(error_msg)
+                    yield from update_ui(chatbot=chatbot, history=history, msg="JSON parse exception" + error_msg) # refresh the UI
                     return
+        yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
         return # return from stream-branch


 def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
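
Together with the two counters introduced in the `@@ -285,6 +291,8 @@` hunk, the other change here throttles UI redraws: the chat is repainted at most once every 0.1 s instead of once per stream chunk, which keeps fast streams from flooding the frontend. A generic sketch of the pattern (variable names taken from the diff, including the `reflesh` spelling; `render` stands in for `update_ui`):

    import time

    previous_ui_reflesh_time = 0
    ui_reflesh_min_interval = 0.1  # minimum seconds between redraws

    def maybe_refresh(render):
        # Redraw only when the interval has elapsed; chunks that arrive
        # sooner update the buffers but skip the expensive repaint.
        global previous_ui_reflesh_time
        if time.time() - previous_ui_reflesh_time > ui_reflesh_min_interval:
            render()
            previous_ui_reflesh_time = time.time()

Note that the diff also adds one unconditional `update_ui` after the loop, so the final chunk is never lost to the throttle.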

@@ -1070,26 +1070,31 @@ function restore_chat_from_local_storage(event) {
 }


-function clear_conversation(a, b, c) {
-    update_conversation_metadata();
-    let stopButton = document.getElementById("elem_stop");
-    stopButton.click();
-    // console.log("clear_conversation");
-    return reset_conversation(a, b);
-}
-
-
 function reset_conversation(a, b) {
     // console.log("js_code_reset");
     a = btoa(unescape(encodeURIComponent(JSON.stringify(a))));
-    setCookie("js_previous_chat_cookie", a, 1);
+    localStorage.setItem("js_previous_chat_cookie", a);
     b = btoa(unescape(encodeURIComponent(JSON.stringify(b))));
-    setCookie("js_previous_history_cookie", b, 1);
+    localStorage.setItem("js_previous_history_cookie", b);
     // gen_restore_btn();
     return [[], [], "Reset"];
 }


+// clear -> cache history into history_cache -> click restore -> restore_previous_chat() -> trigger elem_update_history -> read history_cache
+function restore_previous_chat() {
+    // console.log("restore_previous_chat");
+    let chat = localStorage.getItem("js_previous_chat_cookie");
+    chat = JSON.parse(decodeURIComponent(escape(atob(chat))));
+    push_data_to_gradio_component(chat, "gpt-chatbot", "obj");
+    let history = localStorage.getItem("js_previous_history_cookie");
+    history = JSON.parse(decodeURIComponent(escape(atob(history))));
+    push_data_to_gradio_component(history, "history-ng", "obj");
+    // document.querySelector("#elem_update_history").click(); // in order to call set_history_gr_state, and send history state to server
+}
+
+
 // clear -> cache history into history_cache -> click restore -> restore_previous_chat() -> trigger elem_update_history -> read history_cache
 function restore_previous_chat() {
     // console.log("restore_previous_chat");
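
This hunk is the fix named in the commit title: `reset_conversation` used to stash the Base64-encoded chat and history into cookies, but browsers cap each cookie at roughly 4 KB (and the whole cookie header travels with every request), so any non-trivial conversation overflowed the limit, while localStorage has a per-origin quota typically measured in megabytes. A quick back-of-the-envelope check of why the cookie path fails (the sizes here are illustrative):

    import base64, json

    # A modest ten-turn conversation, encoded the way reset_conversation
    # encodes it (JSON.stringify, then Base64) before storing it.
    history = [["a question from the user " * 20,
                "a long answer from the model " * 80]] * 10
    encoded = base64.b64encode(json.dumps(history).encode("utf-8"))

    print(len(encoded))         # tens of kilobytes
    print(len(encoded) > 4096)  # True: well past the ~4 KB per-cookie cap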

@@ -1,5 +1,13 @@
+function remove_legacy_cookie() {
+    setCookie("web_cookie_cache", "", -1);
+    setCookie("js_previous_chat_cookie", "", -1);
+    setCookie("js_previous_history_cookie", "", -1);
+}
+
+
 async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
     // Part 1: layout initialization
+    remove_legacy_cookie();
     audio_fn_init();
     minor_ui_adjustment();
     ButtonWithDropdown_init();
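
Assuming the project's `setCookie` helper takes an expiry in days, passing `-1` dates each legacy cookie into the past, which is the standard way to delete a cookie from script; calling `remove_legacy_cookie()` at init clears out the oversized values that earlier versions left behind.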