Compare commits

1 commit

516f5af00a
...
chat_log_n

| Author | SHA1 | Date |
|---|---|---|
|  | 82e125d439 |  |
.gitignore (vendored): 2 additions
```diff
@@ -161,3 +161,5 @@ temp.*
 objdump*
 *.min.*.js
 TODO
+experimental_mods
+search_results
```
```diff
@@ -341,7 +341,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             # the former is API2D's stop condition, the latter is OpenAI's
             if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                 # the stream is judged finished; gpt_replying_buffer is fully written
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 break
             # process the body of the data stream
             status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -375,7 +375,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
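The same one-line change repeats across the remaining model bridges below: wherever a reply is finalized (stream end, last chunk, or a completed non-streaming response), the call to `log_chat` now also passes the requesting user. A minimal sketch of the recurring pattern, using the `log_chat` defined at the end of this diff and assuming `ChatBotWithCookies.get_user()` returns the current user's name (the diff relies on it but never shows its definition):

```python
# Recurring call-site pattern in this commit (sketch, not a verbatim excerpt).
# Assumption: chatbot is a ChatBotWithCookies whose get_user() reads the
# user name out of the session cookies.
def finalize_reply(llm_kwargs: dict, inputs: str, gpt_replying_buffer: str, chatbot) -> None:
    log_chat(
        llm_model=llm_kwargs["llm_model"],
        input_str=inputs,
        output_str=gpt_replying_buffer,
        user_name=chatbot.get_user(),  # the new argument added by this commit
    )
```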
```diff
@@ -184,7 +184,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 # the stream is judged finished; gpt_replying_buffer is fully written
                 lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
                 yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 break
             # process the body of the data stream
             status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
```
```diff
@@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if need_to_pass:
                 pass
             elif is_last_chunk:
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 # logger.info(f'[response] {gpt_replying_buffer}')
                 break
             else:
```
```diff
@@ -223,7 +223,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 chatbot[-1] = (history[-2], history[-1])
                 yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh the UI
             if chunkjson['event_type'] == 'stream-end':
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 history[-1] = gpt_replying_buffer
                 chatbot[-1] = (history[-2], history[-1])
                 yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh the UI
```
```diff
@@ -109,7 +109,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 gpt_replying_buffer += paraphrase['text']  # processed with the json parsing library
                 chatbot[-1] = (inputs, gpt_replying_buffer)
                 history[-1] = gpt_replying_buffer
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 yield from update_ui(chatbot=chatbot, history=history)
             if error_match:
                 history = history[-2]  # errors are not added to the conversation
```
```diff
@@ -166,7 +166,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 history = history[:-2]
                 yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
                 break
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result, user_name=chatbot.get_user())
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                   console_slience=False):
```
```diff
@@ -337,7 +337,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             # the former is API2D's stop condition, the latter is OpenAI's
             if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                 # the stream is judged finished; gpt_replying_buffer is fully written
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 break
             # process the body of the data stream
             status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -371,7 +371,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
```diff
@@ -59,7 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     # summarize the output
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
```
```diff
@@ -68,5 +68,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
@@ -97,5 +97,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
```
```diff
@@ -1029,7 +1029,7 @@ def check_repeat_upload(new_pdf_path, pdf_hash):
     # if the contents of all pages are identical, return True
     return False, None
 
-def log_chat(llm_model: str, input_str: str, output_str: str):
+def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str=default_user_name):
     try:
         if output_str and input_str and llm_model:
             uid = str(uuid.uuid4().hex)
@@ -1038,8 +1038,8 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
             logger.bind(chat_msg=True).info(dedent(
                 """
                 ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
-                [UID]
-                {uid}
+                [UID/USER]
+                {uid}/{user_name}
                 [Model]
                 {llm_model}
                 [Query]
@@ -1047,6 +1047,6 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
                 [Response]
                 {output_str}
                 ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-                """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
+                """).format(uid=uid, user_name=user_name, llm_model=llm_model, input_str=input_str, output_str=output_str))
     except:
         logger.error(trimmed_format_exc())
```
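Assembled from the last three hunks, the updated `log_chat` looks roughly as follows. This is a sketch, not the full source file: the imports, the value of `default_user_name`, and the replacement for the `trimmed_format_exc` helper are assumptions filled in for self-containment, and the elided `{input_str}` line under `[Query]` is inferred from the `.format` call.

```python
import uuid
from textwrap import dedent

from loguru import logger  # assumption: the project logs via loguru's logger.bind

default_user_name = "default_user"  # assumption: the real constant is defined elsewhere

def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str = default_user_name):
    """Write one boxed chat record, now tagged with the requesting user, to the chat log."""
    try:
        if output_str and input_str and llm_model:
            uid = str(uuid.uuid4().hex)
            logger.bind(chat_msg=True).info(dedent(
                """
                ╭────────────────────────────────────────╮
                [UID/USER]
                {uid}/{user_name}
                [Model]
                {llm_model}
                [Query]
                {input_str}
                [Response]
                {output_str}
                ╰────────────────────────────────────────╯
                """).format(uid=uid, user_name=user_name, llm_model=llm_model,
                            input_str=input_str, output_str=output_str))
    except Exception:
        # the original calls logger.error(trimmed_format_exc()); a plain
        # logger.exception(...) keeps the sketch self-contained
        logger.exception("log_chat failed")
```

Because `user_name` defaults to `default_user_name`, any call site not updated by this commit keeps working unchanged.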