Compare commits: boyin_rag...chat_log_n

3 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 82e125d439 | |
| | 197287fc30 | |
| | c37fcc9299 | |
.gitignore (vendored): 2 changes
@@ -161,3 +161,5 @@ temp.*
 objdump*
 *.min.*.js
 TODO
+experimental_mods
+search_results
@@ -341,7 +341,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         # 前者是API2D的结束条件,后者是OPENAI的结束条件
         if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
             # 判定为数据流的结束,gpt_replying_buffer也写完了
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # 处理数据流的主体
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
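For context on the condition touched in the hunk above, here is a minimal, self-contained sketch of the two end-of-stream checks; the chunk strings are synthetic stand-ins for streamed API data, not captured traffic:

```python
import json

# Synthetic SSE-style chunks; real API2D / OpenAI streams deliver similar "data: ..." lines.
chunks = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: [DONE]',
]

for chunk_decoded in chunks:
    if 'data: [DONE]' in chunk_decoded:
        # API2D-style terminator: the literal [DONE] sentinel ends the stream.
        print("stream finished (sentinel)")
        break
    chunkjson = json.loads(chunk_decoded[len('data: '):])
    if len(chunkjson['choices'][0]["delta"]) == 0:
        # OpenAI-style terminator: an empty delta object ends the stream.
        print("stream finished (empty delta)")
        break
    print(chunkjson['choices'][0]["delta"]["content"])
```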
@@ -375,7 +375,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -184,7 +184,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             # 判定为数据流的结束,gpt_replying_buffer也写完了
             lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
             yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # 处理数据流的主体
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if need_to_pass:
             pass
         elif is_last_chunk:
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             # logger.info(f'[response] {gpt_replying_buffer}')
             break
         else:
@@ -223,7 +223,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             chatbot[-1] = (history[-2], history[-1])
             yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
         if chunkjson['event_type'] == 'stream-end':
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             history[-1] = gpt_replying_buffer
             chatbot[-1] = (history[-2], history[-1])
             yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
@@ -109,7 +109,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
             chatbot[-1] = (inputs, gpt_replying_buffer)
             history[-1] = gpt_replying_buffer
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             yield from update_ui(chatbot=chatbot, history=history)
         if error_match:
             history = history[-2] # 错误的不纳入对话
@@ -166,7 +166,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             history = history[:-2]
             yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
             break
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result, user_name=chatbot.get_user())

 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                   console_slience=False):
@@ -337,7 +337,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         # 前者是API2D的结束条件,后者是OPENAI的结束条件
         if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
             # 判定为数据流的结束,gpt_replying_buffer也写完了
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # 处理数据流的主体
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -371,7 +371,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -59,7 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     # 总结输出
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
@@ -68,5 +68,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
@@ -97,5 +97,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
@@ -104,17 +104,27 @@ def extract_archive(file_path, dest_dir):
         logger.info("Successfully extracted zip archive to {}".format(dest_dir))

     elif file_extension in [".tar", ".gz", ".bz2"]:
-        with tarfile.open(file_path, "r:*") as tarobj:
-            # 清理提取路径,移除任何不安全的元素
-            for member in tarobj.getmembers():
-                member_path = os.path.normpath(member.name)
-                full_path = os.path.join(dest_dir, member_path)
-                full_path = os.path.abspath(full_path)
-                if not full_path.startswith(os.path.abspath(dest_dir) + os.sep):
-                    raise Exception(f"Attempted Path Traversal in {member.name}")
+        try:
+            with tarfile.open(file_path, "r:*") as tarobj:
+                # 清理提取路径,移除任何不安全的元素
+                for member in tarobj.getmembers():
+                    member_path = os.path.normpath(member.name)
+                    full_path = os.path.join(dest_dir, member_path)
+                    full_path = os.path.abspath(full_path)
+                    if not full_path.startswith(os.path.abspath(dest_dir) + os.sep):
+                        raise Exception(f"Attempted Path Traversal in {member.name}")

-            tarobj.extractall(path=dest_dir)
-            logger.info("Successfully extracted tar archive to {}".format(dest_dir))
+                tarobj.extractall(path=dest_dir)
+                logger.info("Successfully extracted tar archive to {}".format(dest_dir))
+        except tarfile.ReadError as e:
+            if file_extension == ".gz":
+                # 一些特别奇葩的项目,是一个gz文件,里面不是tar,只有一个tex文件
+                import gzip
+                with gzip.open(file_path, 'rb') as f_in:
+                    with open(os.path.join(dest_dir, 'main.tex'), 'wb') as f_out:
+                        f_out.write(f_in.read())
+            else:
+                raise e

     # 第三方库,需要预先pip install rarfile
     # 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以
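The hunk above wraps the existing path-traversal guard in a try/except so that a bare gzip file with no tar inside falls back to a direct gzip read. A minimal standalone sketch of that same guard follows; the member names below are hypothetical examples, not taken from the repository:

```python
import os

def is_safe_member(dest_dir: str, member_name: str) -> bool:
    # Same check as in extract_archive: normalize the member path, resolve it under
    # dest_dir, and require the absolute result to stay inside dest_dir.
    member_path = os.path.normpath(member_name)
    full_path = os.path.abspath(os.path.join(dest_dir, member_path))
    return full_path.startswith(os.path.abspath(dest_dir) + os.sep)

# Hypothetical archive member names: a normal relative path passes,
# a "../" escape attempt is rejected.
print(is_safe_member("/tmp/extract", "paper/main.tex"))    # True
print(is_safe_member("/tmp/extract", "../../etc/passwd"))  # False
```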
@@ -14,6 +14,7 @@ openai_regex = re.compile(
     r"sk-[a-zA-Z0-9_-]{92}$|" +
     r"sk-proj-[a-zA-Z0-9_-]{48}$|"+
     r"sk-proj-[a-zA-Z0-9_-]{124}$|"+
+    r"sk-proj-[a-zA-Z0-9_-]{156}$|"+ #新版apikey位数不匹配故修改此正则表达式
     r"sess-[a-zA-Z0-9]{40}$"
 )
 def is_openai_api_key(key):
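A quick, hedged check of the widened key pattern; the pattern below is only the subset of alternatives visible in this hunk, and the key string is a synthetic placeholder (156 filler characters after the sk-proj- prefix), not a real credential:

```python
import re

# Subset of openai_regex as shown in the diff above; the repository pattern may
# contain additional alternatives before these.
openai_regex = re.compile(
    r"sk-[a-zA-Z0-9_-]{92}$|" +
    r"sk-proj-[a-zA-Z0-9_-]{48}$|" +
    r"sk-proj-[a-zA-Z0-9_-]{124}$|" +
    r"sk-proj-[a-zA-Z0-9_-]{156}$|" +  # the alternative added by this change
    r"sess-[a-zA-Z0-9]{40}$"
)

fake_key = "sk-proj-" + "a" * 156  # synthetic placeholder, not a real key
print(bool(openai_regex.match(fake_key)))  # True only with the new 156-char alternative
```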
@@ -1029,7 +1029,7 @@ def check_repeat_upload(new_pdf_path, pdf_hash):
     # 如果所有页的内容都相同,返回 True
     return False, None

-def log_chat(llm_model: str, input_str: str, output_str: str):
+def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str=default_user_name):
     try:
         if output_str and input_str and llm_model:
             uid = str(uuid.uuid4().hex)
@@ -1038,8 +1038,8 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
             logger.bind(chat_msg=True).info(dedent(
                 """
                 ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
-                [UID]
-                {uid}
+                [UID/USER]
+                {uid}/{user_name}
                 [Model]
                 {llm_model}
                 [Query]
@@ -1047,6 +1047,6 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
                 [Response]
                 {output_str}
                 ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-                """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
+                """).format(uid=uid, user_name=user_name, llm_model=llm_model, input_str=input_str, output_str=output_str))
     except:
         logger.error(trimmed_format_exc())
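A minimal sketch of how a call site exercises the new parameter, assuming log_chat and default_user_name live in the module patched above; the import path and the example strings are assumptions, not taken from the repository:

```python
# Hypothetical import path; in the repository, callers import log_chat from the
# module that defines it (the file shown in the hunks above).
from toolbox import log_chat

llm_model = "gpt-3.5-turbo"                 # example model name
inputs = "What does extract_archive do?"    # example query
gpt_replying_buffer = "It unpacks zip/tar archives ..."  # example response

# Old call sites logged only the model, query, and response; new call sites also
# pass user_name, so the [UID/USER] line in the chat log records who asked.
log_chat(llm_model=llm_model, input_str=inputs,
         output_str=gpt_replying_buffer, user_name="alice")  # "alice" is hypothetical
```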