Compare commits

1 commit

dynamic_pl...chat_log_n

| Author | SHA1 | Date |
|---|---|---|
|  | 82e125d439 |  |
```diff
@@ -17,7 +17,7 @@ def get_crazy_functions():
     from crazy_functions.SourceCode_Analyse import 解析一个前端项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
     from crazy_functions.高级功能函数模板 import Demo_Wrap
-    from crazy_functions.Latex_Project_Polish import Latex英文润色
+    from crazy_functions.Latex全文润色 import Latex英文润色
     from crazy_functions.询问多个大语言模型 import 同时问询
     from crazy_functions.SourceCode_Analyse import 解析一个Lua项目
     from crazy_functions.SourceCode_Analyse import 解析一个CSharp项目
```
```diff
@@ -33,8 +33,8 @@ def get_crazy_functions():
     from crazy_functions.PDF_Translate import 批量翻译PDF文档
     from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
     from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
-    from crazy_functions.Latex_Project_Polish import Latex中文润色
-    from crazy_functions.Latex_Project_Polish import Latex英文纠错
+    from crazy_functions.Latex全文润色 import Latex中文润色
+    from crazy_functions.Latex全文润色 import Latex英文纠错
     from crazy_functions.Markdown_Translate import Markdown中译英
     from crazy_functions.虚空终端 import 虚空终端
     from crazy_functions.生成多种Mermaid图表 import Mermaid_Gen
```
```diff
@@ -740,6 +740,19 @@ def get_crazy_functions():
     #         # logger.error(trimmed_format_exc())
     #         # print('Load function plugin failed')
 
+    # try:
+    #     from crazy_functions.chatglm微调工具 import 微调数据集生成
+    #     function_plugins.update({
+    #         "黑盒模型学习: 微调数据集生成 (先上传数据集)": {
+    #             "Color": "stop",
+    #             "AsButton": False,
+    #             "AdvancedArgs": True,
+    #             "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求:100字以内,用第二人称。' --system_prompt=''",
+    #             "Function": HotReload(微调数据集生成)
+    #         }
+    #     })
+    # except:
+    #     print('Load function plugin failed')
 
     """
     Set default values:
```
```diff
@@ -1,62 +0,0 @@
-from toolbox import get_conf, update_ui
-from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
-from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
-
-
-def update_js_plugin_info():
-    # encode_plugin_info
-    ...
-
-
-class ImageGen_Wrap(GptAcademicPluginTemplate):
-    def __init__(self):
-        """
-        Note: `execute` runs in a different thread, so be very careful when defining and using class variables!
-        """
-        pass
-
-    def define_arg_selection_menu(self):
-        """
-        Define the plugin's secondary options menu.
-
-        The first argument, named `main_input`: `type` declares a text box, `title` is shown above the box, `description` inside it, and `default_value` is the default;
-        the second argument, named `advanced_arg`: `type` declares a text box, `title` is shown above the box, `description` inside it, and `default_value` is the default;
-        """
-        gui_definition = {
-            "main_input":
-                ArgProperty(title="输入图片描述", description="需要生成图像的文本描述,尽量使用英文", default_value="", type="string").model_dump_json(),  # main input, synced automatically from the input box
-            "model_name":
-                ArgProperty(title="模型", options=["DALLE2", "DALLE3"], default_value="DALLE3", description="无", type="dropdown").model_dump_json(),
-            "resolution":
-                ArgProperty(title="分辨率", options=["256x256(限DALLE2)", "512x512(限DALLE2)", "1024x1024", "1792x1024(限DALLE3)", "1024x1792(限DALLE3)"], default_value="1024x1024", description="无", type="dropdown").model_dump_json(),
-            "quality (仅DALLE3生效)":
-                ArgProperty(title="质量", options=["standard", "hd"], default_value="standard", description="无", type="dropdown").model_dump_json(),
-            "style (仅DALLE3生效)":
-                ArgProperty(title="风格", options=["vivid", "natural"], default_value="vivid", description="无", type="dropdown").model_dump_json(),
-        }
-        return gui_definition
-
-    def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
-        """
-        Execute the plugin.
-        """
-        # resolution
-        resolution = plugin_kwargs["resolution"].replace("(限DALLE2)", "").replace("(限DALLE3)", "")
-
-        if plugin_kwargs["model_name"] == "DALLE2":
-            plugin_kwargs["advanced_arg"] = resolution
-            yield from 图片生成_DALLE2(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
-
-        elif plugin_kwargs["model_name"] == "DALLE3":
-            quality = plugin_kwargs["quality (仅DALLE3生效)"]
-            style = plugin_kwargs["style (仅DALLE3生效)"]
-            plugin_kwargs["advanced_arg"] = f"{resolution}-{quality}-{style}"
-            yield from 图片生成_DALLE3(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
-
-        else:
-            chatbot.append([None, "抱歉,找不到该模型"])
-            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
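The deleted wrapper above builds its secondary menu by dumping each `ArgProperty` to JSON. As a minimal sketch, assuming `ArgProperty` is a pydantic v2 model (suggested by the `.model_dump_json()` calls; this is an illustration, not the repo's plugin_class_template), the pattern looks like this:

```python
# Hypothetical stand-in for crazy_functions.plugin_template.plugin_class_template.ArgProperty,
# assuming pydantic v2; field names mirror those used in the diff above.
from typing import List, Optional
from pydantic import BaseModel

class ArgProperty(BaseModel):
    title: str
    description: str = ""
    default_value: str = ""
    type: str = "string"            # "string" -> text box, "dropdown" -> selection
    options: Optional[List[str]] = None

gui_definition = {
    "main_input": ArgProperty(title="输入图片描述", description="需要生成图像的文本描述",
                              default_value="", type="string").model_dump_json(),
    "model_name": ArgProperty(title="模型", options=["DALLE2", "DALLE3"],
                              default_value="DALLE3", description="无", type="dropdown").model_dump_json(),
}
print(gui_definition["model_name"])  # a JSON string, ready to be serialized for the front end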
crazy_functions/chatglm微调工具.py (new file, 141 lines)

```diff
@@ -0,0 +1,141 @@
+from toolbox import CatchException, update_ui, promote_file_to_downloadzone
+from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+import datetime, json
+
+
+def fetch_items(list_of_items, batch_size):
+    for i in range(0, len(list_of_items), batch_size):
+        yield list_of_items[i:i + batch_size]
+
+
+def string_to_options(arguments):
+    import argparse
+    import shlex
+
+    # Create an argparse.ArgumentParser instance
+    parser = argparse.ArgumentParser()
+
+    # Add command-line arguments
+    parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
+    parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
+    parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
+    parser.add_argument("--batch", type=int, help="Batch size", default=50)
+    parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", default=50)
+    parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2)
+    parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1)
+    parser.add_argument("--json_dataset", type=str, help="json_dataset", default="")
+    parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="")
+
+    # Parse the arguments
+    args = parser.parse_args(shlex.split(arguments))
+
+    return args
+
+
+@CatchException
+def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+    """
+    txt             text typed into the input field, e.g. a passage to translate, or a path to the files awaiting processing
+    llm_kwargs      GPT model parameters such as temperature and top_p; usually passed straight through
+    plugin_kwargs   parameters for the plugin
+    chatbot         handle of the chat display box, used to show output to the user
+    history         chat history, i.e. the preceding context
+    system_prompt   silent prompt given to GPT
+    user_request    information about the current user's request (IP address, etc.)
+    """
+    history = []    # clear history to prevent input overflow
+    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    args = plugin_kwargs.get("advanced_arg", None)
+    if args is None:
+        chatbot.append(("没给定指令", "退出"))
+        yield from update_ui(chatbot=chatbot, history=history); return
+    else:
+        arguments = string_to_options(arguments=args)
+
+    dat = []
+    with open(txt, 'r', encoding='utf8') as f:
+        for line in f.readlines():
+            json_dat = json.loads(line)
+            dat.append(json_dat["content"])
+
+    llm_kwargs['llm_model'] = arguments.llm_to_learn
+    for batch in fetch_items(dat, arguments.batch):
+        res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
+            inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)],
+            inputs_show_user_array=[f"Show Nothing" for _ in (batch)],
+            llm_kwargs=llm_kwargs,
+            chatbot=chatbot,
+            history_array=[[] for _ in (batch)],
+            sys_prompt_array=[arguments.system_prompt for _ in (batch)],
+            max_workers=10  # maximum parallelism permitted by OpenAI
+        )
+
+        with open(txt+'.generated.json', 'a+', encoding='utf8') as f:
+            for b, r in zip(batch, res[1::2]):
+                f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n')
+
+    promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot)
+    return
+
+
+@CatchException
+def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+    """
+    txt             text typed into the input field, e.g. a passage to translate, or a path to the files awaiting processing
+    llm_kwargs      GPT model parameters such as temperature and top_p; usually passed straight through
+    plugin_kwargs   parameters for the plugin
+    chatbot         handle of the chat display box, used to show output to the user
+    history         chat history, i.e. the preceding context
+    system_prompt   silent prompt given to GPT
+    user_request    information about the current user's request (IP address, etc.)
+    """
+    import subprocess
+    history = []    # clear history to prevent input overflow
+    chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    args = plugin_kwargs.get("advanced_arg", None)
+    if args is None:
+        chatbot.append(("没给定指令", "退出"))
+        yield from update_ui(chatbot=chatbot, history=history); return
+    else:
+        arguments = string_to_options(arguments=args)
+
+    pre_seq_len = arguments.pre_seq_len                  # 128
+    learning_rate = arguments.learning_rate              # 2e-2
+    num_gpus = arguments.num_gpus                        # 1
+    json_dataset = arguments.json_dataset                # 't_code.json'
+    ptuning_directory = arguments.ptuning_directory      # '/home/hmp/ChatGLM2-6B/ptuning'
+
+    command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \
+        --do_train \
+        --train_file AdvertiseGen/{json_dataset} \
+        --validation_file AdvertiseGen/{json_dataset} \
+        --preprocessing_num_workers 20 \
+        --prompt_column content \
+        --response_column summary \
+        --overwrite_cache \
+        --model_name_or_path THUDM/chatglm2-6b \
+        --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \
+        --overwrite_output_dir \
+        --max_source_length 256 \
+        --max_target_length 256 \
+        --per_device_train_batch_size 1 \
+        --per_device_eval_batch_size 1 \
+        --gradient_accumulation_steps 16 \
+        --predict_with_generate \
+        --max_steps 100 \
+        --logging_steps 10 \
+        --save_steps 20 \
+        --learning_rate {learning_rate} \
+        --pre_seq_len {pre_seq_len} \
+        --quantization_bit 4"
+
+    process = subprocess.Popen(command, shell=True, cwd=ptuning_directory)
+    try:
+        process.communicate(timeout=3600*24)
+    except subprocess.TimeoutExpired:
+        process.kill()
+    return
```
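A minimal, self-contained sketch (illustrative only, not part of the diff) of the two helpers the new file relies on: `string_to_options` tokenizes the shell-style `advanced_arg` string with shlex and maps it onto typed options with argparse, and `fetch_items` slices the dataset into batches. The sample argument string is hypothetical.

```python
import argparse, shlex

def string_to_options(arguments):
    parser = argparse.ArgumentParser()
    parser.add_argument("--llm_to_learn", type=str, default="gpt-3.5-turbo")
    parser.add_argument("--prompt_prefix", type=str, default='')
    parser.add_argument("--batch", type=int, default=50)
    # shlex honors the single quotes around the prompt prefix
    return parser.parse_args(shlex.split(arguments))

def fetch_items(list_of_items, batch_size):
    for i in range(0, len(list_of_items), batch_size):
        yield list_of_items[i:i + batch_size]

args = string_to_options("--llm_to_learn=azure-gpt-3.5 --prompt_prefix='describe the wearer' --batch=2")
print(args.llm_to_learn)                               # azure-gpt-3.5
print(args.prompt_prefix)                              # describe the wearer
print(list(fetch_items([1, 2, 3, 4, 5], args.batch)))  # [[1, 2], [3, 4], [5]]
```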
````diff
@@ -149,19 +149,6 @@
 DEFINE_ARG_INPUT_INTERFACE = json.dumps(define_arg_selection)
 return base64.b64encode(DEFINE_ARG_INPUT_INTERFACE.encode('utf-8')).decode('utf-8')
 ```
-1-2. Reserve 4 dynamic-plugin buttons (hidden in the normal state)
-
-Clicking the "+ plugin" button: jump to the plugin marketplace
-
-Clicking "load plugin":
-- download the file
-- register it; exe_dynamic_plugin starts executing
-- run the browser-side js function
-
-Clicking a dynamic-plugin button:
-- js: check register_advanced_plugin_init_code_arr; if it is empty, show a prompt
-- if it is non-empty, open the secondary menu first; once the parameters are set, trigger a hidden button -- linked to exe_dynamic_plugin
-- exe_dynamic_plugin starts executing
 
 2. User-loading phase (in the main javascript program `common.js`): the browser loads `register_advanced_plugin_init_code_arr` and stores it in the local dictionary `advanced_plugin_init_code_lib`:
````
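The context lines above show the encode side of this transport: the argument-selection schema is serialized to JSON, then base64-encoded for delivery to the browser. A round-trip sketch, with the decode side written in Python purely for illustration (in the repo it happens in browser-side js), and a hypothetical schema:

```python
import base64, json

# hypothetical argument-selection schema
define_arg_selection = {"main_input": {"type": "string", "title": "输入图片描述"}}

# encode side, as in the document
DEFINE_ARG_INPUT_INTERFACE = json.dumps(define_arg_selection)
encoded = base64.b64encode(DEFINE_ARG_INPUT_INTERFACE.encode('utf-8')).decode('utf-8')

# decode side: what the browser recovers into advanced_plugin_init_code_lib
decoded = json.loads(base64.b64decode(encoded).decode('utf-8'))
assert decoded == define_arg_selection
```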
```diff
@@ -341,7 +341,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         # the former is API2D's stop condition, the latter is OpenAI's
         if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
             # the data stream has ended, and gpt_replying_buffer is fully written
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # process the body of the data stream
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
```
```diff
@@ -375,7 +375,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
```diff
@@ -184,7 +184,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             # the data stream has ended, and gpt_replying_buffer is fully written
             lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
             yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # process the body of the data stream
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
```
```diff
@@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if need_to_pass:
             pass
         elif is_last_chunk:
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             # logger.info(f'[response] {gpt_replying_buffer}')
             break
         else:
```
```diff
@@ -223,7 +223,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             chatbot[-1] = (history[-2], history[-1])
             yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh the UI
         if chunkjson['event_type'] == 'stream-end':
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             history[-1] = gpt_replying_buffer
             chatbot[-1] = (history[-2], history[-1])
             yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh the UI
```
```diff
@@ -109,7 +109,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             gpt_replying_buffer += paraphrase['text']  # processed with a json parsing library
             chatbot[-1] = (inputs, gpt_replying_buffer)
             history[-1] = gpt_replying_buffer
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             yield from update_ui(chatbot=chatbot, history=history)
         if error_match:
             history = history[-2]  # exclude erroneous turns from the conversation
```
```diff
@@ -166,7 +166,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             history = history[:-2]
             yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
             break
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result, user_name=chatbot.get_user())
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                   console_slience=False):
```
```diff
@@ -337,7 +337,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         # the former is API2D's stop condition, the latter is OpenAI's
         if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
             # the data stream has ended, and gpt_replying_buffer is fully written
-            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+            log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
             break
         # process the body of the data stream
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
```
```diff
@@ -371,7 +371,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
```diff
@@ -59,7 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     # summarize the output
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
```
```diff
@@ -68,5 +68,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
```
```diff
@@ -97,5 +97,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
```
```diff
@@ -36,7 +36,7 @@ if __name__ == "__main__":
     # plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
 
-    # plugin_test(plugin='crazy_functions.Latex_Project_Polish->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")
+    # plugin_test(plugin='crazy_functions.Latex全文润色->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")
 
     # plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown中译英', main_input="README.md")
 
```
```diff
@@ -65,3 +65,8 @@ if __name__ == "__main__":
     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2210.03629")
 
+    # advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" }
+    # plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)
+
+    # advanced_arg = {"advanced_arg":"--pre_seq_len=128 --learning_rate=2e-2 --num_gpus=1 --json_dataset='t_code.json' --ptuning_directory='/home/hmp/ChatGLM2-6B/ptuning' " }
+    # plugin_test(plugin='crazy_functions.chatglm微调工具->启动微调', main_input='build/dev.json', advanced_arg=advanced_arg)
```
```diff
@@ -1029,7 +1029,7 @@ def check_repeat_upload(new_pdf_path, pdf_hash):
     # if the contents of all pages are identical, return True
     return False, None
 
-def log_chat(llm_model: str, input_str: str, output_str: str):
+def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str=default_user_name):
     try:
         if output_str and input_str and llm_model:
             uid = str(uuid.uuid4().hex)
```
```diff
@@ -1038,8 +1038,8 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
             logger.bind(chat_msg=True).info(dedent(
                 """
                 ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
-                [UID]
-                {uid}
+                [UID/USER]
+                {uid}/{user_name}
                 [Model]
                 {llm_model}
                 [Query]
```
```diff
@@ -1047,6 +1047,6 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
                 [Response]
                 {output_str}
                 ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-                """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
+                """).format(uid=uid, user_name=user_name, llm_model=llm_model, input_str=input_str, output_str=output_str))
     except:
         logger.error(trimmed_format_exc())
```
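A simplified sketch of why this signature change is backward-compatible: `user_name` defaults to `default_user_name`, so any call site not yet updated keeps working, while updated ones pass `chatbot.get_user()`. This is a standalone illustration, not the repo's actual toolbox.py:

```python
# Standalone illustration of the new log_chat signature; the real function
# logs via loguru and a uuid4 hex, print() stands in for both here.
default_user_name = "default_user"

def log_chat(llm_model: str, input_str: str, output_str: str,
             user_name: str = default_user_name):
    uid = "0" * 8  # the real code uses str(uuid.uuid4().hex)
    print(f"[UID/USER] {uid}/{user_name}")
    print(f"[Model] {llm_model}")
    print(f"[Query] {input_str}")
    print(f"[Response] {output_str}")

log_chat("gpt-3.5-turbo", "hi", "hello")                     # old-style call still works
log_chat("gpt-3.5-turbo", "hi", "hello", user_name="alice")  # new per-user logging
```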