Improve the success rate of the 虚空终端 (Void Terminal) plugin

This commit is contained in:
qingxu fu
2023-08-31 18:04:31 +08:00
parent b69140307b
commit 8b0905c076
6 changed files with 83 additions and 47 deletions

View File

@@ -37,10 +37,18 @@ Here is the output schema:
{schema}
```"""
PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
```
{schema}
```"""
class GptJsonIO():
def __init__(self, schema):
def __init__(self, schema, example_instruction=True):
self.pydantic_object = schema
self.example_instruction = example_instruction
self.format_instructions = self.generate_format_instructions()
def generate_format_instructions(self):
@@ -53,9 +61,11 @@ class GptJsonIO():
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
schema_str = json.dumps(reduced_schema)  # build schema_str before branching so the SIMPLE branch can use it too
if self.example_instruction:
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
else:
return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str)
def generate_output(self, text):
# Greedy search for 1st json candidate.
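For context, a minimal usage sketch of the new example_instruction switch (not part of this commit): the Answer model and the import path below are assumptions made for illustration; only GptJsonIO(schema, example_instruction=...) and format_instructions come from the code above.
```
# Hedged sketch; the import path and the Answer model are assumed, not shown in this diff.
from pydantic import BaseModel, Field
from crazy_functions.json_fns.pydantic_io import GptJsonIO  # assumed module path

class Answer(BaseModel):  # hypothetical schema, for illustration only
    city: str = Field(description="City name.")
    population: int = Field(description="Estimated population.")

io_default = GptJsonIO(Answer)                             # prompt that still contains the worked example
io_simple = GptJsonIO(Answer, example_instruction=False)   # shorter, schema-only prompt
print(io_simple.format_instructions)
```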

View File

@@ -11,32 +11,47 @@ def read_avail_plugin_enum():
plugin_arr = get_crazy_functions()
# remove plugins without explanation
plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v}
plugin_arr_info = {"F{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + prompt
return prompt, plugin_arr_dict
def wrap_code(txt):
return f"\n```\n{txt}\n```\n"
def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum()
class Plugin(BaseModel):
plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F0000000000000")
plugin_arg: str = Field(description="The argument of the plugin. A path or url or empty.", default="")
plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfies the user requirement best")
# ⭐ ⭐ ⭐ 选择插件
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(Plugin)
gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n"
gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfies the user requirement best"}\n"""
gpt_json_io.format_instructions += "The plugins you are authorized to use are listed below:\n"
gpt_json_io.format_instructions += plugin_arr_enum_prompt
inputs = "Choose the correct plugin and extract plugin_arg, the user requirement is: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
gpt_json_io.format_instructions
inputs = "Choose the correct plugin according to user requirements, the user requirement is: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
gpt_reply = ""  # pre-set so the error report below still works if the request itself fails
try:
gpt_reply = run_gpt_fn(inputs, "")
plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn)
except Exception:
msg = "抱歉,当前的大语言模型无法理解您的需求。"
msg += "请求的Prompt为\n" + wrap_code(inputs)
msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
msg += "但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return
if plugin_sel.plugin_selection not in plugin_arr_dict:
msg = f'找不到合适插件执行该任务'
msg = "抱歉, 找不到合适插件执行该任务, 当前的大语言模型可能无法理解您的需求。"
msg += "请求的Prompt为\n" + wrap_code(inputs)
msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
msg += "但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return
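To make the new plugin-selection flow concrete, here is a hedged sketch of the data shapes involved: an F_xxxx plugin enum prompt and the membership check applied to the model's JSON reply. The plugin entries and the reply text are invented for illustration; only the key format, the json.loads expectation, and the guard above come from the diff.
```
# Hedged sketch; the plugin entries and the model reply below are invented.
import json

plugin_arr_dict = {  # hypothetical plugins, keyed like read_avail_plugin_enum()
    "F_0001": {"Info": "Translate a PDF paper"},
    "F_0002": {"Info": "Summarize a word document"},
}
plugin_arr_info = {k: v["Info"] for k, v in plugin_arr_dict.items()}
plugin_arr_enum_prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + \
    json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)

# A well-behaved model reply that json.loads can parse directly:
gpt_reply = '{"plugin_selection": "F_0001", "reason_of_selection": "The user asked for a PDF translation."}'
plugin_sel = json.loads(gpt_reply)

# Same guard as above: an unknown enum value falls through to the apology message.
if plugin_sel["plugin_selection"] not in plugin_arr_dict:
    print("No suitable plugin found")
else:
    print("Dispatching", plugin_sel["plugin_selection"])
```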

View File

@@ -46,7 +46,7 @@ def analyze_with_rule(txt):
return is_certain, user_intention
@CatchException
def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -57,7 +57,7 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("自动终端状态: ", f"正在执行任务: {txt}"))
chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 初始化插件状态
@@ -67,21 +67,29 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
def update_vt_state():
# 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->自动终端'
chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'
chatbot._cookies['vt_state'] = pickle.dumps(state)
# ⭐ ⭐ ⭐ 分析用户意图
is_certain, user_intention = analyze_with_rule(txt)
if not is_certain:
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(UserIntention)
inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
try:
user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
except Exception:
yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型不能理解您的意图", chatbot=chatbot, history=history, delay=0)
return
else:
pass
yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: intention_type={user_intention.intention_type}", chatbot=chatbot, history=history, delay=0)
# 用户意图: 修改本项目的配置
if user_intention.intention_type == 'ModifyConfiguration':
yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
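The lock_plugin cookie set in update_vt_state above stores a "module->function" locator string. As an illustration only (the project's real dispatcher is not shown in this diff), such a locator could be resolved back into a callable roughly like this:
```
# Illustrative resolver for a "module->function" locator; treat it as an assumption,
# not the project's actual dispatch code.
import importlib

def resolve_locked_plugin(locator: str):
    module_name, func_name = locator.split('->')    # e.g. 'crazy_functions.虚空终端->虚空终端'
    module = importlib.import_module(module_name)   # import the plugin module by dotted path
    return getattr(module, func_name)               # fetch the plugin's entry function

# plugin_fn = resolve_locked_plugin('crazy_functions.虚空终端->虚空终端')
```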
@@ -97,22 +105,3 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
return
# # if state == 'wait_user_keyword':
# # chatbot._cookies['lock_plugin'] = None # 解除插件锁定,避免遗忘导致死锁
# # chatbot._cookies['plugin_state_0001'] = None # 解除插件状态,避免遗忘导致死锁
# # # 解除插件锁定
# # chatbot.append((f"获取关键词:{txt}", ""))
# # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# # inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
# # gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
# # inputs=inputs, inputs_show_user=inputs_show_user,
# # llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
# # sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'"
# # )
# # chatbot[-1] = [chatbot[-1][0], gpt_say]
# yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# return