update multi-language module

commit 43809c107d (parent 1721edc990)
Author: 505030475
Date: 2023-08-04 23:53:23 +08:00

4 changed files with 176 additions and 85 deletions


@@ -1,87 +1,70 @@
from toolbox import CatchException, update_ui, gen_time_str
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping
import copy, json
prompt = """
I have to achieve some functionalities by calling one of the functions below.
Your job is to find the correct funtion to use to satisfy my requirement,
and then write python code to call this function with correct parameters.
These are functions you are allowed to choose from:
1.
功能描述: 总结音视频内容
调用函数: ConcludeAudioContent(txt, llm_kwargs)
参数说明:
txt: 音频文件的路径
llm_kwargs: 模型参数, 永远给定None
2.
功能描述: 将每次对话记录写入Markdown格式的文件中
调用函数: WriteMarkdown()
3.
功能描述: 将指定目录下的PDF文件从英文翻译成中文
调用函数: BatchTranslatePDFDocuments_MultiThreaded(txt, llm_kwargs)
参数说明:
txt: PDF文件所在的路径
llm_kwargs: 模型参数, 永远给定None
4.
功能描述: 根据文本使用GPT模型生成相应的图像
调用函数: ImageGeneration(txt, llm_kwargs)
参数说明:
txt: 图像生成所用到的提示文本
llm_kwargs: 模型参数, 永远给定None
5.
功能描述: 对输入的word文档进行摘要生成
调用函数: SummarizingWordDocuments(input_path, output_path)
参数说明:
input_path: 待处理的word文档路径
output_path: 摘要生成后的文档路径
You should always anwser with following format:
----------------
Code:
```
class AutoAcademic(object):
def __init__(self):
self.selected_function = "FILL_CORRECT_FUNCTION_HERE" # e.g., "GenerateImage"
self.txt = "FILL_MAIN_PARAMETER_HERE" # e.g., "荷叶上的蜻蜓"
self.llm_kwargs = None
```
Explanation:
只有GenerateImage和生成图像相关, 因此选择GenerateImage函数。
----------------
Now, this is my requirement:
"""
def get_fn_lib():
return {
"BatchTranslatePDFDocuments_MultiThreaded": ("crazy_functions.批量翻译PDF文档_多线程", "批量翻译PDF文档"),
"SummarizingWordDocuments": ("crazy_functions.总结word文档", "总结word文档"),
"ImageGeneration": ("crazy_functions.图片生成", "图片生成"),
"TranslateMarkdownFromEnglishToChinese": ("crazy_functions.批量Markdown翻译", "Markdown中译英"),
"SummaryAudioVideo": ("crazy_functions.总结音视频", "总结音视频"),
"BatchTranslatePDFDocuments_MultiThreaded": {
"module": "crazy_functions.批量翻译PDF文档_多线程",
"function": "批量翻译PDF文档",
"description": "Translate PDF Documents",
"arg_1_description": "A path containing pdf files.",
},
"SummarizingWordDocuments": {
"module": "crazy_functions.总结word文档",
"function": "总结word文档",
"description": "Summarize Word Documents",
"arg_1_description": "A path containing Word files.",
},
"ImageGeneration": {
"module": "crazy_functions.图片生成",
"function": "图片生成",
"description": "Generate a image that satisfies some description.",
"arg_1_description": "Descriptions about the image to be generated.",
},
"TranslateMarkdownFromEnglishToChinese": {
"module": "crazy_functions.批量Markdown翻译",
"function": "Markdown中译英",
"description": "Translate Markdown Documents from English to Chinese.",
"arg_1_description": "A path containing Markdown files.",
},
"SummaryAudioVideo": {
"module": "crazy_functions.总结音视频",
"function": "总结音视频",
"description": "Get text from a piece of audio and summarize this audio.",
"arg_1_description": "A path containing audio files.",
},
}
+functions = [
+    {
+        "name": k,
+        "description": v['description'],
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "plugin_arg_1": {
+                    "type": "string",
+                    "description": v['arg_1_description'],
+                },
+            },
+            "required": ["plugin_arg_1"],
+        },
+    } for k, v in get_fn_lib().items()
+]
def inspect_dependency(chatbot, history):
return True
def eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
import subprocess, sys, os, shutil, importlib
-    with open('gpt_log/void_terminal_runtime.py', 'w', encoding='utf8') as f:
-        f.write(code)
-    import importlib
try:
-        AutoAcademic = getattr(importlib.import_module('gpt_log.void_terminal_runtime', 'AutoAcademic'), 'AutoAcademic')
-        # importlib.reload(AutoAcademic)
-        auto_dict = AutoAcademic()
-        selected_function = auto_dict.selected_function
-        txt = auto_dict.txt
-        fp, fn = get_fn_lib()[selected_function]
+        tmp = get_fn_lib()[code['name']]
+        fp, fn = tmp['module'], tmp['function']
        fn_plugin = getattr(importlib.import_module(fp, fn), fn)
-        yield from fn_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+        arg = json.loads(code['arguments'])['plugin_arg_1']
+        yield from fn_plugin(arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
except:
from toolbox import trimmed_format_exc
chatbot.append(["执行错误", f"\n```\n{trimmed_format_exc()}\n```\n"])
@@ -110,22 +93,27 @@ def 终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_
history = []
    # Basic info: features, contributors
chatbot.append(["函数插件功能?", "根据自然语言执行插件命令, 作者: binary-husky, 插件初始化中 ..."])
chatbot.append(["虚空终端插件功能?", "根据自然语言的描述, 执行任意插件命令."])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    # # Try to import dependencies; if any are missing, suggest how to install them
    # dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history)  # refresh the UI
    # if not dep_ok: return
    # User input
-    i_say = prompt + txt
+    i_say = txt
    # Start
+    llm_kwargs_function_call = copy.deepcopy(llm_kwargs)
+    llm_kwargs_function_call['llm_model'] = 'gpt-call-fn'  # switch the model used for this call
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=txt,
-        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
-        sys_prompt=""
+        llm_kwargs=llm_kwargs_function_call, chatbot=chatbot, history=[],
+        sys_prompt=functions
)
-    # Convert the code into an animation
-    code = get_code_block(gpt_say)
-    yield from eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+    res = json.loads(gpt_say)['choices'][0]
+    if res['finish_reason'] == 'function_call':
+        code = json.loads(gpt_say)['choices'][0]
+        yield from eval_code(code['message']['function_call'], llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+    else:
+        chatbot.append(["无法调用相关功能", res])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
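For context on the second hunk: 终端() now sends the generated `functions` schema as sys_prompt, gets the raw chat-completions JSON back, and branches on finish_reason == 'function_call'. The sketch below shows the request and response shapes this relies on, following the public OpenAI function-calling format; the model name and exact payload assembly are assumptions, since the 'gpt-call-fn' route lives inside request_gpt_model_in_new_thread_with_ui_alive and is not part of this diff.

```python
import json

def build_request(user_text, functions):
    # Assumed request body for a function-calling chat-completions call; `functions` is the
    # list of JSON-schema descriptors built from get_fn_lib() in this commit.
    return {
        "model": "gpt-3.5-turbo-0613",  # assumption: any function-calling-capable chat model
        "messages": [{"role": "user", "content": user_text}],
        "functions": functions,
        "function_call": "auto",
    }

def pick_plugin(reply_json):
    """Return (plugin_name, plugin_arg_1) if the model chose a function, else (None, None)."""
    choice = json.loads(reply_json)["choices"][0]
    if choice.get("finish_reason") == "function_call":
        call = choice["message"]["function_call"]  # {"name": ..., "arguments": "<json string>"}
        return call["name"], json.loads(call["arguments"])["plugin_arg_1"]
    return None, None

# Example reply in the shape the new branch expects:
reply = json.dumps({"choices": [{"finish_reason": "function_call",
                                 "message": {"function_call": {
                                     "name": "ImageGeneration",
                                     "arguments": json.dumps({"plugin_arg_1": "a dragonfly on a lotus leaf"})}}}]})
print(pick_plugin(reply))  # -> ('ImageGeneration', 'a dragonfly on a lotus leaf')
```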