Version 3.56 - Merge branch 'frontier'
23
crazy_functions/agent_fns/auto_agent.py
Normal file
@@ -0,0 +1,23 @@
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
from crazy_functions.agent_fns.autogen_general import AutoGenGeneral
import time


class AutoGenMath(AutoGenGeneral):

    def define_agents(self):
        from autogen import AssistantAgent, UserProxyAgent
        return [
            {
                "name": "assistant",            # name of the agent
                "cls": AssistantAgent,          # class of the agent
            },
            {
                "name": "user_proxy",           # name of the agent
                "cls": UserProxyAgent,          # class of the agent
                "human_input_mode": "ALWAYS",   # always ask for human input
                "llm_config": False,            # disables llm-based auto reply
            },
        ]
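Reviewer note: AutoGenMath only declares the agent specs; the base class below instantiates them. For comparison, a hypothetical unattended variant would only need a different spec dict. A minimal sketch, assuming the same AutoGenGeneral base class and the standard AutoGen `human_input_mode` values; `AutoGenUnattended` is not part of this commit:

```python
# Hypothetical sketch (not in this commit): a variant that never pauses for input.
from crazy_functions.agent_fns.autogen_general import AutoGenGeneral


class AutoGenUnattended(AutoGenGeneral):
    def define_agents(self):
        from autogen import AssistantAgent, UserProxyAgent
        return [
            {"name": "assistant", "cls": AssistantAgent},
            {
                "name": "user_proxy",
                "cls": UserProxyAgent,
                "human_input_mode": "NEVER",  # never pause for human input
                "llm_config": False,          # the proxy itself does not call the LLM
            },
        ]
```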
75
crazy_functions/agent_fns/autogen_general.py
Normal file
@@ -0,0 +1,75 @@
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
import time


class AutoGenGeneral(PluginMultiprocessManager):

    def gpt_academic_print_override(self, user_proxy, message, sender):
        # ⭐⭐ runs in the child process
        self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content']))

    def gpt_academic_get_human_input(self, user_proxy, message):
        # ⭐⭐ runs in the child process
        patience = 300
        begin_waiting_time = time.time()
        self.child_conn.send(PipeCom("interact", message))
        while True:
            time.sleep(0.5)
            if self.child_conn.poll():
                wait_success = True
                break
            if time.time() - begin_waiting_time > patience:
                self.child_conn.send(PipeCom("done", ""))
                wait_success = False
                break
        if wait_success:
            return self.child_conn.recv().content
        else:
            raise TimeoutError("Timed out waiting for user input")

    def define_agents(self):
        raise NotImplementedError

    def do_audogen(self, input):
        # ⭐⭐ runs in the child process
        input = input.content
        with ProxyNetworkActivate("AutoGen"):
            from autogen import AssistantAgent, UserProxyAgent
            config_list = [{
                'model': self.llm_kwargs['llm_model'],
                'api_key': self.llm_kwargs['api_key'],
            },]
            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": True}
            agents = self.define_agents()
            user_proxy = None
            assistant = None
            for agent_kwargs in agents:
                agent_cls = agent_kwargs.pop('cls')
                kwargs = {
                    'llm_config': {
                        "config_list": config_list,
                    },
                    'code_execution_config': code_execution_config,
                }
                kwargs.update(agent_kwargs)
                agent_handle = agent_cls(**kwargs)
                agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
                if agent_kwargs['name'] == 'user_proxy':
                    agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
                    user_proxy = agent_handle
                if agent_kwargs['name'] == 'assistant': assistant = agent_handle
            try:
                if user_proxy is None or assistant is None: raise Exception("The user proxy agent or the assistant agent is not defined")
                user_proxy.initiate_chat(assistant, message=input)
            except Exception as e:
                tb_str = '```\n' + trimmed_format_exc() + '```'
                self.child_conn.send(PipeCom("done", "AutoGen execution failed: \n\n" + tb_str))

    def subprocess_worker(self, child_conn):
        # ⭐⭐ runs in the child process
        self.child_conn = child_conn
        while True:
            msg = self.child_conn.recv()  # PipeCom
            self.do_audogen(msg)
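Reviewer note: do_audogen is essentially the canonical two-agent AutoGen loop, with the print and human-input hooks redirected into the pipe. For orientation, a minimal standalone sketch of that underlying pattern, assuming `pyautogen` is installed; the model name, key, and work dir are placeholders:

```python
# Minimal standalone AutoGen two-agent loop (sketch; placeholder credentials).
from autogen import AssistantAgent, UserProxyAgent

config_list = [{"model": "gpt-4", "api_key": "sk-placeholder"}]
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent(
    "user_proxy",
    human_input_mode="ALWAYS",   # pause for a human between turns
    llm_config=False,            # the proxy only relays messages and runs code
    code_execution_config={"work_dir": "autogen_logs", "use_docker": True},
)
# The proxy opens the conversation; the assistant replies, possibly with code
# that the proxy then executes inside Docker.
user_proxy.initiate_chat(assistant, message="Convert data.csv to an Excel spreadsheet.")
```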
19
crazy_functions/agent_fns/echo_agent.py
Normal file
@@ -0,0 +1,19 @@
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom

class EchoDemo(PluginMultiprocessManager):
    def subprocess_worker(self, child_conn):
        # ⭐⭐ child process
        self.child_conn = child_conn
        while True:
            msg = self.child_conn.recv() # PipeCom
            if msg.cmd == "user_input":
                # wait for further user input
                self.child_conn.send(PipeCom("show", msg.content))
                wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="I am ready for the next question.")
                if not wait_success:
                    # wait timed out, terminate this subprocess_worker
                    break
            elif msg.cmd == "terminate":
                self.child_conn.send(PipeCom("done", ""))
                break
        print('[debug] subprocess_worker terminated')
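Reviewer note: EchoDemo is a test scaffold for the pipe protocol. A sketch of driving its worker loop directly over a raw Pipe, bypassing the gradio UI; the `object.__new__` trick skips `__init__`, whose arguments are UI handles, and is purely for illustration:

```python
# Sketch: exercise EchoDemo's worker loop in-process, without the UI.
from multiprocessing import Pipe
from crazy_functions.agent_fns.echo_agent import EchoDemo
from crazy_functions.agent_fns.pipe import PipeCom

parent_conn, child_conn = Pipe()
demo = object.__new__(EchoDemo)            # skip __init__ (needs UI handles)
parent_conn.send(PipeCom("user_input", "hello"))
parent_conn.send(PipeCom("terminate", ""))
demo.subprocess_worker(child_conn)         # echoes "hello", then exits
print(parent_conn.recv().content)          # -> "hello"
```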
16
crazy_functions/agent_fns/persistent.py
Normal file
@@ -0,0 +1,16 @@
from toolbox import Singleton

@Singleton
class GradioMultiuserManagerForPersistentClasses():
    def __init__(self):
        self.mapping = {}

    def already_alive(self, key):
        return (key in self.mapping) and (self.mapping[key].is_alive())

    def set(self, key, x):
        self.mapping[key] = x
        return self.mapping[key]

    def get(self, key):
        return self.mapping[key]
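Reviewer note: a usage sketch of the manager, mirroring how 多智能体.py below consumes it. `DummyExecutor` is a hypothetical stand-in for the real PluginMultiprocessManager subclass; the sketch assumes it is run from the repository root:

```python
# Sketch: one persistent executor per (user, plugin) key; @Singleton makes
# every call site share the same mapping.
from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses

class DummyExecutor:
    """Hypothetical stand-in; only is_alive() is required by the manager."""
    def is_alive(self):
        return True

manager = GradioMultiuserManagerForPersistentClasses()
key = "some-user-uuid->多智能体终端"
if manager.already_alive(key):
    executor = manager.get(key)                   # resume the running session
else:
    executor = manager.set(key, DummyExecutor())  # register a new one
assert manager.already_alive(key)
```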
150
crazy_functions/agent_fns/pipe.py
Normal file
@@ -0,0 +1,150 @@
from toolbox import get_log_folder, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone
import time, os

class PipeCom():
    def __init__(self, cmd, content) -> None:
        self.cmd = cmd
        self.content = content


class PluginMultiprocessManager():
    def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        # ⭐ main process
        self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str())
        self.previous_work_dir_files = {}
        self.llm_kwargs = llm_kwargs
        self.plugin_kwargs = plugin_kwargs
        self.chatbot = chatbot
        self.history = history
        self.system_prompt = system_prompt
        self.web_port = web_port
        self.alive = True

    def is_alive(self):
        return self.alive

    def launch_subprocess_with_pipe(self):
        # ⭐ main process
        from multiprocessing import Process, Pipe
        parent_conn, child_conn = Pipe()
        self.p = Process(target=self.subprocess_worker, args=(child_conn,))
        self.p.daemon = True
        self.p.start()
        return parent_conn

    def terminate(self):
        self.p.terminate()
        self.alive = False
        print('[debug] instance terminated')

    def subprocess_worker(self, child_conn):
        # ⭐⭐ child process
        raise NotImplementedError

    def send_command(self, cmd):
        # ⭐ main process
        self.parent_conn.send(PipeCom("user_input", cmd))

    def immediate_showoff_when_possible(self, fp):
        # ⭐ main process
        # get the file extension of fp
        file_type = fp.split('.')[-1]
        # if it is an image file, preview it in the chat window right away
        if file_type in ['png', 'jpg']:
            image_path = os.path.abspath(fp)
            self.chatbot.append(['New image detected:', f'Local file preview: <br/><div align="center"><img src="file={image_path}"></div>'])
            yield from update_ui(chatbot=self.chatbot, history=self.history)

    def overwatch_workdir_file_change(self):
        # ⭐ main process: monitor the folder mounted into Docker
        path_to_overwatch = self.autogen_work_dir
        change_list = []
        # Scan all files under the path and compare them against the records in
        # self.previous_work_dir_files. If a new file appears, or a file's
        # modification time changes, update self.previous_work_dir_files and
        # record the path of the new or changed file in change_list.
        for root, dirs, files in os.walk(path_to_overwatch):
            for file in files:
                file_path = os.path.join(root, file)
                if file_path not in self.previous_work_dir_files.keys():
                    last_modified_time = os.stat(file_path).st_mtime
                    self.previous_work_dir_files.update({file_path: last_modified_time})
                    change_list.append(file_path)
                else:
                    last_modified_time = os.stat(file_path).st_mtime
                    if last_modified_time != self.previous_work_dir_files[file_path]:
                        self.previous_work_dir_files[file_path] = last_modified_time
                        change_list.append(file_path)
        if len(change_list) > 0:
            file_links = ''
            for f in change_list:
                res = promote_file_to_downloadzone(f)
                file_links += f'<br/><a href="file={res}" target="_blank">{res}</a>'
                yield from self.immediate_showoff_when_possible(f)
            self.chatbot.append(['New documents detected.', f'Document list: {file_links}'])
            yield from update_ui(chatbot=self.chatbot, history=self.history)
        return change_list

    def main_process_ui_control(self, txt, create_or_resume) -> str:
        # ⭐ main process
        if create_or_resume == 'create':
            self.cnt = 1
            self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
        self.send_command(txt)
        if txt == 'exit':
            self.chatbot.append([f"Terminated", "The exit signal was received; terminating the AutoGen program."])
            yield from update_ui(chatbot=self.chatbot, history=self.history)
            self.terminate()
            return "terminate"

        while True:
            time.sleep(0.5)
            if self.parent_conn.poll():
                if '[GPT-Academic] waiting' in self.chatbot[-1][-1]:
                    self.chatbot.pop(-1) # remove the last line
                msg = self.parent_conn.recv() # PipeCom
                if msg.cmd == "done":
                    self.chatbot.append([f"Finished", msg.content]); self.cnt += 1
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                    self.terminate(); break
                if msg.cmd == "show":
                    yield from self.overwatch_workdir_file_change()
                    self.chatbot.append([f"Running stage-{self.cnt}", msg.content]); self.cnt += 1
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                if msg.cmd == "interact":
                    yield from self.overwatch_workdir_file_change()
                    self.chatbot.append([f"The program has reached a user-feedback checkpoint.", msg.content +
                                         "\n\nWaiting for your further instructions." +
                                         "\n\n(1) In most cases you do not need to say anything; clear the input area and click 'Submit' to continue. " +
                                         "\n\n(2) If you need to add something, type your feedback and click 'Submit' to continue. " +
                                         "\n\n(3) If you want to terminate the program, type exit and click 'Submit' to terminate AutoGen and unlock the plugin. "
                    ])
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                    # do not terminate here, leave the subprocess_worker instance alive
                    return "wait_feedback"
            else:
                if '[GPT-Academic] waiting' not in self.chatbot[-1][-1]:
                    self.chatbot.append(["[GPT-Academic] waiting for the AutoGen execution result ...", "[GPT-Academic] waiting"])
                self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] waiting", "[GPT-Academic] waiting.")]
                yield from update_ui(chatbot=self.chatbot, history=self.history)

        self.terminate()
        return "terminate"

    def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
        # ⭐⭐ child process
        patience = 5 * 60
        begin_waiting_time = time.time()
        self.child_conn.send(PipeCom("interact", wait_msg))
        while True:
            time.sleep(0.5)
            if self.child_conn.poll():
                wait_success = True
                break
            if time.time() - begin_waiting_time > patience:
                self.child_conn.send(PipeCom("done", ""))
                wait_success = False
                break
        return wait_success
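Reviewer note: the classes above define a small four-verb protocol over a multiprocessing Pipe. A sketch summarizing it, derived from the call sites above:

```python
# The pipe protocol, as used above. Every message is a PipeCom(cmd, content):
#   child  -> parent  "show"        stream an intermediate message to the chat UI
#   child  -> parent  "interact"    pause and ask the user for feedback
#   child  -> parent  "done"        finished (or timed out); main process terminates
#   parent -> child   "user_input"  the text the user submitted
from multiprocessing import Pipe
from crazy_functions.agent_fns.pipe import PipeCom

parent_conn, child_conn = Pipe()
child_conn.send(PipeCom("show", "hello from the worker"))
msg = parent_conn.recv()
assert msg.cmd == "show" and msg.content == "hello from the worker"
```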
@@ -721,8 +721,10 @@ class nougat_interface():
 
     def nougat_with_timeout(self, command, cwd, timeout=3600):
         import subprocess
+        from toolbox import ProxyNetworkActivate
         logging.info(f'Running command {command}')
-        process = subprocess.Popen(command, shell=True, cwd=cwd)
+        with ProxyNetworkActivate("Nougat_Download"):
+            process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
         try:
             stdout, stderr = process.communicate(timeout=timeout)
         except subprocess.TimeoutExpired:
@@ -767,54 +769,6 @@ def try_install_deps(deps, reload_m=[]):
             importlib.reload(__import__(m))
 
 
-HTML_CSS = """
-.row {
-  display: flex;
-  flex-wrap: wrap;
-}
-.column {
-  flex: 1;
-  padding: 10px;
-}
-.table-header {
-  font-weight: bold;
-  border-bottom: 1px solid black;
-}
-.table-row {
-  border-bottom: 1px solid lightgray;
-}
-.table-cell {
-  padding: 5px;
-}
-"""
-
-TABLE_CSS = """
-<div class="row table-row">
-    <div class="column table-cell">REPLACE_A</div>
-    <div class="column table-cell">REPLACE_B</div>
-</div>
-"""
-
-class construct_html():
-    def __init__(self) -> None:
-        self.css = HTML_CSS
-        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>Translation result</title><style>{self.css}</style></head>'
-
-    def add_row(self, a, b):
-        tmp = TABLE_CSS
-        from toolbox import markdown_convertion
-        tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
-        tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
-        self.html_string += tmp
-
-    def save_file(self, file_name):
-        with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
-            f.write(self.html_string.encode('utf-8', 'ignore').decode())
-        return os.path.join(get_log_folder(), file_name)
-
-
 def get_plugin_arg(plugin_kwargs, key, default):
     # if the argument is empty
     if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
@@ -423,7 +423,7 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
     # write html
     try:
         import shutil
-        from ..crazy_utils import construct_html
+        from crazy_functions.pdf_fns.report_gen_html import construct_html
         from toolbox import gen_time_str
         ch = construct_html()
         orig = ""
@@ -308,7 +308,10 @@ def merge_tex_files_(project_foler, main_file, mode):
         fp = os.path.join(project_foler, f)
         fp_ = find_tex_file_ignore_case(fp)
         if fp_:
-            with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+            try:
+                with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+            except:
+                c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
         else:
             raise RuntimeError(f'Cannot find {fp}; the Tex source file is missing!')
         c = merge_tex_files_(project_foler, c, mode)
@@ -366,6 +369,14 @@ def insert_abstract(tex_content):
         # insert "abs_str" on the next line
         modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
         return modified_tex
+    elif r"\begin{document}" in tex_content:
+        # find the position of "\begin{document}"
+        find_index = tex_content.index(r"\begin{document}")
+        # find the nearest ending line
+        end_line_index = tex_content.find("\n", find_index)
+        # insert "abs_str" on the next line
+        modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
+        return modified_tex
     else:
         return tex_content
@@ -73,7 +73,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat
     return res_path
 
 def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
-    from crazy_functions.crazy_utils import construct_html
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
     from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
     from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
58
crazy_functions/pdf_fns/report_gen_html.py
Normal file
@@ -0,0 +1,58 @@
from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
import os


class construct_html():
    def __init__(self) -> None:
        self.html_string = ""

    def add_row(self, a, b):
        from toolbox import markdown_convertion
        template = """
            {
                primary_col: {
                    header: String.raw`__PRIMARY_HEADER__`,
                    msg: String.raw`__PRIMARY_MSG__`,
                },
                secondary_rol: {
                    header: String.raw`__SECONDARY_HEADER__`,
                    msg: String.raw`__SECONDARY_MSG__`,
                }
            },
        """
        def std(str):
            str = str.replace(r'`', r'&#96;')   # escape backticks so they cannot terminate the String.raw literal
            if str.endswith("\\"): str += ' '
            if str.endswith("}"): str += ' '
            if str.endswith("$"): str += ' '
            return str

        template_ = template
        a_lines = a.split('\n')
        b_lines = b.split('\n')

        if len(a_lines) == 1 or len(a_lines[0]) > 50:
            template_ = template_.replace("__PRIMARY_HEADER__", std(a[:20]))
            template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion(a)))
        else:
            template_ = template_.replace("__PRIMARY_HEADER__", std(a_lines[0]))
            template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion('\n'.join(a_lines[1:]))))

        if len(b_lines) == 1 or len(b_lines[0]) > 50:
            template_ = template_.replace("__SECONDARY_HEADER__", std(b[:20]))
            template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion(b)))
        else:
            template_ = template_.replace("__SECONDARY_HEADER__", std(b_lines[0]))
            template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion('\n'.join(b_lines[1:]))))
        self.html_string += template_

    def save_file(self, file_name):
        from toolbox import get_log_folder
        with open('crazy_functions/pdf_fns/report_template.html', 'r', encoding='utf8') as f:
            html_template = f.read()
        html_template = html_template.replace("__TF_ARR__", self.html_string)
        with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
            f.write(html_template.encode('utf-8', 'ignore').decode())
        return os.path.join(get_log_folder(), file_name)
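Reviewer note: add_row accumulates one JS object literal per (original, translated) pair, and save_file splices them into the `__TF_ARR__` placeholder of report_template.html (whose diff is suppressed below). A usage sketch, mirroring how translate_pdf consumes the class; it assumes it is run from the repository root so the relative template path resolves:

```python
# Sketch: build a two-column translation report and write it to the log folder.
from crazy_functions.pdf_fns.report_gen_html import construct_html

ch = construct_html()
ch.add_row("Original paragraph ...", "Translated paragraph ...")  # one row per pair
report_path = ch.save_file("translation_report.html")
print(f"report written to {report_path}")
```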
104
crazy_functions/pdf_fns/report_template.html
Normal file
File diff suppressed because one or more lines are too long
96
crazy_functions/多智能体.py
Normal file
@@ -0,0 +1,96 @@
# In this source file, ⭐ marks a key step
"""
Testing:
- Crop the image, keeping the bottom half.
- Swap the blue channel and red channel of the image.
- Convert the image to grayscale.
- Convert the CSV file to an Excel spreadsheet.
"""


from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
from crazy_functions.crazy_utils import input_clipping, try_install_deps
from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses
from crazy_functions.agent_fns.auto_agent import AutoGenMath
import time


@CatchException
def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             text the user typed into the input field, e.g. a passage to translate, or a path containing files to process
    llm_kwargs      GPT model parameters such as temperature and top_p; usually passed through unchanged
    plugin_kwargs   parameters of the plugin
    chatbot         handle of the chat display window, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt for GPT
    web_port        the port this application is currently running on
    """
    # check whether the current model meets the requirements
    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k']
    llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
    if llm_kwargs['llm_model'] not in supported_llms:
        chatbot.append([f"Task: {txt}", f"This plugin only supports {str(supported_llms)}; the current model is {llm_kwargs['llm_model']}."])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # check whether an API URL redirect is configured
    API_URL_REDIRECT, = get_conf('API_URL_REDIRECT')
    if len(API_URL_REDIRECT) > 0:
        chatbot.append([f"Task: {txt}", f"API redirection is not supported yet."])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # try to import the dependencies; if any are missing, suggest how to install them
    try:
        import autogen, docker
    except:
        chatbot.append([f"Task: {txt}",
            f"Failed to import the required dependencies. This module needs extra dependencies; install them with ```pip install --upgrade pyautogen docker```."])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # check that a docker runtime is available
    try:
        import autogen
        import glob, os, time, subprocess
        subprocess.Popen(['docker', '--version'])
    except:
        chatbot.append([f"Task: {txt}", f"The docker runtime environment is missing!"])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # unlock the plugin
    chatbot.get_cookies()['lock_plugin'] = None
    persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses()
    user_uuid = chatbot.get_cookies().get('uuid')
    persistent_key = f"{user_uuid}->多智能体终端"
    if persistent_class_multi_user_manager.already_alive(persistent_key):
        # when a multi-agent terminal is already running, feed the user input to it instead of starting a new one
        print('[debug] feed new user input')
        executor = persistent_class_multi_user_manager.get(persistent_key)
        exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume")
    else:
        # start the multi-agent terminal (first run)
        print('[debug] create new executor instance')
        history = []
        chatbot.append(["Starting: multi-agent terminal", "Dynamically generated plugin, execution started. Authors: Microsoft & Binary-Husky."])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
        persistent_class_multi_user_manager.set(persistent_key, executor)
        exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")

    if exit_reason == "wait_feedback":
        # when the executor is waiting for user feedback, store it in the cookie so the next call resumes it
        executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.多智能体->多智能体终端'
    else:
        executor.chatbot.get_cookies()['lock_plugin'] = None
    yield from update_ui(chatbot=executor.chatbot, history=executor.history) # update the status
@@ -97,7 +97,8 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa
     generated_conclusion_files = []
     generated_html_files = []
     DST_LANG = "中文"
-    from crazy_functions.crazy_utils import nougat_interface, construct_html
+    from crazy_functions.crazy_utils import nougat_interface
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     nougat_handle = nougat_interface()
     for index, fp in enumerate(file_manifest):
         chatbot.append(["Current progress:", f"Parsing the paper, please wait. (The first run takes a while to download the NOUGAT parameters.)"]); yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
@@ -63,7 +63,7 @@ def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa
     generated_conclusion_files = []
     generated_html_files = []
     DST_LANG = "中文"
-    from crazy_functions.crazy_utils import construct_html
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     for index, fp in enumerate(file_manifest):
         chatbot.append(["Current progress:", f"Connecting to the GROBID service, please wait: {grobid_url}\nIf the wait is too long, change GROBID_URL in the config; it can point to a local GROBID service."]); yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
         article_dict = parse_pdf(fp, grobid_url)
@@ -86,7 +86,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
     TOKEN_LIMIT_PER_FRAGMENT = 1024
     generated_conclusion_files = []
     generated_html_files = []
-    from crazy_functions.crazy_utils import construct_html
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     for index, fp in enumerate(file_manifest):
         # read the PDF file
         file_content, page_one = read_and_clean_pdf_text(fp)