From 7ee0c94924cc54693ee6cc95e724774677232f47 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Fri, 20 Oct 2023 21:31:50 +0800
Subject: [PATCH] =?UTF-8?q?=E6=8E=A5=E5=85=A5autogen?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.py                               |   3 +-
 crazy_functional.py                     |  12 ++
 crazy_functions/agent_fns/echo_agent.py |  19 +++
 crazy_functions/agent_fns/persistent.py |  16 +++
 crazy_functions/agent_fns/pipe.py       | 103 +++++++++++++++++
 crazy_functions/多智能体.py             | 142 ++++++++++++++++++++++++
 6 files changed, 294 insertions(+), 1 deletion(-)
 create mode 100644 crazy_functions/agent_fns/echo_agent.py
 create mode 100644 crazy_functions/agent_fns/persistent.py
 create mode 100644 crazy_functions/agent_fns/pipe.py
 create mode 100644 crazy_functions/多智能体.py

diff --git a/config.py b/config.py
index 46ca55f..f25b119 100644
--- a/config.py
+++ b/config.py
@@ -200,7 +200,8 @@ PATH_LOGGING = "gpt_log"
 
 
 # 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
-WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules", "Nougat_Download"]
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
+                     "Warmup_Modules", "Nougat_Download", "AutoGen"]
 
 
 # 自定义按钮的最大数量限制
diff --git a/crazy_functional.py b/crazy_functional.py
index 1d8f5c7..c11e7cb 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -539,6 +539,18 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')
 
+    try:
+        from crazy_functions.多智能体 import 多智能体终端
+        function_plugins.update({
+            "多智能体终端(微软AutoGen)": {
+                "Group": "智能体",
+                "Color": "stop",
+                "AsButton": True,
+                "Function": HotReload(多智能体终端)
+            }
+        })
+    except:
+        print('Load function plugin failed')
 
     # try:
     #     from crazy_functions.chatglm微调工具 import 微调数据集生成
diff --git a/crazy_functions/agent_fns/echo_agent.py b/crazy_functions/agent_fns/echo_agent.py
new file mode 100644
index 0000000..52bf72d
--- /dev/null
+++ b/crazy_functions/agent_fns/echo_agent.py
@@ -0,0 +1,19 @@
+from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
+
+class EchoDemo(PluginMultiprocessManager):
+    def subprocess_worker(self, child_conn):
+        # ⭐⭐ 子进程
+        self.child_conn = child_conn
+        while True:
+            msg = self.child_conn.recv() # PipeCom
+            if msg.cmd == "user_input":
+                # wait for further user input
+                self.child_conn.send(PipeCom("show", msg.content))
+                wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
+                if not wait_success:
+                    # wait timeout, terminate this subprocess_worker
+                    break
+            elif msg.cmd == "terminate":
+                self.child_conn.send(PipeCom("done", ""))
+                break
+        print('[debug] subprocess_worker terminated')
\ No newline at end of file
diff --git a/crazy_functions/agent_fns/persistent.py b/crazy_functions/agent_fns/persistent.py
new file mode 100644
index 0000000..82c869c
--- /dev/null
+++ b/crazy_functions/agent_fns/persistent.py
@@ -0,0 +1,16 @@
+from toolbox import Singleton
+@Singleton
+class GradioMultiuserManagerForPersistentClasses():
+    def __init__(self):
+        self.mapping = {}
+
+    def already_alive(self, key):
+        return (key in self.mapping) and (self.mapping[key].is_alive())
+
+    def set(self, key, x):
+        self.mapping[key] = x
+        return self.mapping[key]
+
+    def get(self, key):
+        return self.mapping[key]
+
diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py
new file mode 100644
index 0000000..df9770a
--- /dev/null
+++ b/crazy_functions/agent_fns/pipe.py
@@ -0,0 +1,103 @@
+from toolbox import update_ui
+import time
+
+class PipeCom():
+    def __init__(self, cmd, content) -> None:
+        self.cmd = cmd
+        self.content = content
+
+class PluginMultiprocessManager():
+    def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+        # ⭐ 主进程
+        self.llm_kwargs = llm_kwargs
+        self.plugin_kwargs = plugin_kwargs
+        self.chatbot = chatbot
+        self.history = history
+        self.system_prompt = system_prompt
+        self.web_port = web_port
+        self.alive = True
+
+    def is_alive(self):
+        return self.alive
+
+    def launch_subprocess_with_pipe(self):
+        # ⭐ 主进程
+        from multiprocessing import Process, Pipe
+        parent_conn, child_conn = Pipe()
+        self.p = Process(target=self.subprocess_worker, args=(child_conn,))
+        self.p.daemon = True
+        self.p.start()
+        return parent_conn
+
+    def terminate(self):
+        self.p.terminate()
+        self.alive = False
+        print('[debug] instance terminated')
+
+    def subprocess_worker(self, child_conn):
+        # ⭐⭐ 子进程
+        raise NotImplementedError
+
+    def send_command(self, cmd):
+        # ⭐ 主进程
+        self.parent_conn.send(PipeCom("user_input", cmd))
+
+    def main_process_ui_control(self, txt, create_or_resume) -> str:
+        # ⭐ 主进程
+        if create_or_resume == 'create':
+            self.cnt = 1
+            self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
+        self.send_command(txt)
+        if txt == 'exit':
+            self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"])
+            yield from update_ui(chatbot=self.chatbot, history=self.history)
+            self.terminate()
+            return "terminate"
+
+        while True:
+            time.sleep(0.5)
+            if self.parent_conn.poll():
+                if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
+                    self.chatbot.pop(-1) # remove the last line
+                msg = self.parent_conn.recv() # PipeCom
+                if msg.cmd == "done":
+                    self.chatbot.append([f"结束", msg.content]); self.cnt += 1
+                    yield from update_ui(chatbot=self.chatbot, history=self.history)
+                    self.terminate(); break
+                if msg.cmd == "show":
+                    self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]); self.cnt += 1
+                    yield from update_ui(chatbot=self.chatbot, history=self.history)
+                if msg.cmd == "interact":
+                    self.chatbot.append([f"程序抵达用户反馈节点.", msg.content +
+                        "\n\n等待您的进一步指令. \n\n(1) 如果您没有什么想说的, 清空输入区,然后直接点击“提交”以继续. " +
+                        "\n\n(2) 如果您需要补充些什么, 输入要反馈的内容, 直接点击“提交”以继续. " +
+                        "\n\n(3) 如果您想终止程序, 输入exit, 直接点击“提交”以终止AutoGen并解锁. "
+                    ])
+                    yield from update_ui(chatbot=self.chatbot, history=self.history)
+                    # do not terminate here, leave the subprocess_worker instance alive
+                    return "wait_feedback"
+            else:
+                if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]:
+                    self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"])
+                self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")]
+                yield from update_ui(chatbot=self.chatbot, history=self.history)
+
+        self.terminate()
+        return "terminate"
+
+    def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
+        # ⭐⭐ 子进程
+        patience = 5 * 60
+        begin_waiting_time = time.time()
+        self.child_conn.send(PipeCom("interact", wait_msg))
+        while True:
+            time.sleep(0.5)
+            if self.child_conn.poll():
+                wait_success = True
+                break
+            if time.time() - begin_waiting_time > patience:
+                self.child_conn.send(PipeCom("done", ""))
+                wait_success = False
+                break
+        return wait_success
+
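
Note on agent_fns/pipe.py: PluginMultiprocessManager keeps the Gradio session in the main process and runs the blocking agent loop in a child process; the two sides exchange pickled PipeCom objects, with cmd "user_input"/"terminate" flowing parent-to-child and "show"/"interact"/"done" flowing child-to-parent. The standalone sketch below reduces that round trip to plain multiprocessing so it can be run without Gradio or gpt_academic; it is illustrative only and not part of the patch:

    from multiprocessing import Process, Pipe

    class PipeCom():
        def __init__(self, cmd, content):
            self.cmd = cmd
            self.content = content

    def child(conn):
        msg = conn.recv()                        # blocks until "user_input" arrives
        conn.send(PipeCom("show", msg.content))  # progress update shown in the UI
        conn.send(PipeCom("done", "finished"))   # ends the parent's polling loop

    if __name__ == '__main__':
        parent_conn, child_conn = Pipe()
        p = Process(target=child, args=(child_conn,), daemon=True)
        p.start()
        parent_conn.send(PipeCom("user_input", "hello"))
        while True:
            msg = parent_conn.recv()             # PipeCom instances pickle cleanly
            print(msg.cmd + ': ' + msg.content)
            if msg.cmd == "done":
                break
        p.join()

EchoDemo in agent_fns/echo_agent.py is the in-tree version of the same pattern; subprocess_worker_wait_user_feedback() adds the five-minute patience timeout on top.
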
diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py
new file mode 100644
index 0000000..b245bc0
--- /dev/null
+++ b/crazy_functions/多智能体.py
@@ -0,0 +1,142 @@
+# 本源代码中, ⭐ = 关键步骤
+"""
+测试:
+    - 裁剪图像,保留下半部分
+    - 交换图像的蓝色通道和红色通道
+    - 将图像转为灰度图像
+    - 将csv文件转excel表格
+
+Testing:
+    - Crop the image, keeping the bottom half.
+    - Swap the blue channel and red channel of the image.
+    - Convert the image to grayscale.
+    - Convert the CSV file to an Excel spreadsheet.
+"""
+
+
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
+from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
+from crazy_functions.crazy_utils import input_clipping, try_install_deps
+from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses
+from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
+from crazy_functions.agent_fns.echo_agent import EchoDemo
+import time, os
+
+class AutoGenWorker(PluginMultiprocessManager):
+
+    def gpt_academic_print_override(self, user_proxy, message, sender):
+        self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content']))
+
+    def gpt_academic_get_human_input(self, user_proxy, message):
+        # ⭐⭐ 子进程
+        patience = 300
+        begin_waiting_time = time.time()
+        self.child_conn.send(PipeCom("interact", message))
+        while True:
+            time.sleep(0.5)
+            if self.child_conn.poll():
+                wait_success = True
+                break
+            if time.time() - begin_waiting_time > patience:
+                self.child_conn.send(PipeCom("done", ""))
+                wait_success = False
+                break
+        if wait_success:
+            return self.child_conn.recv().content
+        else:
+            raise TimeoutError("等待用户输入超时")
+
+    def do_autogen(self, input):
+        # ⭐⭐ 子进程
+        input = input.content
+        with ProxyNetworkActivate("AutoGen"):
+            from autogen import AssistantAgent, UserProxyAgent
+            config_list = [{
+                'model': 'gpt-3.5-turbo-16k',
+                'api_key': os.environ.get("OPENAI_API_KEY", ""),  # 切勿将真实密钥硬编码到源代码中
+            },]
+
+
+            autogen_work_dir = get_log_folder('autogen')
+            code_execution_config={"work_dir": autogen_work_dir, "use_docker":True}
+            # create an AssistantAgent instance named "assistant"
+            assistant = AssistantAgent(
+                name="assistant",
+                llm_config={
+                    "config_list": config_list,
+                }
+            )
+            # create a UserProxyAgent instance named "user_proxy"
+            user_proxy = UserProxyAgent(
+                name="user_proxy",
+                human_input_mode="ALWAYS",
+                code_execution_config=code_execution_config,
+                is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
+            )
+
+            # assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, code_execution_config=code_execution_config)
+            # user_proxy = UserProxyAgent("user_proxy", code_execution_config=code_execution_config)
+
+            user_proxy._print_received_message = lambda a,b: self.gpt_academic_print_override(user_proxy, a, b)
+            assistant._print_received_message = lambda a,b: self.gpt_academic_print_override(user_proxy, a, b)
+            user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
+            try:
+                user_proxy.initiate_chat(assistant, message=input)
+            except Exception as e:
+                tb_str = '```\n' + trimmed_format_exc() + '```'
+                self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
+
+    def subprocess_worker(self, child_conn):
+        # ⭐⭐ 子进程
+        self.child_conn = child_conn
+        while True:
+            msg = self.child_conn.recv() # PipeCom
+            self.do_autogen(msg)
+
+@CatchException
+def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    web_port        当前软件运行的端口号
+    """
+    # 尝试导入依赖,如果缺少依赖,则给出安装建议
+    try:
+        import autogen
+    except:
+        report_execption(chatbot, history,
+                         a=f"解析项目: {txt}",
+                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen```。")
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        return
+
+    chatbot.get_cookies()['lock_plugin'] = None
+    persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses()
+    user_uuid = chatbot.get_cookies().get('uuid')
+    persistent_key = f"{user_uuid}->多智能体终端"
+    if persistent_class_multi_user_manager.already_alive(persistent_key):
+        # 当已经存在一个正在运行的多智能体终端时,直接将用户输入传递给它,而不是再次启动一个新的多智能体终端
+        print('[debug] feed new user input')
+        executor = persistent_class_multi_user_manager.get(persistent_key)
+        exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume")
+    else:
+        # 运行多智能体终端 (首次)
+        print('[debug] create new executor instance')
+        history = []
+        chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        executor = AutoGenWorker(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+        persistent_class_multi_user_manager.set(persistent_key, executor)
+        exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")
+
+    if exit_reason == "wait_feedback":
+        # 当插件进入等待用户反馈的状态时, 在cookie中锁定插件, 使用户下一次提交的输入重新路由回本插件
+        executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.多智能体->多智能体终端'
+    else:
+        executor.chatbot.get_cookies()['lock_plugin'] = None
+    yield from update_ui(chatbot=executor.chatbot, history=executor.history) # 更新状态