From a3e6fc0141ba5afaffd1d9da0a0f9bf317a53b09 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sun, 3 Sep 2023 15:32:39 +0800
Subject: [PATCH] 修复文心一言的接口问题
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functions/vt_fns/vt_call_plugin.py | 26 +++++++++++++-----------
 crazy_functions/虚空终端.py              | 12 +++++++----
 request_llm/bridge_qianfan.py            |  5 +++--
 3 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py
index 1f84d23..614e9fe 100644
--- a/crazy_functions/vt_fns/vt_call_plugin.py
+++ b/crazy_functions/vt_fns/vt_call_plugin.py
@@ -1,6 +1,6 @@
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import update_ui_lastest_msg, get_conf
+from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from request_llm.bridge_all import predict_no_ui_long_connection
 from crazy_functions.json_fns.pydantic_io import GptJsonIO
 import copy, json, pickle, os, sys, time
@@ -13,11 +13,14 @@ def read_avail_plugin_enum():
     plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v}
     plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
     plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
+    plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
+    plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)})
     prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
     prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + prompt
-    return prompt, plugin_arr_dict
+    return prompt, plugin_arr_dict, plugin_arr_dict_parse

 def wrap_code(txt):
+    txt = txt.replace('```','')
     return f"\n```\n{txt}\n```\n"

 def have_any_recent_upload_files(chatbot):
@@ -47,7 +50,7 @@ def get_inputs_show_user(inputs, plugin_arr_enum_prompt):
     return inputs_show_user

 def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
-    plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum()
+    plugin_arr_enum_prompt, plugin_arr_dict, plugin_arr_dict_parse = read_avail_plugin_enum()
     class Plugin(BaseModel):
         plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
         reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfies the user requirement most")
@@ -67,17 +70,16 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         gpt_reply = run_gpt_fn(inputs, "")
         plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn)
     except:
-        msg = "抱歉,当前的大语言模型无法理解您的需求。"
+        msg = f"抱歉, {llm_kwargs['llm_model']}无法理解您的需求。"
         msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt))
         msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
-        msg += "但您可以尝试再试一次\n"
+        msg += "\n但您可以尝试再试一次\n"
         yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
         return
-    if plugin_sel.plugin_selection not in plugin_arr_dict:
-        msg = "抱歉, 找不到合适插件执行该任务, 当前的大语言模型可能无法理解您的需求。"
-        msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt))
-        msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
-        msg += "但您可以尝试再试一次\n"
+    if plugin_sel.plugin_selection not in plugin_arr_dict_parse:
+        msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。"
+        msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply)
+        msg += "\n但您可以尝试再试一次\n"
         yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
         return
@@ -87,7 +89,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     else:
         appendix_info = get_recent_file_prompt_support(chatbot)

-    plugin = plugin_arr_dict[plugin_sel.plugin_selection]
+    plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection]
     yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
     class PluginExplicit(BaseModel):
         plugin_selection: str = plugin_sel.plugin_selection
@@ -106,7 +108,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     # ⭐ ⭐ ⭐ 执行插件
     fn = plugin['Function']
     fn_name = fn.__name__
-    msg = f'正在调用插件: {fn_name}\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}'
+    msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。'
     yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
     yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1)
     return
\ No newline at end of file
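The core of this first file's change is tolerance toward sloppy model output: the prompt asks for a zero-padded plugin key such as `F_0001`, but weaker models (文心一言 among them) often answer with the unpadded form `F_1`, so `read_avail_plugin_enum` now returns a second lookup table accepting both spellings; the error branches additionally name the failing model, and `wrap_code` strips stray backticks so the echoed reply cannot break its surrounding code fence. A minimal, self-contained sketch of the dual-key idea (the `plugins` sample data and `build_parse_dict` name are made up for illustration):

```python
def build_parse_dict(plugin_arr):
    # Accept the zero-padded spelling ("F_0001") the prompt asks for...
    parse_dict = {"F_{:04d}".format(i): v for i, v in enumerate(plugin_arr.values(), start=1)}
    # ...and the unpadded spelling ("F_1") that weaker models tend to return.
    parse_dict.update({f"F_{i}": v for i, v in enumerate(plugin_arr.values(), start=1)})
    return parse_dict

plugins = {"translate_paper": {"Info": "Translate a paper"},
           "summarize_docs":  {"Info": "Summarize documents"}}
parse_dict = build_parse_dict(plugins)
assert parse_dict["F_0002"] is parse_dict["F_2"]   # both spellings resolve to the same plugin
```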
diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py
index cdf0605..caf241c 100644
--- a/crazy_functions/虚空终端.py
+++ b/crazy_functions/虚空终端.py
@@ -36,7 +36,7 @@ explain_msg = """

 2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。

-3. 使用GPT4等强力模型时,您的意图可以被识别的更准确。该插件诞生时间不长,欢迎您前往Github反馈问题。
+3. 建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。

 4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。

@@ -46,7 +46,7 @@ explain_msg = """
 from pydantic import BaseModel, Field
 from typing import List
 from toolbox import CatchException, update_ui, gen_time_str
-from toolbox import update_ui_lastest_msg
+from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from request_llm.bridge_all import predict_no_ui_long_connection
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from crazy_functions.crazy_utils import input_clipping
@@ -104,6 +104,7 @@ def analyze_intention_with_simple_rules(txt):

 @CatchException
 def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    disable_auto_promotion(chatbot=chatbot)
     # 获取当前虚空终端状态
     state = VoidTerminalState.get_state(chatbot)
     appendix_msg = ""
@@ -142,14 +143,17 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     yield from update_ui_lastest_msg(
         lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
     gpt_json_io = GptJsonIO(UserIntention)
-    inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions
+    rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']"
+    inputs = "Analyze the intention of the user according to following user input: \n\n" + \
+        ">> " + (txt+rf_req).rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions
     run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
         inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
     try:
         user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
+        lastmsg = f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}"
     except:
         yield from update_ui_lastest_msg(
-            lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型不能理解您的意图", chatbot=chatbot, history=history, delay=0)
+            lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0)
         return
     else:
         pass
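Two things change in the intent-analysis prompt of 虚空终端主路由: the allowed intent names are restated inline (`rf_req`), and every line of the user's text is quoted with a `>> ` prefix so that multi-line input cannot bleed into the surrounding instructions. A standalone sketch of that quoting step (the sample `txt` and the trailing placeholder string are invented; the real code appends `gpt_json_io.format_instructions`):

```python
txt = "帮我翻译这篇论文\n并输出PDF"  # made-up multi-line user input
rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']"

# Prefix every line with ">> " so the user text reads as one quoted block.
quoted = ">> " + (txt + rf_req).rstrip('\n').replace('\n', '\n>> ')
inputs = ("Analyze the intention of the user according to following user input: \n\n"
          + quoted + '\n\n' + "<format instructions go here>")
print(inputs)
# >> 帮我翻译这篇论文
# >> 并输出PDF
# >> choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']
```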
diff --git a/request_llm/bridge_qianfan.py b/request_llm/bridge_qianfan.py
index e2cdb0e..be73976 100644
--- a/request_llm/bridge_qianfan.py
+++ b/request_llm/bridge_qianfan.py
@@ -49,16 +49,17 @@ def get_access_token():

 def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
     conversation_cnt = len(history) // 2
+    if system_prompt == "": system_prompt = "Hello"
     messages = [{"role": "user", "content": system_prompt}]
     messages.append({"role": "assistant", "content": 'Certainly!'})
     if conversation_cnt:
         for index in range(0, 2*conversation_cnt, 2):
             what_i_have_asked = {}
             what_i_have_asked["role"] = "user"
-            what_i_have_asked["content"] = history[index]
+            what_i_have_asked["content"] = history[index] if history[index]!="" else "Hello"
             what_gpt_answer = {}
             what_gpt_answer["role"] = "assistant"
-            what_gpt_answer["content"] = history[index+1]
+            what_gpt_answer["content"] = history[index+1] if history[index+1]!="" else "Hello"
             if what_i_have_asked["content"] != "":
                 if what_gpt_answer["content"] == "": continue
                 if what_gpt_answer["content"] == timeout_bot_msg: continue
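This last file carries the actual 文心一言 interface fix: the Qianfan chat endpoint, as this patch treats it, rejects messages whose `content` is empty, so every empty slot (system prompt, user turn, assistant turn) is padded with a placeholder before the payload is sent. A hedged sketch of the same guard, separated from the bridge for clarity (`sanitize_history` is a hypothetical helper, not part of the repository, and the empty-content restriction is an assumption taken from the patch's behavior):

```python
def sanitize_history(history, placeholder="Hello"):
    """Build an alternating user/assistant message list, replacing empty
    turns with a placeholder, mirroring generate_message_payload's guard."""
    messages = []
    # Walk the history two entries at a time: (user turn, assistant turn).
    for index in range(0, len(history) - len(history) % 2, 2):
        user_turn = history[index] if history[index] != "" else placeholder
        bot_turn = history[index + 1] if history[index + 1] != "" else placeholder
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": bot_turn})
    return messages

# An empty user or assistant turn no longer yields an empty `content` field.
print(sanitize_history(["你好", "", "", "好的"]))
```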