diff --git a/core_functional.py b/core_functional.py
index 71c4e79..0b283a8 100644
--- a/core_functional.py
+++ b/core_functional.py
@@ -82,14 +82,14 @@ def get_core_functions():
         },
-        "学术中英互译": {
-            "Prefix": r"I want you to act as a scientific English-Chinese translator, "
-                      r"I will provide you with some paragraphs in one language "
-                      r"and your task is to accurately and academically translate the paragraphs only into the other language. "
-                      r"Do not repeat the original provided paragraphs after translation. "
-                      r"You should use artificial intelligence tools, "
-                      r"such as natural language processing, and rhetorical knowledge "
-                      r"and experience about effective writing techniques to reply. "
+        "学术英中互译": {
+            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
+                      r"I will provide you with some paragraphs in one language " +
+                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
+                      r"Do not repeat the original provided paragraphs after translation. " +
+                      r"You should use artificial intelligence tools, " +
+                      r"such as natural language processing, and rhetorical knowledge " +
+                      r"and experience about effective writing techniques to reply. " +
                       r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
             "Suffix": r"",
         },
diff --git a/request_llms/bridge_google_gemini.py b/request_llms/bridge_google_gemini.py
index 49d8211..48e5419 100644
--- a/request_llms/bridge_google_gemini.py
+++ b/request_llms/bridge_google_gemini.py
@@ -19,7 +19,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     # 检查API_KEY
     if get_conf("GEMINI_API_KEY") == "":
         raise ValueError(f"请配置 GEMINI_API_KEY。")
-    
+
     genai = GoogleChatInit()
     watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
     gpt_replying_buffer = ''
@@ -50,6 +50,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
         return
 
+    # 适配润色区域
+    if additional_fn is not None:
+        from core_functional import handle_core_functionality
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+
     if "vision" in llm_kwargs["llm_model"]:
         have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
         def make_media_input(inputs, image_paths):
diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py
index 915a13e..91903ad 100644
--- a/request_llms/bridge_zhipu.py
+++ b/request_llms/bridge_zhipu.py
@@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     try:
         check_packages(["zhipuai"])
     except:
-        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
+        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
                                          chatbot=chatbot, history=history, delay=0)
         return
 