From e359fff0405c4cb865b809b4ecfc0a95a54d2512 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Thu, 15 Feb 2024 00:02:24 +0800
Subject: [PATCH] Fix response message bug in bridge_qianfan.py,
 bridge_qwen.py, bridge_skylark2.py, and bridge_spark.py

---
 request_llms/bridge_qianfan.py  | 12 ++++--------
 request_llms/bridge_qwen.py     |  1 +
 request_llms/bridge_skylark2.py |  1 +
 request_llms/bridge_spark.py    |  5 +++--
 4 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py
index a806e0d..0f02457 100644
--- a/request_llms/bridge_qianfan.py
+++ b/request_llms/bridge_qianfan.py
@@ -146,21 +146,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     yield from update_ui(chatbot=chatbot, history=history)
     # Start receiving the reply
     try:
+        response = f"[Local Message] 等待{model_name}响应中 ..."
         for response in generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
             chatbot[-1] = (inputs, response)
             yield from update_ui(chatbot=chatbot, history=history)
+        history.extend([inputs, response])
+        yield from update_ui(chatbot=chatbot, history=history)
     except ConnectionAbortedError as e:
         from .bridge_all import model_info
         if len(history) >= 2: history[-1] = ""; history[-2] = "" # Clear the overflowed input: history[-2] is this turn's input, history[-1] is this turn's output
-        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], 
+        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
                                max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # Release at least half of the history
         chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
         yield from update_ui(chatbot=chatbot, history=history, msg="异常") # Refresh the UI
         return
-
-    # Wrap up the output
-    response = f"[Local Message] {model_name}响应异常 ..."
-    if response == f"[Local Message] 等待{model_name}响应中 ...":
-        response = f"[Local Message] {model_name}响应异常 ..."
-    history.extend([inputs, response])
-    yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py
index 18877b9..808c2c7 100644
--- a/request_llms/bridge_qwen.py
+++ b/request_llms/bridge_qwen.py
@@ -51,6 +51,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # Start receiving the reply
     from .com_qwenapi import QwenRequestInstance
     sri = QwenRequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
diff --git a/request_llms/bridge_skylark2.py b/request_llms/bridge_skylark2.py
index 8f10b83..1a8edcb 100644
--- a/request_llms/bridge_skylark2.py
+++ b/request_llms/bridge_skylark2.py
@@ -56,6 +56,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # Start receiving the reply
     from .com_skylark2api import YUNQUERequestInstance
     sri = YUNQUERequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py
index 1fe31ce..8449494 100644
--- a/request_llms/bridge_spark.py
+++ b/request_llms/bridge_spark.py
@@ -9,7 +9,7 @@ model_name = '星火认知大模型'
 
 def validate_key():
     XFYUN_APPID = get_conf('XFYUN_APPID')
-    if XFYUN_APPID == '00000000' or XFYUN_APPID == '': 
+    if XFYUN_APPID == '00000000' or XFYUN_APPID == '':
         return False
     return True
 
@@ -49,9 +49,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         from core_functional import handle_core_functionality
         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
 
-    # Start receiving the reply 
+    # Start receiving the reply
     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
+    response = f"[Local Message] 等待{model_name}响应中 ..."
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
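
Notes on the common one-line change: every predict() here streams chunks from a
provider generator and then reads the loop variable `response` after the loop
(qianfan now appends it to `history` inside the `try`; the other bridges
presumably do the same in code below these hunks). If the stream ends before
yielding a single chunk, `response` is never bound, and the later read raises
UnboundLocalError. Seeding `response` with the waiting placeholder before the
loop closes that hole. Below is a minimal sketch of the failure mode and the
fix; `fake_stream`, `predict_buggy`, and `predict_fixed` are hypothetical
stand-ins, not the repo's actual API.

    def fake_stream(fail_before_first_chunk=True):
        # Simulates a provider stream that dies before yielding any chunk.
        if fail_before_first_chunk:
            return  # generator exits with zero yields
        yield "partial reply"

    def predict_buggy(inputs, history):
        for response in fake_stream():
            pass  # would push chunks to the UI here
        # The loop ran zero times, so 'response' was never bound:
        history.extend([inputs, response])  # UnboundLocalError

    def predict_fixed(inputs, history, model_name="qwen"):  # model_name is illustrative
        # The patch's one-line fix: seed 'response' before iterating.
        response = f"[Local Message] 等待{model_name}响应中 ..."
        for response in fake_stream():
            pass
        history.extend([inputs, response])  # placeholder or last chunk, never unbound

    history = []
    try:
        predict_buggy("hello", history)
    except UnboundLocalError as exc:
        print(exc)  # e.g. local variable 'response' referenced before assignment
    predict_fixed("hello", history)
    print(history)  # ['hello', '[Local Message] 等待qwen响应中 ...']

A side benefit: when a provider returns nothing, the recorded placeholder is
easier to diagnose than a crash in `history.extend`.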
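
The qianfan hunk also deletes a broken epilogue: it overwrote `response` with
the error string before comparing it to the waiting placeholder, so the
comparison was always false and `history` recorded the error message even after
a successful stream. A condensed illustration of that ordering bug, with a
hypothetical `model_name`:

    model_name = "qianfan"  # illustrative value only
    response = "the real streamed reply"                           # left by the for-loop
    response = f"[Local Message] {model_name}响应异常 ..."          # epilogue overwrote it first,
    if response == f"[Local Message] 等待{model_name}响应中 ...":   # so this was always False
        response = f"[Local Message] {model_name}响应异常 ..."
    print(response)  # the error string, even though streaming succeeded

Moving `history.extend([inputs, response])` into the `try`, directly after the
loop, records whatever the stream actually produced instead.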