From 73b22f85bef9addbfb9ae8ab85a538ce72211324 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Thu, 20 Jul 2023 11:09:22 +0800
Subject: [PATCH] compat third party gpt error handle

---
 config.py                     |  6 ++--
 request_llm/bridge_chatgpt.py | 67 +++++++++++++++++++----------------
 2 files changed, 39 insertions(+), 34 deletions(-)

diff --git a/config.py b/config.py
index 7c5fd89..d8e687a 100644
--- a/config.py
+++ b/config.py
@@ -32,9 +32,9 @@ else:
 
 # ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
 
-# 重新URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!! 高危设置!通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
-# 格式 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
-# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions":"https://reverse-proxy-url/v1/chat/completions"}
+# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
+# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
+# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
 API_URL_REDIRECT = {}
 
 
diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index ca47bd2..11457c9 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -174,9 +174,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 chunk = next(stream_response)
             except StopIteration:
                 # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里
-                from toolbox import regular_txt_to_markdown; tb_str = '```\n' + trimmed_format_exc() + '```'
-                chatbot[-1] = (chatbot[-1][0], f"[Local Message] 远程返回错误: \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode())}")
-                yield from update_ui(chatbot=chatbot, history=history, msg="远程返回错误:" + chunk.decode()) # 刷新界面
+                chunk_decoded = chunk.decode()
+                error_msg = chunk_decoded
+                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
+                yield from update_ui(chatbot=chatbot, history=history, msg="非Openai官方接口返回了错误:" + chunk.decode()) # 刷新界面
                 return
 
             # print(chunk.decode()[6:])
@@ -187,7 +188,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if chunk:
                 try:
                     chunk_decoded = chunk.decode()
-                    # 前者API2D的
+                    # 前者是API2D的结束条件,后者是OPENAI的结束条件
                     if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
                         # 判定为数据流的结束,gpt_replying_buffer也写完了
                         logging.info(f'[response] {gpt_replying_buffer}')
@@ -200,41 +201,45 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
                     yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
-                except Exception as e:
-                    traceback.print_exc()
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
                     chunk = get_full_error(chunk, stream_response)
                     chunk_decoded = chunk.decode()
                     error_msg = chunk_decoded
-                    openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
-                    if "reduce the length" in error_msg:
-                        if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
-                        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
-                                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
-                        # history = [] # 清除历史
-                    elif "does not exist" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
-                    elif "Incorrect API key" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
-                    elif "exceeded your current quota" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
-                    elif "account is not active" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
-                    elif "associated with a deactivated account" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
-                    elif "bad forward key" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
-                    elif "Not enough point" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
-                    else:
-                        from toolbox import regular_txt_to_markdown
-                        tb_str = '```\n' + trimmed_format_exc() + '```'
-                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
+                    chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
+                    print(error_msg)
                     return
 
 
+def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
+    from .bridge_all import model_info
+    openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
+    if "reduce the length" in error_msg:
+        if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
+        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
+                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
+        # history = [] # 清除历史
+    elif "does not exist" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
+    elif "Incorrect API key" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
+    elif "exceeded your current quota" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
+    elif "account is not active" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
+    elif "associated with a deactivated account" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
+    elif "bad forward key" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
+    elif "Not enough point" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
+    else:
+        from toolbox import regular_txt_to_markdown
+        tb_str = '```\n' + trimmed_format_exc() + '```'
+        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
+    return chatbot, history
+
 def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """
     整合所有信息,选择LLM模型,生成http请求,为发送请求做准备