diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py
index 55bfb41..4457e56 100644
--- a/crazy_functions/询问多个大语言模型.py
+++ b/crazy_functions/询问多个大语言模型.py
@@ -16,7 +16,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo'
+    llm_kwargs['llm_model'] = 'gpt-3.5-turbo&api2d-gpt-3.5-turbo'
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index bd90769..719a14f 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -60,7 +60,9 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
+            from .bridge_all import model_info
+            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
         except requests.exceptions.ReadTimeout as e:
             retry += 1
@@ -148,7 +150,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
-            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
+            from .bridge_all import model_info
+            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
         except:
             retry += 1
diff --git a/toolbox.py b/toolbox.py
index 3688a33..dfd3d1d 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -36,7 +36,6 @@ def ArgsGeneralWrapper(f):
         llm_kwargs = {
             'api_key': cookies['api_key'],
             'llm_model': llm_model,
-            'endpoint': model_info[llm_model]['endpoint'],
             'top_p':top_p,
             'max_length': max_length,
             'temperature':temperature,