Dynamic endpoint
parent 5316b5c373
commit deb8e5e137
@@ -16,7 +16,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot.append((txt, "Consulting ChatGPT and ChatGLM at the same time..."))
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # requesting gpt takes a while, so do an immediate UI update first

-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo'
+    llm_kwargs['llm_model'] = 'gpt-3.5-turbo&api2d-gpt-3.5-turbo'
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
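The new value joins two backend names with '&', which is how this project asks several models at once. As a hedged illustration only (query_backend and query_all are hypothetical helpers, not the repo's actual API), such a joined model string could be fanned out like this:

# Hypothetical sketch: dispatching an '&'-joined model string to several
# backends concurrently. The helper names are illustrative, not from the repo.
from concurrent.futures import ThreadPoolExecutor

def query_backend(model: str, prompt: str) -> str:
    # Placeholder for one per-model request (e.g. gpt-3.5-turbo or api2d-gpt-3.5-turbo).
    return f"[{model}] reply to: {prompt}"

def query_all(joined_models: str, prompt: str) -> dict:
    models = joined_models.split('&')  # e.g. 'gpt-3.5-turbo&api2d-gpt-3.5-turbo'
    with ThreadPoolExecutor(max_workers=len(models)) as pool:
        futures = {m: pool.submit(query_backend, m, prompt) for m in models}
        return {m: f.result() for m, f in futures.items()}

print(query_all('gpt-3.5-turbo&api2d-gpt-3.5-turbo', 'hello'))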
@@ -60,7 +60,9 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
+            from .bridge_all import model_info
+            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
         except requests.exceptions.ReadTimeout as e:
             retry += 1
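The deleted line read an endpoint frozen into llm_kwargs; the added lines resolve it per request from a registry in bridge_all keyed by the current model name. A minimal sketch of what such a model_info table could look like (the URLs and the max_token field are assumptions for illustration; only the 'endpoint' key appears in the commit):

# Hypothetical sketch of a model registry keyed by model name.
# The URLs and 'max_token' values are illustrative assumptions.
model_info = {
    'gpt-3.5-turbo': {
        'endpoint': 'https://api.openai.com/v1/chat/completions',  # assumed URL
        'max_token': 4096,
    },
    'api2d-gpt-3.5-turbo': {
        'endpoint': 'https://openai.api2d.net/v1/chat/completions',  # assumed URL
        'max_token': 4096,
    },
}

def resolve_endpoint(llm_model: str) -> str:
    # Look up the endpoint dynamically, mirroring the pattern in the diff.
    return model_info[llm_model]['endpoint']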
@@ -148,7 +150,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
-            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
+            from .bridge_all import model_info
+            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
         except:
             retry += 1
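The context lines around both hunks show the same pattern: loop until requests.post succeeds, counting failures. A standalone sketch of that bounded-retry idea, with assumed MAX_RETRY and TIMEOUT_SECONDS values (the real values live in the project's config, which this commit does not show):

# Minimal sketch of the retry loop visible in the context lines.
# MAX_RETRY and TIMEOUT_SECONDS are assumed values for illustration.
import requests

MAX_RETRY = 2
TIMEOUT_SECONDS = 30

def post_with_retry(endpoint, headers, payload, proxies=None):
    retry = 0
    while True:
        try:
            return requests.post(endpoint, headers=headers, proxies=proxies,
                                 json=payload, stream=True, timeout=TIMEOUT_SECONDS)
        except requests.exceptions.RequestException:
            retry += 1
            if retry > MAX_RETRY:
                raise  # give up after repeated timeouts/connection errors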
@@ -36,7 +36,6 @@ def ArgsGeneralWrapper(f):
     llm_kwargs = {
         'api_key': cookies['api_key'],
         'llm_model': llm_model,
-        'endpoint': model_info[llm_model]['endpoint'],
         'top_p':top_p,
         'max_length': max_length,
         'temperature':temperature,
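With the 'endpoint' key removed here, llm_kwargs no longer pins a URL at wrapper time, so reassigning llm_model later (as the first hunk does) cannot leave a stale endpoint behind. A small contrast sketch of the two approaches (URLs assumed for illustration):

# Sketch: why dropping the frozen 'endpoint' key matters.
# With a frozen endpoint, changing the model leaves a stale URL behind:
llm_kwargs = {'llm_model': 'gpt-3.5-turbo',
              'endpoint': 'https://api.openai.com/v1/chat/completions'}  # frozen at creation
llm_kwargs['llm_model'] = 'chatglm'  # the endpoint above is now wrong

# With dynamic lookup, the endpoint always follows the current model:
model_info = {'gpt-3.5-turbo': {'endpoint': 'https://api.openai.com/v1/chat/completions'},  # assumed URL
              'chatglm':       {'endpoint': 'http://localhost:8000/'}}                       # assumed URL
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']  # resolved per request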