diff --git a/config.py b/config.py
index 22104f0..737ff16 100644
--- a/config.py
+++ b/config.py
@@ -83,9 +83,9 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
-# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
+# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                         "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 44e0ae4..0639951 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -134,6 +134,15 @@ model_info = {
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
     },
+
+    "gpt-3.5-random": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 4096,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
 
     # azure openai
     "azure-gpt-3.5":{
diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index a1b6ba4..2323fb8 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -18,6 +18,7 @@
 import logging
 import traceback
 import requests
 import importlib
+import random
 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -288,9 +289,19 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     what_i_ask_now["role"] = "user"
     what_i_ask_now["content"] = inputs
     messages.append(what_i_ask_now)
+    model = llm_kwargs['llm_model'].strip('api2d-')
+    if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
+        model = random.choice([
+            "gpt-3.5-turbo",
+            "gpt-3.5-turbo-16k",
+            "gpt-3.5-turbo-0613",
+            "gpt-3.5-turbo-16k-0613",
+            "gpt-3.5-turbo-0301",
+        ])
+        logging.info("Random select model:" + model)
     payload = {
-        "model": llm_kwargs['llm_model'].strip('api2d-'),
+        "model": model,
         "messages": messages,
         "temperature": llm_kwargs['temperature'], # 1.0,
         "top_p": llm_kwargs['top_p'], # 1.0,