diff --git a/config.py b/config.py
index 0b4d119..34ac2f6 100644
--- a/config.py
+++ b/config.py
@@ -141,8 +141,14 @@ SLACK_CLAUDE_USER_TOKEN = ''
 
 # To use Azure, see the extra documentation in docs\use_azure.md
 AZURE_ENDPOINT = "https://your-api-name.openai.azure.com/"
+AZURE_URL_VERSION = 'openai/deployments/{v}/chat/completions?api-version=2023-05-15' # {v} is a placeholder for the deployment name
 AZURE_API_KEY = "Fill in your Azure OpenAI API key" # Recommended: fill this in under API_KEY instead; this option will be deprecated soon
-AZURE_ENGINE = "Fill in your deployment name" # See docs\use_azure.md
+AZURE_ENGINE_DICT = {
+    'gpt-35-16k': 1024 * 16,
+    'gpt-4': 1024 * 8
+} # See docs\use_azure.md; keys are your deployment names, values are each model's max token count (e.g. GPT-3.5 = 4096 = 1024 * 4)
+AVAIL_LLM_MODELS.extend([f"azure-{i}" for i in AZURE_ENGINE_DICT]) # automatically added to the model list
+
 
 
 # Use Newbing (not recommended; will be removed in the future)
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 8e2bacb..a6c33a3 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -48,12 +48,10 @@ class LazyloadTiktoken(object):
         return encoder.decode(*args, **kwargs)
 
 # Endpoint redirection
-API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
+API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
 openai_endpoint = "https://api.openai.com/v1/chat/completions"
 api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
 newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
-if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
-azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
 # Backward compatibility with legacy config
 try:
     API_URL, = get_conf("API_URL")
@@ -143,25 +141,6 @@ model_info = {
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
     },
-
-    # azure openai
-    "azure-gpt-3.5":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": azure_endpoint,
-        "max_token": 4096,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
-    "azure-gpt-4":{
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": azure_endpoint,
-        "max_token": 8192,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
 
     # api_2d
     "api2d-gpt-3.5-turbo": {
@@ -208,6 +187,21 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
     },
 }
+# Azure multi-model support
+AZURE_ENDPOINT, AZURE_ENGINE_DICT, AZURE_URL_VERSION = get_conf('AZURE_ENDPOINT', 'AZURE_ENGINE_DICT', 'AZURE_URL_VERSION')
+for azure in AZURE_ENGINE_DICT:
+    if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
+    azure_endpoint = AZURE_ENDPOINT + str(AZURE_URL_VERSION).replace('{v}', azure)
+    model_info.update({
+        f"azure-{azure}": {
+            "fn_with_ui": chatgpt_ui,
+            "fn_without_ui": chatgpt_noui,
+            "endpoint": azure_endpoint,
+            "max_token": AZURE_ENGINE_DICT[azure],
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
 
 # -=-=-=-=-=-=- The models below are newly added and may carry extra dependencies -=-=-=-=-=-=-
 if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
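
Note: the sketch below is not part of the diff; it only illustrates how the new settings fit together. bridge_all.py substitutes each key of AZURE_ENGINE_DICT into the `{v}` placeholder of AZURE_URL_VERSION to build one endpoint per Azure deployment, and registers it under the model name `azure-<deployment>` with the dict value as its token limit. The literal values here are the config.py defaults shown above; the endpoint URL is a placeholder, and the print call is for illustration only.

```python
# Minimal sketch of the endpoint construction performed in bridge_all.py above.
# The three settings mirror the config.py defaults from this diff (placeholders, not real credentials).
AZURE_ENDPOINT = "https://your-api-name.openai.azure.com/"              # Azure resource URL
AZURE_URL_VERSION = 'openai/deployments/{v}/chat/completions?api-version=2023-05-15'
AZURE_ENGINE_DICT = {'gpt-35-16k': 1024 * 16, 'gpt-4': 1024 * 8}        # deployment name -> max tokens

for deployment, max_token in AZURE_ENGINE_DICT.items():
    if not AZURE_ENDPOINT.endswith('/'):
        AZURE_ENDPOINT += '/'
    # Replace the {v} placeholder with the deployment name to get the final endpoint
    endpoint = AZURE_ENDPOINT + AZURE_URL_VERSION.replace('{v}', deployment)
    # In bridge_all.py this pair is registered in model_info under the key f"azure-{deployment}"
    print(f"azure-{deployment}: max_token={max_token}\n  {endpoint}")
```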