From 9a1aff5bb6446b2d9b5c6a5d9c6880799774cc5c Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Mon, 30 Oct 2023 11:10:05 +0800
Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dget=5Fconf=E6=8E=A5=E5=8F=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 main.py                            | 2 +-
 request_llms/bridge_qianfan.py     | 2 +-
 request_llms/bridge_spark.py       | 2 +-
 request_llms/bridge_stackclaude.py | 4 ++--
 toolbox.py                         | 1 -
 5 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/main.py b/main.py
index ee8f5cf..bf84382 100644
--- a/main.py
+++ b/main.py
@@ -433,7 +433,7 @@ def main():
         server_port=PORT,
         favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
         auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"])
 
     # 如果需要在二级路径下运行
     # CUSTOM_PATH = get_conf('CUSTOM_PATH')
diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py
index 99f0623..81e7a9c 100644
--- a/request_llms/bridge_qianfan.py
+++ b/request_llms/bridge_qianfan.py
@@ -75,7 +75,7 @@ def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
 
 
 def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
-    BAIDU_CLOUD_QIANFAN_MODEL, = get_conf('BAIDU_CLOUD_QIANFAN_MODEL')
+    BAIDU_CLOUD_QIANFAN_MODEL = get_conf('BAIDU_CLOUD_QIANFAN_MODEL')
 
     url_lib = {
         "ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" ,
diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py
index d6ff42f..6ba39ee 100644
--- a/request_llms/bridge_spark.py
+++ b/request_llms/bridge_spark.py
@@ -8,7 +8,7 @@ from multiprocessing import Process, Pipe
 model_name = '星火认知大模型'
 
 def validate_key():
-    XFYUN_APPID, = get_conf('XFYUN_APPID', )
+    XFYUN_APPID = get_conf('XFYUN_APPID')
     if XFYUN_APPID == '00000000' or XFYUN_APPID == '': return False
     return True
 
diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py
index a393984..0b42a17 100644
--- a/request_llms/bridge_stackclaude.py
+++ b/request_llms/bridge_stackclaude.py
@@ -36,7 +36,7 @@ try:
         CHANNEL_ID = None
 
         async def open_channel(self):
-            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
+            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
             self.CHANNEL_ID = response["channel"]["id"]
 
         async def chat(self, text):
@@ -51,7 +51,7 @@ try:
             # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题
             resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
             msg = [msg for msg in resp["messages"]
-                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
+                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
             return msg
         except (SlackApiError, KeyError) as e:
             raise RuntimeError(f"获取Slack消息失败。")
diff --git a/toolbox.py b/toolbox.py
index 5b7a751..8c6e7fa 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -732,7 +732,6 @@ def select_api_key(keys, llm_model):
         raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")
 
     api_key = random.choice(avail_key_list) # 随机负载均衡
-    if ENABLE
     return api_key
 
 def read_env_variable(arg, default_value):