More model switching (更多模型切换)

commit 9bd8511ba4
parent 03ba072c16
config.py
@@ -46,14 +46,12 @@ WEB_PORT = -1
 MAX_RETRY = 2

 # OpenAI model selection (gpt-4 is currently only available to users whose access application was approved)
-LLM_MODEL = "gpt-3.5-turbo" # options: "chatglm", "tgui:anymodel@localhost:7865"
+LLM_MODEL = "gpt-3.5-turbo" # options: "chatglm"
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]

 # Execution device (CPU/GPU) for local LLM models such as ChatGLM
 LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"

-# The OpenAI API_URL
-API_URL = "https://api.openai.com/v1/chat/completions"
-
 # Number of concurrent gradio threads (no need to change)
 CONCURRENT_COUNT = 100
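The new AVAIL_LLM_MODELS list is what the model dropdown in main.py (below) is populated from, while LLM_MODEL stays the default selection. As a minimal sketch, assuming only the config above, a startup sanity check could tie the two together (not part of this commit):

    from toolbox import get_conf

    # get_conf returns the requested config values in order
    LLM_MODEL, AVAIL_LLM_MODELS = get_conf('LLM_MODEL', 'AVAIL_LLM_MODELS')
    # the default model should be one of the models offered in the dropdown
    assert LLM_MODEL in AVAIL_LLM_MODELS, f"{LLM_MODEL} missing from AVAIL_LLM_MODELS"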
main.py (6 lines changed)
@@ -5,8 +5,8 @@ def main():
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
     # You are advised to copy your secrets, such as API keys and proxy URLs, into a config_private.py, so they are not accidentally pushed to github for others to see
-    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
+        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')

     # If WEB_PORT is -1, pick a random web port
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -101,7 +101,7 @@ def main():
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                     max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
                     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
-                    md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)
+                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="").style(container=False)

                     gr.Markdown(description)
                 with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
request_llm/bridge_all.py
@@ -21,38 +21,42 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 from .bridge_tgui import predict as tgui_ui

 methods = {
     "openai-no-ui": chatgpt_noui,
     "openai-ui": chatgpt_ui,

     "chatglm-no-ui": chatglm_noui,
     "chatglm-ui": chatglm_ui,

     "tgui-no-ui": tgui_noui,
     "tgui-ui": tgui_ui,
 }
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

 model_info = {
     # openai
     "gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": "https://api.openai.com/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },

     "gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": "https://api.openai.com/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-4"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
     },

     # api_2d
-    "gpt-3.5-turbo-api2d": {
+    "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": "https://openai.api2d.net/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },

-    "gpt-4-api2d": {
+    "api2d-gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": "https://openai.api2d.net/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-4"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
@@ -60,12 +64,14 @@ model_info = {

     # chatglm
     "chatglm": {
         "fn_with_ui": chatglm_ui,
         "fn_without_ui": chatglm_noui,
         "endpoint": None,
         "max_token": 1024,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },

 }
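With every backend described by the same table-entry shape, supporting one more OpenAI-compatible endpoint reduces to registering an entry. A hedged sketch with a hypothetical model name and a placeholder endpoint (not part of the commit):

    model_info["my-proxy-gpt-3.5"] = {  # hypothetical model name
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": "https://example.com/v1/chat/completions",  # placeholder URL
        "max_token": 4096,
        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
    }
    # then list "my-proxy-gpt-3.5" in AVAIL_LLM_MODELS (config.py) so the dropdown offers it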
@@ -85,7 +91,6 @@ def LLM_CATCH_EXCEPTION(f):
             return tb_str
     return decorated

-colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
@@ -109,12 +114,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"

         # If only one large language model is queried:
-        if model.startswith('gpt'):
-            method = methods['openai-no-ui']
-        elif model == 'chatglm':
-            method = methods['chatglm-no-ui']
-        elif model.startswith('tgui'):
-            method = methods['tgui-no-ui']
+        method = model_info[model]["fn_without_ui"]
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # If several large language models are queried at the same time:
@@ -129,12 +129,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         futures = []
         for i in range(n_model):
             model = models[i]
-            if model.startswith('gpt'):
-                method = methods['openai-no-ui']
-            elif model == 'chatglm':
-                method = methods['chatglm-no-ui']
-            elif model.startswith('tgui'):
-                method = methods['tgui-no-ui']
+            method = model_info[model]["fn_without_ui"]
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
             future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
@@ -184,12 +179,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
    chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the dialog window directly
    additional_fn indicates which button was clicked; the buttons are defined in functional.py
    """
-    if llm_kwargs['llm_model'].startswith('gpt'):
-        method = methods['openai-ui']
-    elif llm_kwargs['llm_model'] == 'chatglm':
-        method = methods['chatglm-ui']
-    elif llm_kwargs['llm_model'].startswith('tgui'):
-        method = methods['tgui-ui']
-
+    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
     yield from method(inputs, llm_kwargs, *args, **kwargs)
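All three call sites above replace a per-model if/elif chain with a lookup into model_info, so model selection becomes table-driven. A minimal sketch of the resulting dispatch pattern, using only names from the diff (the helper function itself is illustrative, not in the code):

    def route(llm_model, with_ui):
        info = model_info[llm_model]  # an unknown model name fails loudly with KeyError
        return info["fn_with_ui"] if with_ui else info["fn_without_ui"]

    # predict() then reduces to:
    #   method = route(llm_kwargs['llm_model'], with_ui=True)
    #   yield from method(inputs, llm_kwargs, *args, **kwargs)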
request_llm/bridge_chatgpt.py
@@ -21,9 +21,9 @@ import importlib

 # config_private.py holds your private settings, such as API keys and proxy URLs
 # When reading the config, first check for a private config_private file (not tracked by git); if it exists, it overrides the original config file
-from toolbox import get_conf, update_ui
-proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
-    get_conf('proxies', 'API_URL', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key
+proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
+    get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')

 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                   '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
@@ -60,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            response = requests.post(API_URL, headers=headers, proxies=proxies,
+            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
         except requests.exceptions.ReadTimeout as e:
             retry += 1
@@ -113,14 +113,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
        chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the dialog window directly
        additional_fn indicates which button was clicked; the buttons are defined in functional.py
    """
-    if inputs.startswith('sk-') and len(inputs) == 51:
+    if is_any_api_key(inputs):
         chatbot._cookies['api_key'] = inputs
         chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
         yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # refresh the UI
         return
-    elif len(chatbot._cookies['api_key']) != 51:
+    elif not is_any_api_key(chatbot._cookies['api_key']):
         chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
-        yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # refresh the UI
+        yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # refresh the UI
         return

     if additional_fn is not None:
@@ -143,7 +143,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
-            response = requests.post(API_URL, headers=headers, proxies=proxies,
+            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
         except:
             retry += 1
@@ -202,12 +202,14 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """
        Integrate all information, select the LLM model, and generate the http request, in preparation for sending it
     """
-    if len(llm_kwargs['api_key']) != 51:
+    if not is_any_api_key(llm_kwargs['api_key']):
         raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")

+    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+
     headers = {
         "Content-Type": "application/json",
-        "Authorization": f"Bearer {llm_kwargs['api_key']}"
+        "Authorization": f"Bearer {api_key}"
     }

     conversation_cnt = len(history) // 2
@@ -235,7 +237,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     messages.append(what_i_ask_now)

     payload = {
-        "model": llm_kwargs['llm_model'],
+        "model": llm_kwargs['llm_model'].strip('api2d-'),
         "messages": messages, 
         "temperature": llm_kwargs['temperature'],  # 1.0,
         "top_p": llm_kwargs['top_p'],  # 1.0,
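One caveat on the last hunk: str.strip removes a character set from both ends, not a literal prefix, so .strip('api2d-') strips any leading or trailing run of the characters a, p, i, 2, d and -. That happens to be harmless for the model names registered in this commit, but it is fragile. An illustration plus a prefix-safe alternative (a hedged sketch, not part of the commit):

    "api2d-gpt-4".strip('api2d-')          # -> 'gpt-4' (works by luck)
    "api2d-gpt-3.5-turbo".strip('api2d-')  # -> 'gpt-3.5-turbo' (also fine)
    "api2d-davinci".strip('api2d-')        # -> 'vinc' (hypothetical name, corrupted)

    def drop_api2d_prefix(name):
        # remove only the literal 'api2d-' prefix, leaving everything else intact
        return name[len('api2d-'):] if name.startswith('api2d-') else name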
toolbox.py (125 lines changed)
@@ -1,13 +1,10 @@
 import markdown
 import mdtex2html
 import threading
 import importlib
 import traceback
 import inspect
 import re
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache

 ############################### Plugin input/output interface #######################################
 class ChatBotWithCookies(list):
     def __init__(self, cookie):
@@ -28,6 +25,7 @@ def ArgsGeneralWrapper(f):
     Decorator that reorganizes the input arguments, changing their order and structure.
     """
     def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, *args):
+        from request_llm.bridge_all import model_info
         txt_passon = txt
         if txt == "" and txt2 != "": txt_passon = txt2
         # introduce a chatbot with cookies
@@ -38,6 +36,7 @@ def ArgsGeneralWrapper(f):
         llm_kwargs = {
             'api_key': cookies['api_key'],
             'llm_model': llm_model,
+            'endpoint': model_info[llm_model]['endpoint'],
             'top_p':top_p, 
             'max_length': max_length,
             'temperature':temperature,
@@ -56,8 +55,47 @@ def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
     """
     assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
     yield chatbot.get_cookies(), chatbot, history, msg
-############################### ################## #######################################
+##########################################################################################
+
+def CatchException(f):
+    """
+    Decorator that catches exceptions in function f, wraps them into a generator to return, and displays them in the chat.
+    """
+    @wraps(f)
+    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+        try:
+            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+        except Exception as e:
+            from check_proxy import check_proxy
+            from toolbox import get_conf
+            proxies, = get_conf('proxies')
+            tb_str = '```\n' + traceback.format_exc() + '```'
+            if chatbot is None or len(chatbot) == 0:
+                chatbot = [["插件调度异常", "异常原因"]]
+            chatbot[-1] = (chatbot[-1][0],
+                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
+            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # refresh the UI
+    return decorated
+
+
+def HotReload(f):
+    """
+    HotReload decorator, used to hot-reload Python function plugins.
+    Hot reloading means updating a function's code while the program keeps running, so the change takes effect immediately.
+    Inside the decorator, wraps(f) preserves the metadata of the original function, and an inner function named decorated is defined.
+    The inner function reloads and fetches the function's module using importlib's reload and inspect's getmodule,
+    then looks the function up by name and rebinds it from the freshly loaded module.
+    Finally, the yield from statement runs the reloaded function in place of the decorated one.
+    In the end, the decorator returns the inner function, which always executes the latest version of the original definition.
+    """
+    @wraps(f)
+    def decorated(*args, **kwargs):
+        fn_name = f.__name__
+        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
+        yield from f_hot_reload(*args, **kwargs)
+    return decorated


 ####################################### Other small utilities #####################################

 def get_reduce_token_percent(text):
     """
@@ -116,43 +154,6 @@ def regular_txt_to_markdown(text):
     return text


-def CatchException(f):
-    """
-        Decorator that catches exceptions in function f, wraps them into a generator to return, and displays them in the chat.
-    """
-    @wraps(f)
-    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
-        try:
-            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
-        except Exception as e:
-            from check_proxy import check_proxy
-            from toolbox import get_conf
-            proxies, = get_conf('proxies')
-            tb_str = '```\n' + traceback.format_exc() + '```'
-            if chatbot is None or len(chatbot) == 0:
-                chatbot = [["插件调度异常", "异常原因"]]
-            chatbot[-1] = (chatbot[-1][0],
-                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
-            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # refresh the UI
-    return decorated
-
-
-def HotReload(f):
-    """
-    HotReload decorator, used to hot-reload Python function plugins.
-    Hot reloading means updating a function's code while the program keeps running, so the change takes effect immediately.
-    Inside the decorator, wraps(f) preserves the metadata of the original function, and an inner function named decorated is defined.
-    The inner function reloads and fetches the function's module using importlib's reload and inspect's getmodule,
-    then looks the function up by name and rebinds it from the freshly loaded module.
-    Finally, the yield from statement runs the reloaded function in place of the decorated one.
-    In the end, the decorator returns the inner function, which always executes the latest version of the original definition.
-    """
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        fn_name = f.__name__
-        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
-        yield from f_hot_reload(*args, **kwargs)
-    return decorated


 def report_execption(chatbot, history, a, b):
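For orientation, a self-contained sketch of what the (unchanged, merely relocated) HotReload decorator does, using a hypothetical throwaway module. Reloading depends on the module file's modification time changing and on the current directory being importable, so this is illustrative rather than a robust test:

    import pathlib
    from toolbox import HotReload

    # write a tiny generator-based plugin module next to the script
    pathlib.Path("plugin_demo.py").write_text("def gen():\n    yield 'v1'\n")
    import plugin_demo
    wrapped = HotReload(plugin_demo.gen)
    print(list(wrapped()))  # ['v1']

    # edit the plugin on disk; the wrapper re-imports it on the next call
    pathlib.Path("plugin_demo.py").write_text("def gen():\n    yield 'v2'\n")
    print(list(wrapped()))  # ['v2'], picked up without restarting the program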
@@ -410,9 +411,43 @@ def on_report_generated(files, chatbot):
     return report_files, chatbot

 def is_openai_api_key(key):
     # A valid API_KEY is "sk-" followed by a combination of 48 upper/lower-case letters and digits
     API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
-    return API_MATCH
+    return bool(API_MATCH)

+def is_api2d_key(key):
+    if key.startswith('fk') and len(key) == 41:
+        return True
+    else:
+        return False
+
+def is_any_api_key(key):
+    if ',' in key:
+        keys = key.split(',')
+        for k in keys:
+            if is_any_api_key(k): return True
+        return False
+    else:
+        return is_openai_api_key(key) or is_api2d_key(key)
+
+
+def select_api_key(keys, llm_model):
+    import random
+    avail_key_list = []
+    key_list = keys.split(',')
+
+    if llm_model.startswith('gpt-'):
+        for k in key_list:
+            if is_openai_api_key(k): avail_key_list.append(k)
+
+    if llm_model.startswith('api2d-'):
+        for k in key_list:
+            if is_api2d_key(k): avail_key_list.append(k)
+
+    if len(avail_key_list) == 0:
+        raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。")
+
+    api_key = random.choice(avail_key_list) # random load balancing
+    return api_key
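A quick usage sketch of the new key helpers, with placeholder strings of the expected shapes rather than real credentials:

    mixed = "sk-" + "A" * 48 + "," + "fk" + "B" * 39  # one OpenAI-style key, one API2D-style key
    assert is_any_api_key(mixed)
    select_api_key(mixed, "gpt-3.5-turbo")  # returns the sk- key
    select_api_key(mixed, "api2d-gpt-4")    # returns the fk key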
 @lru_cache(maxsize=128)
 def read_single_conf_with_lru_cache(arg):
@@ -423,7 +458,7 @@ def read_single_conf_with_lru_cache(arg):
         r = getattr(importlib.import_module('config'), arg)
     # When reading API_KEY, check whether the user forgot to edit the config
     if arg == 'API_KEY':
-        if is_openai_api_key(r):
+        if is_any_api_key(r):
             print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
         else:
             print亮红( "[API_KEY] 正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \