From 3725122de15ba5995372b522a6fdabd49c4ccc8c Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 9 Apr 2023 21:23:21 +0800 Subject: [PATCH] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E4=B8=B4=E6=97=B6=E8=BE=93?= =?UTF-8?q?=E5=85=A5api-key=E7=9A=84=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- colorful.py | 91 ++++++++++++++++++++++ crazy_functions/代码重写为全英文_多线程.py | 6 +- main.py | 6 +- request_llm/bridge_chatgpt.py | 19 ++++- requirements.txt | 1 + show_math.py | 80 ------------------- toolbox.py | 26 ++++--- 7 files changed, 130 insertions(+), 99 deletions(-) create mode 100644 colorful.py delete mode 100644 show_math.py diff --git a/colorful.py b/colorful.py new file mode 100644 index 0000000..d90972b --- /dev/null +++ b/colorful.py @@ -0,0 +1,91 @@ +import platform +from sys import stdout + +if platform.system()=="Linux": + pass +else: + from colorama import init + init() + +# Do you like the elegance of Chinese characters? +def print红(*kw,**kargs): + print("\033[0;31m",*kw,"\033[0m",**kargs) +def print绿(*kw,**kargs): + print("\033[0;32m",*kw,"\033[0m",**kargs) +def print黄(*kw,**kargs): + print("\033[0;33m",*kw,"\033[0m",**kargs) +def print蓝(*kw,**kargs): + print("\033[0;34m",*kw,"\033[0m",**kargs) +def print紫(*kw,**kargs): + print("\033[0;35m",*kw,"\033[0m",**kargs) +def print靛(*kw,**kargs): + print("\033[0;36m",*kw,"\033[0m",**kargs) + +def print亮红(*kw,**kargs): + print("\033[1;31m",*kw,"\033[0m",**kargs) +def print亮绿(*kw,**kargs): + print("\033[1;32m",*kw,"\033[0m",**kargs) +def print亮黄(*kw,**kargs): + print("\033[1;33m",*kw,"\033[0m",**kargs) +def print亮蓝(*kw,**kargs): + print("\033[1;34m",*kw,"\033[0m",**kargs) +def print亮紫(*kw,**kargs): + print("\033[1;35m",*kw,"\033[0m",**kargs) +def print亮靛(*kw,**kargs): + print("\033[1;36m",*kw,"\033[0m",**kargs) + + + +def print亮红(*kw,**kargs): + print("\033[1;31m",*kw,"\033[0m",**kargs) +def print亮绿(*kw,**kargs): + print("\033[1;32m",*kw,"\033[0m",**kargs) +def print亮黄(*kw,**kargs): + print("\033[1;33m",*kw,"\033[0m",**kargs) +def print亮蓝(*kw,**kargs): + print("\033[1;34m",*kw,"\033[0m",**kargs) +def print亮紫(*kw,**kargs): + print("\033[1;35m",*kw,"\033[0m",**kargs) +def print亮靛(*kw,**kargs): + print("\033[1;36m",*kw,"\033[0m",**kargs) + +print_red = print红 +print_green = print绿 +print_yellow = print黄 +print_blue = print蓝 +print_purple = print紫 +print_indigo = print靛 + +print_bold_red = print亮红 +print_bold_green = print亮绿 +print_bold_yellow = print亮黄 +print_bold_blue = print亮蓝 +print_bold_purple = print亮紫 +print_bold_indigo = print亮靛 + +if not stdout.isatty(): + # redirection, avoid a fucked up log file + print红 = print + print绿 = print + print黄 = print + print蓝 = print + print紫 = print + print靛 = print + print亮红 = print + print亮绿 = print + print亮黄 = print + print亮蓝 = print + print亮紫 = print + print亮靛 = print + print_red = print + print_green = print + print_yellow = print + print_blue = print + print_purple = print + print_indigo = print + print_bold_red = print + print_bold_green = print + print_bold_yellow = print + print_bold_blue = print + print_bold_purple = print + print_bold_indigo = print \ No newline at end of file diff --git a/crazy_functions/代码重写为全英文_多线程.py b/crazy_functions/代码重写为全英文_多线程.py index 608169f..eef9517 100644 --- a/crazy_functions/代码重写为全英文_多线程.py +++ b/crazy_functions/代码重写为全英文_多线程.py @@ -29,16 +29,16 @@ def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_ # 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议 try: - import openai, transformers + import tiktoken 
except: report_execption(chatbot, history, a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade openai transformers```。") + b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return # 第3步:集合文件 - import time, glob, os, shutil, re, openai + import time, glob, os, shutil, re os.makedirs('gpt_log/generated_english_version', exist_ok=True) os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True) file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ diff --git a/main.py b/main.py index 52f6d02..a722eb3 100644 --- a/main.py +++ b/main.py @@ -4,8 +4,8 @@ from request_llm.bridge_chatgpt import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 -proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT') +proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ + get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT @@ -52,7 +52,7 @@ if LAYOUT == "TOP-DOWN": cancel_handles = [] with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: gr.HTML(title_html) - cookies = gr.State({}) + cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL}) with gr_L1(): with gr_L2(scale=2): chatbot = gr.Chatbot() diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py index f3159cd..4efdf5e 100644 --- a/request_llm/bridge_chatgpt.py +++ b/request_llm/bridge_chatgpt.py @@ -145,6 +145,16 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 additional_fn代表点击的哪个按钮,按钮见functional.py """ + if inputs.startswith('sk-') and len(inputs) == 51: + chatbot._cookies['api_key'] = inputs + chatbot.append(("输入已识别为openai的api_key", "api_key已导入")) + yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 + return + elif len(chatbot._cookies['api_key']) != 51: + chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")) + yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 + return + if additional_fn is not None: import core_functional importlib.reload(core_functional) # 热更新prompt @@ -224,9 +234,12 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): """ 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 """ + if len(llm_kwargs['api_key']) != 51: + raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 
长效解决方案:在config.py中配置。") + headers = { "Content-Type": "application/json", - "Authorization": f"Bearer {API_KEY}" + "Authorization": f"Bearer {llm_kwargs['api_key']}" } conversation_cnt = len(history) // 2 @@ -254,7 +267,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): messages.append(what_i_ask_now) payload = { - "model": LLM_MODEL, + "model": llm_kwargs['llm_model'], "messages": messages, "temperature": llm_kwargs['temperature'], # 1.0, "top_p": llm_kwargs['top_p'], # 1.0, @@ -263,7 +276,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): "presence_penalty": 0, "frequency_penalty": 0, } - print(f" {LLM_MODEL} : {conversation_cnt} : {inputs[:100]}") + print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]}") return headers,payload diff --git a/requirements.txt b/requirements.txt index 93071dc..b3253be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ python-markdown-math beautifulsoup4 latex2mathml mdtex2html +colorama tiktoken Markdown pygments diff --git a/show_math.py b/show_math.py deleted file mode 100644 index 80fa881..0000000 --- a/show_math.py +++ /dev/null @@ -1,80 +0,0 @@ -# This program is written by: https://github.com/polarwinkel/mdtex2html - -from latex2mathml.converter import convert as tex2mathml -import re - -incomplete = 'formula incomplete' -convError = 'LaTeX-convert-error' - -def convert(mdtex, extensions=[], splitParagraphs=True): - ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML ''' - found = False - # handle all paragraphs separately (prevents aftereffects) - if splitParagraphs: - parts = re.split("\n\n", mdtex) - result = '' - for part in parts: - result += convert(part, extensions, splitParagraphs=False) - return result - # find first $$-formula: - parts = re.split('\${2}', mdtex, 2) - if len(parts)>1: - found = True - result = convert(parts[0], extensions, splitParagraphs=False)+'\n' - try: - result += '
<div class="blockformula">'+tex2mathml(parts[1])+'</div>\n'
-        except:
-            result += '<div class="blockformula">'+convError+'</div>'
-        if len(parts)==3:
-            result += convert(parts[2], extensions, splitParagraphs=False)
-        else:
-            result += '<div class="blockformula">'+incomplete+'</div>'
-    # else find first $-formulas:
-    else:
-        parts = re.split('\${1}', mdtex, 2)
-        if len(parts)>1 and not found:
-            found = True
-            try:
-                mathml = tex2mathml(parts[1])
-            except:
-                mathml = convError
-            if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
-                parts[0]=parts[0]+'&#x200b;'
-            if len(parts)==3:
-                result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False)
-            else:
-                result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
-        # else find first \[..\]-equation:
-        else:
-            parts = re.split(r'\\\[', mdtex, 1)
-            if len(parts)>1 and not found:
-                found = True
-                result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
-                parts = re.split(r'\\\]', parts[1], 1)
-                try:
-                    result += '<div class="blockformula">'+tex2mathml(parts[0])+'</div>\n'
-                except:
-                    result += '<div class="blockformula">'+convError+'</div>'
-                if len(parts)==2:
-                    result += convert(parts[1], extensions, splitParagraphs=False)
-                else:
-                    result += '<div class="blockformula">'+incomplete+'</div
' - # else find first \(..\)-equation: - else: - parts = re.split(r'\\\(', mdtex, 1) - if len(parts)>1 and not found: - found = True - subp = re.split(r'\\\)', parts[1], 1) - try: - mathml = tex2mathml(subp[0]) - except: - mathml = convError - if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula! - parts[0]=parts[0]+'​' - if len(subp)==2: - result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False) - else: - result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False) - if not found: - result = mdtex - return result diff --git a/toolbox.py b/toolbox.py index e2c53be..47e286a 100644 --- a/toolbox.py +++ b/toolbox.py @@ -12,7 +12,7 @@ from functools import wraps, lru_cache ############################### 插件输入输出接驳区 ####################################### class ChatBotWithCookies(list): def __init__(self, cookie): - self._cookie = cookie + self._cookies = cookie def write_list(self, list): for t in list: @@ -22,7 +22,7 @@ class ChatBotWithCookies(list): return [t for t in self] def get_cookies(self): - return self._cookie + return self._cookies def ArgsGeneralWrapper(f): """ @@ -37,10 +37,13 @@ def ArgsGeneralWrapper(f): 'temperature':temperature, }) llm_kwargs = { + 'api_key': cookies['api_key'], + 'llm_model': cookies['llm_model'], 'top_p':top_p, 'temperature':temperature, } plugin_kwargs = { + # 目前还没有 } chatbot_with_cookie = ChatBotWithCookies(cookies) chatbot_with_cookie.write_list(chatbot) @@ -473,27 +476,30 @@ def on_report_generated(files, chatbot): chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。']) return report_files, chatbot +def is_openai_api_key(key): + # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合 + API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key) + return API_MATCH @lru_cache(maxsize=128) def read_single_conf_with_lru_cache(arg): + from colorful import print亮红, print亮绿 try: r = getattr(importlib.import_module('config_private'), arg) except: r = getattr(importlib.import_module('config'), arg) # 在读取API_KEY时,检查一下是不是忘了改config if arg == 'API_KEY': - # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合 - API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r) - if API_MATCH: - print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功") + if is_openai_api_key(r): + print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功") else: - assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \ - "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)" + print亮红( "[API_KEY] 正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \ + "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)") if arg == 'proxies': if r is None: - print('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。') + print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。') else: - print('[PROXY] 网络代理状态:已配置。配置信息如下:', r) + print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r) assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。' return r
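
The functional core of this patch is the temporary api_key path: predict() now recognizes a pasted OpenAI key at the top of its input handling and stores it in the session cookies instead of sending it to the model, and toolbox.py gains is_openai_api_key() for the format check. A minimal standalone sketch of that logic follows; is_openai_api_key mirrors the regex added to toolbox.py, while handle_user_input is an illustrative helper (not a function in the patch) that condenses the two new branches at the top of predict().

```python
import re

def is_openai_api_key(key):
    # The patch treats a valid key as "sk-" followed by 48 letters/digits (51 chars total).
    return bool(re.match(r"sk-[a-zA-Z0-9]{48}$", key))

def handle_user_input(inputs, cookies):
    # Hypothetical helper mirroring the new branches in predict():
    # a pasted key is captured into the session cookies instead of being sent to the model.
    if is_openai_api_key(inputs):
        cookies['api_key'] = inputs
        return "api_key已导入"
    if not is_openai_api_key(cookies.get('api_key', '')):
        return "缺少api_key:请在输入区粘贴api_key,或在config.py中配置"
    return None  # a normal question; proceed to build the request

if __name__ == "__main__":
    cookies = {'api_key': ''}
    print(handle_user_input("sk-" + "a" * 48, cookies))  # -> api_key已导入
    print(handle_user_input("你好", cookies))             # -> None, key is now present
```

Because the captured key lives in per-session Gradio state, it lasts only for the current browser session; a restart falls back to whatever config.py provides.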
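The key reaches the request layer through session state rather than a module-level constant: main.py seeds cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL}), ArgsGeneralWrapper copies both fields into llm_kwargs, and generate_payload() builds the Authorization header from llm_kwargs['api_key']. A simplified sketch of that hand-off, using only what the patch shows; ChatBotWithCookies is trimmed to the relevant methods, and build_llm_kwargs / build_headers are illustrative stand-ins for ArgsGeneralWrapper and generate_payload.

```python
class ChatBotWithCookies(list):
    # The chat-history list, with the session cookies riding along (as in toolbox.py).
    def __init__(self, cookies):
        self._cookies = cookies

    def get_cookies(self):
        return self._cookies

def build_llm_kwargs(cookies, top_p=1.0, temperature=1.0):
    # Mirrors what ArgsGeneralWrapper assembles before calling predict() or a plugin.
    return {
        'api_key': cookies['api_key'],
        'llm_model': cookies['llm_model'],
        'top_p': top_p,
        'temperature': temperature,
    }

def build_headers(llm_kwargs):
    # generate_payload() now reads the key from llm_kwargs instead of a global API_KEY.
    return {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {llm_kwargs['api_key']}",
    }

if __name__ == "__main__":
    chatbot = ChatBotWithCookies({'api_key': 'sk-' + 'a' * 48, 'llm_model': 'gpt-3.5-turbo'})
    llm_kwargs = build_llm_kwargs(chatbot.get_cookies())
    print(build_headers(llm_kwargs)["Authorization"][:14])  # Bearer sk-aaaa
```

Keeping the key and model name in per-session state is what allows the paste-a-key workflow above to work without editing config.py.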
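The new colorful.py simply wraps print() in ANSI escape sequences, initializing colorama on non-Linux systems so the escapes render on Windows, and degrading to plain print() when stdout is not a TTY so redirected log files stay readable. A short usage sketch, assuming colorful.py is importable from the project root:

```python
# colorful.py exposes Chinese-named helpers plus English aliases (print_bold_green, ...).
from colorful import print亮绿, print亮红

print亮绿("[API_KEY] API_KEY 导入成功")      # bright green: "\033[1;32m" ... "\033[0m"
print亮红("[PROXY] 网络代理状态:未配置")    # bright red:   "\033[1;31m" ... "\033[0m"
```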