diff --git a/config.py b/config.py
index 55510fe..88e0df2 100644
--- a/config.py
+++ b/config.py
@@ -74,13 +74,13 @@ MAX_RETRY = 2
 
 # 插件分类默认选项
-DEFAULT_FN_GROUPS = ['对话', '编程', '学术']
+DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 
 
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
-                    "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
 # P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
 #                         "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
 
@@ -183,6 +183,9 @@ ALLOW_RESET_CONFIG = False
 PATH_PRIVATE_UPLOAD = "private_upload"
 # 日志文件夹的位置,请勿修改
 PATH_LOGGING = "gpt_log"
+# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]
+
 
 """
 在线大模型配置关联关系示意图
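The new `WHEN_TO_USE_PROXY` whitelist is read by `toolbox.ProxyNetworkActivate`, which is patched at the end of this diff. A minimal standalone sketch of the gating behavior follows; the class name and proxy URL here are placeholders, not project API:

```python
import os

# Mirrors the new config entry; the real code reads it via get_conf('WHEN_TO_USE_PROXY').
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]

class ProxyGateSketch:
    """Toy version of toolbox.ProxyNetworkActivate: the proxy only switches on
    when the caller's task label is whitelisted."""
    def __init__(self, task=None):
        # No label: proxy is unconditionally active (backward compatible).
        self.valid = True if task is None else (task in WHEN_TO_USE_PROXY)

    def __enter__(self):
        if self.valid:
            os.environ['HTTP_PROXY'] = os.environ['HTTPS_PROXY'] = 'http://localhost:7890'  # placeholder URL
        return self

    def __exit__(self, exc_type, exc_value, tb):
        os.environ.pop('HTTP_PROXY', None)
        os.environ.pop('HTTPS_PROXY', None)

with ProxyGateSketch('Download_LLM'):
    pass  # whitelisted: downloads in this block would go through the proxy
with ProxyGateSketch('Chat_With_OpenAI'):
    pass  # not whitelisted: the proxy stays off inside this block
```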
+ "\n\n", diff --git a/crazy_functional.py b/crazy_functional.py index 65958ac..927beaa 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -39,7 +39,7 @@ def get_crazy_functions(): function_plugins = { "虚空终端": { - "Group": "对话|编程|学术", + "Group": "对话|编程|学术|智能体", "Color": "stop", "AsButton": True, "Function": HotReload(虚空终端) @@ -521,6 +521,18 @@ def get_crazy_functions(): except: print('Load function plugin failed') + try: + from crazy_functions.函数动态生成 import 函数动态生成 + function_plugins.update({ + "动态代码解释器(CodeInterpreter)": { + "Group": "智能体", + "Color": "stop", + "AsButton": True, + "Function": HotReload(函数动态生成) + } + }) + except: + print('Load function plugin failed') # try: # from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter diff --git a/crazy_functions/Langchain知识库.py b/crazy_functions/Langchain知识库.py index 741a3d0..8433895 100644 --- a/crazy_functions/Langchain知识库.py +++ b/crazy_functions/Langchain知识库.py @@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 print('Checking Text2vec ...') from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate(): # 临时地激活代理网络 + with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") # < -------------------构建知识库--------------- > chatbot.append(['
diff --git a/crazy_functional.py b/crazy_functional.py
index 65958ac..927beaa 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -39,7 +39,7 @@ def get_crazy_functions():
 
     function_plugins = {
         "虚空终端": {
-            "Group": "对话|编程|学术",
+            "Group": "对话|编程|学术|智能体",
             "Color": "stop",
             "AsButton": True,
             "Function": HotReload(虚空终端)
@@ -521,6 +521,18 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')
 
+    try:
+        from crazy_functions.函数动态生成 import 函数动态生成
+        function_plugins.update({
+            "动态代码解释器(CodeInterpreter)": {
+                "Group": "智能体",
+                "Color": "stop",
+                "AsButton": True,
+                "Function": HotReload(函数动态生成)
+            }
+        })
+    except:
+        print('Load function plugin failed')
 
     # try:
     #     from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter
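The pipe-separated `Group` tags combine with `DEFAULT_FN_GROUPS` from config.py to decide which plugin buttons are visible. Sketched below; the actual filtering lives in the UI layer, and `visible_plugins` is a hypothetical helper:

```python
# Simplified model of the group-based plugin filtering.
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']

function_plugins = {
    "虚空终端": {"Group": "对话|编程|学术|智能体", "AsButton": True},
    "动态代码解释器(CodeInterpreter)": {"Group": "智能体", "AsButton": True},
}

def visible_plugins(selected_groups):
    # A plugin is shown if it belongs to at least one selected group.
    return [name for name, meta in function_plugins.items()
            if set(meta["Group"].split('|')) & set(selected_groups)]

print(visible_plugins(['智能体']))  # both plugins match the new group
```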
diff --git a/crazy_functions/Langchain知识库.py b/crazy_functions/Langchain知识库.py
index 741a3d0..8433895 100644
--- a/crazy_functions/Langchain知识库.py
+++ b/crazy_functions/Langchain知识库.py
@@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     print('Checking Text2vec ...')
     from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-    with ProxyNetworkActivate():    # 临时地激活代理网络
+    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
         HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
 
     # < -------------------构建知识库--------------- >
     chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     print('Establishing knowledge archive ...')
-    with ProxyNetworkActivate():    # 临时地激活代理网络
+    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
         kai = knowledge_archive_interface()
         kai.feed_archive(file_manifest=file_manifest, id=kai_id)
         kai_files = kai.get_loaded_file()
diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py
index ee1ab90..b7a1819 100644
--- a/crazy_functions/crazy_utils.py
+++ b/crazy_functions/crazy_utils.py
@@ -651,7 +651,7 @@ class knowledge_archive_interface():
             from toolbox import ProxyNetworkActivate
             print('Checking Text2vec ...')
             from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-            with ProxyNetworkActivate():    # 临时地激活代理网络
+            with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
                 self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
 
         return self.text2vec_large_chinese
@@ -807,3 +807,10 @@ class construct_html():
         with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
             f.write(self.html_string.encode('utf-8', 'ignore').decode())
         return os.path.join(get_log_folder(), file_name)
+
+
+def get_plugin_arg(plugin_kwargs, key, default):
+    # 如果参数是空的
+    if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
+    # 正常情况
+    return plugin_kwargs.get(key, default)
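The new `get_plugin_arg` helper treats an empty advanced-args field as absent, which is what lets the plugin below distinguish "no file argument" from "empty string". Copied here with a few checks so the behavior is explicit:

```python
def get_plugin_arg(plugin_kwargs, key, default):
    # Empty-string arguments count as "not provided".
    if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
    return plugin_kwargs.get(key, default)

assert get_plugin_arg({"file_path_arg": ""}, "file_path_arg", default=False) is False            # empty -> default
assert get_plugin_arg({"file_path_arg": "./build/ants.jpg"}, "file_path_arg", default=False) \
    == "./build/ants.jpg"                                                                        # set -> value
assert get_plugin_arg({}, "file_path_arg", default=None) is None                                 # missing -> default
```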
diff --git a/crazy_functions/gen_fns/gen_fns_shared.py b/crazy_functions/gen_fns/gen_fns_shared.py
new file mode 100644
index 0000000..8e73794
--- /dev/null
+++ b/crazy_functions/gen_fns/gen_fns_shared.py
@@ -0,0 +1,70 @@
+import time
+import importlib
+import multiprocessing
+from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
+from toolbox import CatchException, update_ui, is_the_upload_folder
+from toolbox import promote_file_to_downloadzone, update_ui_lastest_msg
+
+def get_class_name(class_string):
+    import re
+    # Use regex to extract the class name
+    class_name = re.search(r'class (\w+)\(', class_string).group(1)
+    return class_name
+
+def try_make_module(code, chatbot):
+    module_file = 'gpt_fn_' + gen_time_str().replace('-', '_')
+    fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py'
+    with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
+    promote_file_to_downloadzone(fn_path, chatbot=chatbot)
+    class_name = get_class_name(code)
+    manager = multiprocessing.Manager()
+    return_dict = manager.dict()
+    p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict))
+    # only has 10 seconds to run
+    p.start(); p.join(timeout=10)
+    if p.is_alive(): p.terminate(); p.join()
+    p.close()
+    return return_dict["success"], return_dict['traceback']
+
+# runs in a child process: try to import the generated module and instantiate the class
+def is_function_successfully_generated(fn_path, class_name, return_dict):
+    return_dict['success'] = False
+    return_dict['traceback'] = ""
+    try:
+        # Create a spec for the module
+        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
+        # Load the module
+        example_module = importlib.util.module_from_spec(module_spec)
+        module_spec.loader.exec_module(example_module)
+        # Now you can use the module
+        some_class = getattr(example_module, class_name)
+        # Now you can create an instance of the class
+        instance = some_class()
+        return_dict['success'] = True
+        return
+    except:
+        return_dict['traceback'] = trimmed_format_exc()
+        return
+
+def subprocess_worker(code, file_path, return_dict):
+    return_dict['result'] = None
+    return_dict['success'] = False
+    return_dict['traceback'] = ""
+    try:
+        module_file = 'gpt_fn_' + gen_time_str().replace('-', '_')
+        fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py'
+        with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
+        class_name = get_class_name(code)
+        # Create a spec for the module
+        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
+        # Load the module
+        example_module = importlib.util.module_from_spec(module_spec)
+        module_spec.loader.exec_module(example_module)
+        # Now you can use the module
+        some_class = getattr(example_module, class_name)
+        # Now you can create an instance of the class
+        instance = some_class()
+        return_dict['result'] = instance.run(file_path)
+        return_dict['success'] = True
+    except:
+        return_dict['traceback'] = trimmed_format_exc()
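Both helpers above follow the same sandboxing idiom: run the risky import in a child process, share results through a `Manager` dict, and enforce a hard budget with `join(timeout)` plus `terminate()`. A self-contained toy version of that idiom, independent of gpt_academic (the worker here just sleeps):

```python
import multiprocessing, time

def _worker(seconds, return_dict):
    # Set defaults first, so the parent can still read them after a timeout kill.
    return_dict['success'] = False
    time.sleep(seconds)
    return_dict['success'] = True

def run_with_budget(seconds, budget=2):
    # Same pattern as try_make_module / subprocess_worker: spawn, join with a
    # timeout, terminate on overrun, then read results from the shared dict.
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=_worker, args=(seconds, return_dict))
    p.start(); p.join(timeout=budget)
    if p.is_alive(): p.terminate(); p.join()
    p.close()
    return dict(return_dict)

if __name__ == '__main__':
    print(run_with_budget(0.1))  # {'success': True}
    print(run_with_budget(10))   # {'success': False}, killed at the 2 s budget
```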
diff --git a/crazy_functions/函数动态生成.py b/crazy_functions/函数动态生成.py
new file mode 100644
index 0000000..d16ef88
--- /dev/null
+++ b/crazy_functions/函数动态生成.py
@@ -0,0 +1,252 @@
+# 本源代码中, ⭐ = 关键步骤
+"""
+测试:
+    - 裁剪图像,保留下半部分
+    - 交换图像的蓝色通道和红色通道
+    - 将图像转为灰度图像
+    - 将csv文件转excel表格
+
+Testing:
+    - Crop the image, keeping the bottom half.
+    - Swap the blue channel and red channel of the image.
+    - Convert the image to grayscale.
+    - Convert the CSV file to an Excel spreadsheet.
+"""
+
+
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
+from .crazy_utils import input_clipping, try_install_deps
+from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
+from crazy_functions.gen_fns.gen_fns_shared import get_class_name
+from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker
+from crazy_functions.gen_fns.gen_fns_shared import try_make_module
+import os
+import time
+import glob
+import multiprocessing
+
+template = """
+```python
+import ...  # Put dependencies here, e.g. import numpy as np.
+
+class TerminalFunction(object):  # Do not change the name of the class, the name of the class must be `TerminalFunction`
+
+    def run(self, path):  # The name of the method must be `run`, it takes only one positional argument.
+        # rewrite the function you have just written here
+        ...
+        return generated_file_path
+```
+"""
+
+def inspect_dependency(chatbot, history):
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+    return True
+
+def get_code_block(reply):
+    import re
+    pattern = r"```([\s\S]*?)```"           # regex pattern to match code blocks
+    matches = re.findall(pattern, reply)    # find all code blocks in text
+    def remove_lang_tag(block):
+        # drop the leading `python` language tag if present; note that
+        # str.strip('python') would also eat leading/trailing letters of the code itself
+        return block[len('python'):] if block.startswith('python') else block
+    if len(matches) == 1:
+        return remove_lang_tag(matches[0])  # code block
+    for match in matches:
+        if 'class TerminalFunction' in match:
+            return remove_lang_tag(match)   # code block
+    raise RuntimeError("GPT is not generating proper code.")
+
+def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
+    # 输入
+    prompt_compose = [
+        f'Your job:\n'
+        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
+        f"2. You should write this function to perform the following task: " + txt + "\n",
+        f"3. Wrap the output python function with a markdown code block."
+    ]
+    i_say = "".join(prompt_compose)
+    demo = []
+
+    # 第一步
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say, inputs_show_user=i_say,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
+        sys_prompt=r"You are a world-class programmer."
+    )
+    history.extend([i_say, gpt_say])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+
+    # 第二步
+    prompt_compose = [
+        "If the previous stage was successful, rewrite the function you have just written to satisfy the following template: \n",
+        template
+    ]
+    i_say = "".join(prompt_compose); inputs_show_user = "If the previous stage was successful, rewrite the function you have just written to satisfy the executable template. "
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say, inputs_show_user=inputs_show_user,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+        sys_prompt=r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!"
+    )
+    code_to_return = gpt_say
+    history.extend([i_say, gpt_say])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+
+    # # 第三步
+    # i_say = "Please list the packages to install to run the code above. Then show me how to use the `try_install_deps` function to install them."
+    # i_say += 'For instance: `try_install_deps(["opencv-python", "scipy", "numpy"])`'
+    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
+    #     inputs=i_say, inputs_show_user=inputs_show_user,
+    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+    #     sys_prompt=r"You are a programmer."
+    # )
+
+    # # 第三步
+    # i_say = "Show me how to use `pip` to install the packages needed to run the code above. "
+    # i_say += 'For instance: `pip install opencv-python scipy numpy`'
+    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
+    #     inputs=i_say, inputs_show_user=i_say,
+    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+    #     sys_prompt=r"You are a programmer."
+    # )
+    installation_advance = ""
+
+    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
+
+
+def for_immediate_show_off_when_possible(file_type, fp, chatbot):
+    if file_type in ['png', 'jpg']:
+        image_path = os.path.abspath(fp)
+        chatbot.append(['这是一张图片, 展示如下:',
+            f'本地文件地址: <br/>`{image_path}`<br/>'+
+            f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
+        ])
+    return chatbot
+
+
+def have_any_recent_upload_files(chatbot):
+    _5min = 5 * 60
+    if not chatbot: return False    # chatbot is None
+    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
+    if not most_recent_uploaded: return False   # most_recent_uploaded is None
+    if time.time() - most_recent_uploaded["time"] < _5min: return True  # most_recent_uploaded is new
+    else: return False  # most_recent_uploaded is too old
+
+def get_recent_file_prompt_support(chatbot):
+    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
+    path = most_recent_uploaded['path']
+    return path
+
+@CatchException
+def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    web_port        当前软件运行的端口号
+    """
+
+    # 清空历史
+    history = []
+
+    # 基本信息:功能、贡献者
+    chatbot.append(["正在启动: 插件动态生成插件", "插件动态生成, 执行开始, 作者Binary-Husky."])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+    # ⭐ 文件上传区是否有东西
+    # 1. 如果有文件: 作为函数参数
+    # 2. 如果没有文件:需要用GPT提取参数 (太懒了,以后再写,虚空终端已经实现了类似的代码)
+    file_list = []
+    if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
+        file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
+        file_list.append(file_path)
+        yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1)
+    elif have_any_recent_upload_files(chatbot):
+        file_dir = get_recent_file_prompt_support(chatbot)
+        file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True)
+        yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
+    else:
+        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        return # 2. 如果没有文件
+    if len(file_list) == 0:
+        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        return # 2. 如果没有文件
+
+    # 读取文件
+    file_type = file_list[0].split('.')[-1]
+
+    # 粗心检查
+    if is_the_upload_folder(txt):
+        yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
+        return
+
+    # 开始干正事
+    MAX_TRY = 3
+    for j in range(MAX_TRY):  # 最多重试3次
+        traceback = ""
+        try:
+            # ⭐ 开始啦 !
+            code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
+                yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
+            chatbot.append(["代码生成阶段结束", ""])
+            yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
+            # ⭐ 分离代码块
+            code = get_code_block(code)
+            # ⭐ 检查模块
+            ok, traceback = try_make_module(code, chatbot)
+            # 搞定代码生成
+            if ok: break
+        except Exception as e:
+            if not traceback: traceback = trimmed_format_exc()
+        # 处理异常
+        if not traceback: traceback = trimmed_format_exc()
+        yield from update_ui_lastest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
+
+    # 代码生成结束, 开始执行
+    TIME_LIMIT = 15
+    yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
+    manager = multiprocessing.Manager()
+    return_dict = manager.dict()
+
+    # ⭐ 到最后一步了,开始逐个文件进行处理
+    for file_path in file_list:
+        if os.path.exists(file_path):
+            chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."])
+            chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot)
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+        else:
+            continue
+
+        # ⭐⭐⭐ subprocess_worker ⭐⭐⭐
+        p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict))
+        # ⭐ 开始执行,时间限制TIME_LIMIT
+        p.start(); p.join(timeout=TIME_LIMIT)
+        if p.is_alive(): p.terminate(); p.join()
+        p.close()
+        res = return_dict['result']
+        success = return_dict['success']
+        traceback = return_dict['traceback']
+        if not success:
+            if not traceback: traceback = trimmed_format_exc()
+            chatbot.append(["执行失败了", f"错误追踪\n```\n{traceback}\n```\n"])
+            # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+            return
+
+        # 顺利完成,收尾
+        res = str(res)
+        if os.path.exists(res):
+            chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
+            new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
+            chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+        else:
+            chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
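For concreteness, here is what a successful second-stage answer could look like for the "将csv文件转excel表格" test prompt listed at the top of this file. This is a hypothetical model output that satisfies the template, not part of the patch; it assumes pandas and openpyxl are installed in the runtime environment:

```python
import pandas as pd  # hypothetical dependency chosen by the model

class TerminalFunction(object):  # class name fixed by the template above

    def run(self, path):
        # Convert the CSV file to an Excel spreadsheet next to the input file.
        df = pd.read_csv(path)
        generated_file_path = path.rsplit('.', 1)[0] + '.xlsx'
        df.to_excel(generated_file_path, index=False)  # requires openpyxl
        return generated_file_path
```

`subprocess_worker` then imports this module, instantiates `TerminalFunction`, and calls `run(file_path)` under the 15-second budget.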
diff --git a/main.py b/main.py
index 70b9219..f50cdb0 100644
--- a/main.py
+++ b/main.py
@@ -266,7 +266,7 @@ def main():
             cookies.update({'uuid': uuid.uuid4()})
             return cookies
         demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
-        demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
+        demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{GptAcademicJavaScriptInit();}')
 
     # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
     def auto_opentab_delay():
@@ -285,6 +285,7 @@ def main():
 
     auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
+        quiet=True,
         server_name="0.0.0.0",
         server_port=PORT,
         favicon_path="docs/logo.png",
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index c4823ce..44e0ae4 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -126,6 +126,15 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
+    "gpt-4-32k": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 32768,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
     # azure openai
     "azure-gpt-3.5":{
         "fn_with_ui": chatgpt_ui,
@@ -136,6 +145,15 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
    },
 
+    "azure-gpt-4":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": azure_endpoint,
+        "max_token": 8192,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     # api_2d
     "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
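A sketch of how this registry is consumed elsewhere in bridge_all.py: the selected model name indexes into `model_info` to pick handlers and limits. The names assume the module's own namespace; this is illustrative, not a new API:

```python
# Illustrative dispatch over the registry above (run inside bridge_all's namespace).
llm_model = "gpt-4-32k"                        # the newly registered model
entry = model_info[llm_model]
predict = entry["fn_without_ui"]               # non-streaming entry point
max_token = entry["max_token"]                 # 32768 for gpt-4-32k
n_tokens = entry["token_cnt"]("some prompt")   # tokenizer-backed token count
assert n_tokens <= max_token
```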
", chatbot, history, 1) + manager = multiprocessing.Manager() + return_dict = manager.dict() + + # ⭐ 到最后一步了,开始逐个文件进行处理 + for file_path in file_list: + if os.path.exists(file_path): + chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."]) + chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + else: + continue + + # ⭐⭐⭐ subprocess_worker ⭐⭐⭐ + p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict)) + # ⭐ 开始执行,时间限制TIME_LIMIT + p.start(); p.join(timeout=TIME_LIMIT) + if p.is_alive(): p.terminate(); p.join() + p.close() + res = return_dict['result'] + success = return_dict['success'] + traceback = return_dict['traceback'] + if not success: + if not traceback: traceback = trimmed_format_exc() + chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) + # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 顺利完成,收尾 + res = str(res) + if os.path.exists(res): + chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res]) + new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot) + chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + else: + chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/main.py b/main.py index 70b9219..f50cdb0 100644 --- a/main.py +++ b/main.py @@ -266,7 +266,7 @@ def main(): cookies.update({'uuid': uuid.uuid4()}) return cookies demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies]) - demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}') + demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{GptAcademicJavaScriptInit();}') # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 def auto_opentab_delay(): @@ -285,6 +285,7 @@ def main(): auto_opentab_delay() demo.queue(concurrency_count=CONCURRENT_COUNT).launch( + quiet=True, server_name="0.0.0.0", server_port=PORT, favicon_path="docs/logo.png", diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index c4823ce..44e0ae4 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -126,6 +126,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-32k": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 32768, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + # azure openai "azure-gpt-3.5":{ "fn_with_ui": chatgpt_ui, @@ -136,6 +145,15 @@ model_info = { "token_cnt": get_token_num_gpt35, }, + "azure-gpt-4":{ + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": azure_endpoint, + "max_token": 8192, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + # api_2d "api2d-gpt-3.5-turbo": { "fn_with_ui": chatgpt_ui, diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py index 6dac863..387b3e2 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llm/bridge_chatglm.py @@ -3,7 +3,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" @@ -48,16 
diff --git a/request_llm/requirements_chatglm.txt b/request_llm/requirements_chatglm.txt
index b2629f8..cd53cd7 100644
--- a/request_llm/requirements_chatglm.txt
+++ b/request_llm/requirements_chatglm.txt
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
diff --git a/request_llm/requirements_chatglm_onnx.txt b/request_llm/requirements_chatglm_onnx.txt
index 70ab668..5481147 100644
--- a/request_llm/requirements_chatglm_onnx.txt
+++ b/request_llm/requirements_chatglm_onnx.txt
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
diff --git a/request_llm/requirements_jittorllms.txt b/request_llm/requirements_jittorllms.txt
index 1d86ff8..ddb6195 100644
--- a/request_llm/requirements_jittorllms.txt
+++ b/request_llm/requirements_jittorllms.txt
@@ -2,6 +2,5 @@ jittor >= 1.3.7.9
 jtorch >= 0.1.3
 torch
 torchvision
-transformers==4.26.1
 pandas
 jieba
\ No newline at end of file
diff --git a/request_llm/requirements_moss.txt b/request_llm/requirements_moss.txt
index 8dd75bf..c27907c 100644
--- a/request_llm/requirements_moss.txt
+++ b/request_llm/requirements_moss.txt
@@ -1,5 +1,4 @@
 torch
-transformers==4.25.1
 sentencepiece
 datasets
 accelerate
diff --git a/requirements.txt b/requirements.txt
index 5ff40cc..1958963 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
 pydantic==1.10.11
 tiktoken>=0.3.3
 requests[socks]
-transformers
+transformers>=4.27.1
 python-markdown-math
 beautifulsoup4
 prompt_toolkit
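With the conflicting per-model pins removed, the single floor in requirements.txt now governs every backend. A quick runtime sanity check matching that floor (assumes the `packaging` distribution, which ships alongside pip in most environments):

```python
# Optional check mirroring the new requirements.txt constraint: transformers>=4.27.1
import transformers
from packaging.version import Version

assert Version(transformers.__version__) >= Version("4.27.1"), transformers.__version__
```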
"__main__": + from tests.test_utils import plugin_test + plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"}) + # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep') - plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf') + + # plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf') # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') diff --git a/tests/test_utils.py b/tests/test_utils.py index f3a45aa..1fdca1e 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -74,7 +74,7 @@ def plugin_test(main_input, plugin, advanced_arg=None): plugin_kwargs['plugin_kwargs'] = advanced_arg my_working_plugin = silence_stdout(plugin)(**plugin_kwargs) - with Live(Markdown(""), auto_refresh=False) as live: + with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live: for cookies, chat, hist, msg in my_working_plugin: md_str = vt.chat_to_markdown_str(chat) md = Markdown(md_str) diff --git a/themes/common.css b/themes/common.css index 0f201f0..e020ded 100644 --- a/themes/common.css +++ b/themes/common.css @@ -23,4 +23,63 @@ /* status bar height */ .min.svelte-1yrv54 { min-height: var(--size-12); +} + +/* copy btn */ +.message-btn-row { + width: 19px; + height: 19px; + position: absolute; + left: calc(100% + 3px); + top: 0; + display: flex; + justify-content: space-between; +} +/* .message-btn-row-leading, .message-btn-row-trailing { + display: inline-flex; + gap: 4px; +} */ +.message-btn-row button { + font-size: 18px; + align-self: center; + align-items: center; + flex-wrap: nowrap; + white-space: nowrap; + display: inline-flex; + flex-direction: row; + gap: 4px; + padding-block: 2px !important; +} + + +/* Scrollbar Width */ +::-webkit-scrollbar { + width: 12px; +} + +/* Scrollbar Track */ +::-webkit-scrollbar-track { + background: #f1f1f1; + border-radius: 12px; +} + +/* Scrollbar Handle */ +::-webkit-scrollbar-thumb { + background: #888; + border-radius: 12px; +} + +/* Scrollbar Handle on hover */ +::-webkit-scrollbar-thumb:hover { + background: #555; +} + +/* input btns: clear, reset, stop */ +#input-panel button { + min-width: min(80px, 100%); +} + +/* input btns: clear, reset, stop */ +#input-panel2 button { + min-width: min(80px, 100%); } \ No newline at end of file diff --git a/themes/common.js b/themes/common.js index 7733c7b..150d472 100644 --- a/themes/common.js +++ b/themes/common.js @@ -1,4 +1,85 @@ -function ChatBotHeight() { +function gradioApp() { + // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript + const elems = document.getElementsByTagName('gradio-app'); + const elem = elems.length == 0 ? document : elems[0]; + if (elem !== document) { + elem.getElementById = function(id) { + return document.getElementById(id); + }; + } + return elem.shadowRoot ? 
diff --git a/themes/common.js b/themes/common.js
index 7733c7b..150d472 100644
--- a/themes/common.js
+++ b/themes/common.js
@@ -1,4 +1,85 @@
-function ChatBotHeight() {
+function gradioApp() {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    const elems = document.getElementsByTagName('gradio-app');
+    const elem = elems.length == 0 ? document : elems[0];
+    if (elem !== document) {
+        elem.getElementById = function(id) {
+            return document.getElementById(id);
+        };
+    }
+    return elem.shadowRoot ? elem.shadowRoot : elem;
+}
+
+
+const copiedIcon = '';
+const copyIcon = '';
+
+
+function addCopyButton(botElement) {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    // Copy bot button
+    const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
+    if (messageBtnColumnElement) {
+        // Do something if .message-btn-column exists, for example, remove it
+        // messageBtnColumnElement.remove();
+        return;
+    }
+
+    var copyButton = document.createElement('button');
+    copyButton.classList.add('copy-bot-btn');
+    copyButton.setAttribute('aria-label', 'Copy');
+    copyButton.innerHTML = copyIcon;
+    copyButton.addEventListener('click', async () => {
+        const textToCopy = botElement.innerText;
+        try {
+            if ("clipboard" in navigator) {
+                await navigator.clipboard.writeText(textToCopy);
+                copyButton.innerHTML = copiedIcon;
+                setTimeout(() => {
+                    copyButton.innerHTML = copyIcon;
+                }, 1500);
+            } else {
+                const textArea = document.createElement("textarea");
+                textArea.value = textToCopy;
+                document.body.appendChild(textArea);
+                textArea.select();
+                try {
+                    document.execCommand('copy');
+                    copyButton.innerHTML = copiedIcon;
+                    setTimeout(() => {
+                        copyButton.innerHTML = copyIcon;
+                    }, 1500);
+                } catch (error) {
+                    console.error("Copy failed: ", error);
+                }
+                document.body.removeChild(textArea);
+            }
+        } catch (error) {
+            console.error("Copy failed: ", error);
+        }
+    });
+    var messageBtnColumn = document.createElement('div');
+    messageBtnColumn.classList.add('message-btn-row');
+    messageBtnColumn.appendChild(copyButton);
+    botElement.appendChild(messageBtnColumn);
+}
+
+function chatbotContentChanged(attempt = 1, force = false) {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    for (var i = 0; i < attempt; i++) {
+        setTimeout(() => {
+            gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
+        }, i === 0 ? 0 : 200);
+    }
+}
+
+function GptAcademicJavaScriptInit() {
+    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
+    var chatbotObserver = new MutationObserver(() => {
+        chatbotContentChanged(1);
+    });
+    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
+
     function update_height(){
         var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
         if (panel_height_target!=chatbot_height)
diff --git a/themes/gradios.py b/themes/gradios.py
index acabf75..6a34e88 100644
--- a/themes/gradios.py
+++ b/themes/gradios.py
@@ -5,7 +5,7 @@ CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
 
 def dynamic_set_theme(THEME):
     set_theme = gr.themes.ThemeClass()
-    with ProxyNetworkActivate():
+    with ProxyNetworkActivate('Download_Gradio_Theme'):
         logging.info('正在下载Gradio主题,请稍等。')
         if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
         if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
@@ -16,7 +16,7 @@ def adjust_theme():
 
     try:
         set_theme = gr.themes.ThemeClass()
-        with ProxyNetworkActivate():
+        with ProxyNetworkActivate('Download_Gradio_Theme'):
             logging.info('正在下载Gradio主题,请稍等。')
             THEME, = get_conf('THEME')
             if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
diff --git a/toolbox.py b/toolbox.py
index 6a53868..04853bc 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -527,6 +527,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
         if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
         else: current = []
         chatbot._cookies.update({'files_to_promote': [new_path] + current})
+    return new_path
 
 def disable_auto_promotion(chatbot):
     chatbot._cookies.update({'files_to_promote': []})
@@ -955,7 +956,19 @@ class ProxyNetworkActivate():
     """
     这段代码定义了一个名为TempProxy的空上下文管理器, 用于给一小段代码上代理
     """
+    def __init__(self, task=None) -> None:
+        self.task = task
+        if not task:
+            # 不给定task, 那么我们默认代理生效
+            self.valid = True
+        else:
+            # 给定了task, 我们检查一下
+            from toolbox import get_conf
+            WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY')
+            self.valid = (task in WHEN_TO_USE_PROXY)
+
     def __enter__(self):
+        if not self.valid: return self
         from toolbox import get_conf
         proxies, = get_conf('proxies')
         if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
diff --git a/version b/version
index 5562088..3bb18a4 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-  "version": 3.53,
+  "version": 3.54,
   "show_feature": true,
-  "new_feature": "支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+  "new_feature": "新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
 }