From a208782049eb81cd9a2e5f040f7f45cfa4bea164 Mon Sep 17 00:00:00 2001 From: w_xiaolizu Date: Wed, 30 Aug 2023 14:46:34 +0800 Subject: [PATCH 01/47] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E5=88=86=E7=B1=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 4 +- crazy_functional.py | 698 ++++++++++++++++++++------------------------ main.py | 64 +++- themes/default.css | 5 + themes/green.css | 6 +- 5 files changed, 378 insertions(+), 399 deletions(-) diff --git a/config.py b/config.py index 0983d22..3664c15 100644 --- a/config.py +++ b/config.py @@ -67,6 +67,8 @@ WEB_PORT = -1 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 MAX_RETRY = 2 +# 插件分类默认选项 +default_plugin = ['学术优化', '多功能插件', '代码解析'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ @@ -100,7 +102,7 @@ AUTO_CLEAR_TXT = False # 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful"] -THEME = "Default" +THEME = "Chuanhu-Small-and-Beautiful" # 加一个live2d装饰 diff --git a/crazy_functional.py b/crazy_functional.py index fcc4e52..99aad8a 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -1,8 +1,16 @@ from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效 +function_plugins = {} + def get_crazy_functions(): - from crazy_functions.读文章写摘要 import 读文章写摘要 + get_functions_学术优化() + get_functions_文档读取() + get_functions_代码解析() + get_functions_多功能插件() + return function_plugins + +def get_functions_代码解析(): from crazy_functions.生成函数注释 import 批量生成函数注释 from crazy_functions.解析项目源代码 import 解析项目本身 from crazy_functions.解析项目源代码 import 解析一个Python项目 @@ -14,37 +22,257 @@ def get_crazy_functions(): from crazy_functions.解析项目源代码 import 解析一个前端项目 from crazy_functions.高级功能函数模板 import 高阶功能模板函数 from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文 - from crazy_functions.Latex全文润色 import Latex英文润色 - from crazy_functions.询问多个大语言模型 import 同时问询 from crazy_functions.解析项目源代码 import 解析一个Lua项目 from 
crazy_functions.解析项目源代码 import 解析一个CSharp项目 - from crazy_functions.总结word文档 import 总结word文档 from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - from crazy_functions.对话历史存档 import 对话历史存档 - from crazy_functions.对话历史存档 import 载入对话历史存档 - from crazy_functions.对话历史存档 import 删除所有本地对话历史记录 - from crazy_functions.辅助功能 import 清除缓存 - from crazy_functions.批量Markdown翻译 import Markdown英译中 - from crazy_functions.批量总结PDF文档 import 批量总结PDF文档 - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入 - from crazy_functions.Latex全文润色 import Latex中文润色 - from crazy_functions.Latex全文润色 import Latex英文纠错 - from crazy_functions.Latex全文翻译 import Latex中译英 - from crazy_functions.Latex全文翻译 import Latex英译中 + from crazy_functions.解析项目源代码 import 解析任意code项目 from crazy_functions.批量Markdown翻译 import Markdown中译英 - - - function_plugins = { + function_plugins['代码解析'] = { "解析整个Python项目": { - "Color": "stop", + "Color": "primary", "AsButton": True, "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径", "Function": HotReload(解析一个Python项目) }, + "解析整个C++项目头文件": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径", + "Function": HotReload(解析一个C项目的头文件) + }, + "解析整个C++项目(.cpp/.hpp/.c/.h)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径", + "Function": HotReload(解析一个C项目) + }, + "解析整个Go项目": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个Go项目的所有源文件 | 输入参数为路径", + "Function": HotReload(解析一个Golang项目) + }, + "解析整个Rust项目": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径", + "Function": HotReload(解析一个Rust项目) + }, + "解析整个Java项目": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个Java项目的所有源文件 | 输入参数为路径", + "Function": HotReload(解析一个Java项目) + }, + "解析整个前端项目(js,ts,css等)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": 
"解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径", + "Function": HotReload(解析一个前端项目) + }, + "解析整个Lua项目": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径", + "Function": HotReload(解析一个Lua项目) + }, + "解析整个CSharp项目": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径", + "Function": HotReload(解析一个CSharp项目) + }, + "解析Jupyter Notebook文件": { + "Color": "primary", + "AsButton": False, + "Info": "解析Jupyter Notebook文件 | 输入参数为路径", + "Function": HotReload(解析ipynb文件), + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示 + }, + "批量生成函数注释": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(批量生成函数注释) + }, + "[多线程Demo] 解析此项目本身(源码自译解)": { + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(解析项目本身) + }, + "[插件demo] 历史上的今天": { + "AsButton": True, + "Function": HotReload(高阶功能模板函数) + }, + "批量Markdown中译英(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Markdown中译英) + }, + "解析项目源代码(手动指定和筛选源代码文件类型)": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", + # 高级参数输入区的显示提示 + "Function": HotReload(解析任意code项目) + }, + + } + + +def get_functions_文档读取(): + from crazy_functions.读文章写摘要 import 读文章写摘要 + from crazy_functions.总结word文档 import 总结word文档 + from crazy_functions.批量总结PDF文档 import 批量总结PDF文档 + from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 + from crazy_functions.批量Markdown翻译 import Markdown英译中 + from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入 + from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 + function_plugins['文档读取'] = { + "批量总结PDF文档": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(批量总结PDF文档) + }, + "理解PDF文档内容 (模仿ChatPDF)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": 
HotReload(理解PDF文档内容标准文件输入) + }, + "精准翻译PDF论文": { + "Color": "primary", + "AsButton": True, # 加入下拉菜单中 + "Function": HotReload(批量翻译PDF文档) + }, + "批量总结Word文档": { + "Color": "primary", + "AsButton": True, + "Info": "批量总结word文档 | 输入参数为路径", + "Function": HotReload(总结word文档) + }, + "读Tex论文写摘要": { + "Color": "primary", + "AsButton": True, + "Function": HotReload(读文章写摘要) + }, + "翻译README或.MD": { + "Color": "primary", + "AsButton": True, + "Info": "将Markdown翻译为中文 | 输入参数为路径或URL", + "Function": HotReload(Markdown英译中) + }, + "翻译Markdown或README(支持Github链接)": { + "Color": "primary", + "AsButton": False, + "Function": HotReload(Markdown英译中) + }, + "Markdown翻译(手动指定语言)": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。", + "Function": HotReload(Markdown翻译指定语言) + }, + } + +def get_functions_学术优化(): + from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 + from crazy_functions.Latex全文润色 import Latex中文润色 + from crazy_functions.Latex全文润色 import Latex英文纠错 + from crazy_functions.Latex全文翻译 import Latex中译英 + from crazy_functions.Latex全文翻译 import Latex英译中 + from crazy_functions.Latex全文润色 import Latex英文润色 + from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 + from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比 + from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF + function_plugins['学术优化'] = { + "英文Latex项目全文纠错(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英文纠错) + }, + "中文Latex项目全文润色(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex中文润色) + }, + "Latex项目全文中译英(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex中译英) + }, + "Latex项目全文英译中(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英译中) + }, + "谷歌学术检索助手(输入谷歌学术搜索页url)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(谷歌检索小助手) + }, + 
"英文Latex项目全文润色(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英文润色) + }, + "Arixv论文精细翻译(输入arxivID)[需Latex]": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": + "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "Function": HotReload(Latex翻译中文并重新编译PDF) + }, + "Latex英文纠错+高亮修正位置 [需Latex]": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。", + "Function": HotReload(Latex英文纠错加PDF对比) + }, + + } + function_plugins['学术优化'].update({ + "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": { + "Color": "primary", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(下载arxiv论文并翻译摘要) + } + }) + +def get_functions_多功能插件(): + from crazy_functions.询问多个大语言模型 import 同时问询 + from crazy_functions.对话历史存档 import 对话历史存档 + from crazy_functions.对话历史存档 import 载入对话历史存档 + from crazy_functions.对话历史存档 import 删除所有本地对话历史记录 + from crazy_functions.辅助功能 import 清除缓存 + from crazy_functions.联网的ChatGPT import 连接网络回答问题 + from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题 + from crazy_functions.询问多个大语言模型 import 同时问询_指定模型 + from crazy_functions.图片生成 import 图片生成 + from crazy_functions.总结音视频 import 总结音视频 + from crazy_functions.数学动画生成manim import 动画生成 + from crazy_functions.Langchain知识库 import 知识库问答 + from crazy_functions.Langchain知识库 import 读取知识库作答 + from crazy_functions.交互功能函数模板 import 交互功能模板函数 + from crazy_functions.语音助手 import 语音助手 + from crazy_functions.虚空终端 import 自动终端 + function_plugins['多功能插件'] = { + "询问多个GPT模型": { + "Color": "primary", + "AsButton": True, + "Function": HotReload(同时问询) + }, + "保存当前的对话": { + "AsButton": True, + "Info": "保存当前的对话 | 不需要输入参数", + "Function": HotReload(对话历史存档) + }, "载入对话历史存档(先上传存档或输入路径)": { - "Color": "stop", + "Color": "primary", "AsButton": False, "Info": "载入对话历史存档 | 输入参数为路径", "Function": 
HotReload(载入对话历史存档) @@ -55,392 +283,84 @@ def get_crazy_functions(): "Function": HotReload(删除所有本地对话历史记录) }, "清除所有缓存文件(谨慎操作)": { - "Color": "stop", + "Color": "primary", "AsButton": False, # 加入下拉菜单中 "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", "Function": HotReload(清除缓存) }, - "批量总结Word文档": { - "Color": "stop", - "AsButton": True, - "Info": "批量总结word文档 | 输入参数为路径", - "Function": HotReload(总结word文档) - }, - "解析整个C++项目头文件": { - "Color": "stop", + "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": { + "Color": "primary", "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径", - "Function": HotReload(解析一个C项目的头文件) + "Function": HotReload(连接网络回答问题) }, - "解析整个C++项目(.cpp/.hpp/.c/.h)": { - "Color": "stop", + "连接网络回答问题(中文Bing版,输入问题后点击该插件)": { + "Color": "primary", "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径", - "Function": HotReload(解析一个C项目) + "Function": HotReload(连接bing搜索回答问题) }, - "解析整个Go项目": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Go项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Golang项目) - }, - "解析整个Rust项目": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Rust项目) - }, - "解析整个Java项目": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Java项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Java项目) - }, - "解析整个前端项目(js,ts,css等)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径", - "Function": HotReload(解析一个前端项目) - }, - "解析整个Lua项目": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Lua项目) - }, - "解析整个CSharp项目": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个CSharp项目) - }, - "解析Jupyter Notebook文件": { - "Color": "stop", + "询问多个GPT模型(手动指定询问哪些模型)": { + "Color": "primary", "AsButton": False, - "Info": "解析Jupyter Notebook文件 | 输入参数为路径", - "Function": 
HotReload(解析ipynb文件), "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示 + "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示 + "Function": HotReload(同时问询_指定模型) }, - "读Tex论文写摘要": { - "Color": "stop", - "AsButton": True, - "Function": HotReload(读文章写摘要) - }, - "翻译README或.MD": { - "Color": "stop", - "AsButton": True, - "Info": "将Markdown翻译为中文 | 输入参数为路径或URL", - "Function": HotReload(Markdown英译中) - }, - "翻译Markdown或README(支持Github链接)": { - "Color": "stop", + "图片生成(先切换模型到openai或api2d)": { + "Color": "primary", "AsButton": False, - "Function": HotReload(Markdown英译中) + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 + "Info": "图片生成 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成) }, - "批量生成函数注释": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(批量生成函数注释) + "批量总结音视频(输入路径或上传压缩包)": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", + "Info": "批量总结音频或视频 | 输入参数为路径", + "Function": HotReload(总结音视频) }, - "保存当前的对话": { + "数学动画生成(Manim)": { + "Color": "primary", + "AsButton": False, + "Function": HotReload(动画生成) + }, + "构建知识库(请先上传文件素材)": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "待注入的知识库名称id, 默认为default", + "Function": HotReload(知识库问答) + }, + "知识库问答": { + "Color": "primary", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库", + "Function": HotReload(读取知识库作答) + }, + "交互功能模板函数": { + "Color": "primary", + "AsButton": False, + "Function": HotReload(交互功能模板函数) + }, + "实时音频采集": { + "Color": "primary", "AsButton": True, - "Info": "保存当前的对话 | 不需要输入参数", - "Function": HotReload(对话历史存档) - }, - "[多线程Demo] 解析此项目本身(源码自译解)": { - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(解析项目本身) - 
}, - "[插件demo] 历史上的今天": { - "AsButton": True, - "Function": HotReload(高阶功能模板函数) - }, - "精准翻译PDF论文": { - "Color": "stop", - "AsButton": True, # 加入下拉菜单中 - "Function": HotReload(批量翻译PDF文档) - }, - "询问多个GPT模型": { - "Color": "stop", - "AsButton": True, - "Function": HotReload(同时问询) - }, - "批量总结PDF文档": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(批量总结PDF文档) - }, - "谷歌学术检索助手(输入谷歌学术搜索页url)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(谷歌检索小助手) - }, - "理解PDF文档内容 (模仿ChatPDF)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(理解PDF文档内容标准文件输入) - }, - "英文Latex项目全文润色(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英文润色) - }, - "英文Latex项目全文纠错(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英文纠错) - }, - "中文Latex项目全文润色(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex中文润色) - }, - "Latex项目全文中译英(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex中译英) - }, - "Latex项目全文英译中(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英译中) - }, - "批量Markdown中译英(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Markdown中译英) + "Function": HotReload(语音助手) }, + "自动终端": { + "Color": "primary", + "AsButton": False, + "Function": HotReload(自动终端) + } } - - # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=- - try: - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - function_plugins.update({ - "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(下载arxiv论文并翻译摘要) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - function_plugins.update({ - "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - 
"Function": HotReload(连接网络回答问题) - } - }) - from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题 - function_plugins.update({ - "连接网络回答问题(中文Bing版,输入问题后点击该插件)": { - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(连接bing搜索回答问题) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.解析项目源代码 import 解析任意code项目 - function_plugins.update({ - "解析项目源代码(手动指定和筛选源代码文件类型)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示 - "Function": HotReload(解析任意code项目) - }, - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.询问多个大语言模型 import 同时问询_指定模型 - function_plugins.update({ - "询问多个GPT模型(手动指定询问哪些模型)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示 - "Function": HotReload(同时问询_指定模型) - }, - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.图片生成 import 图片生成 - function_plugins.update({ - "图片生成(先切换模型到openai或api2d)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 - "Info": "图片生成 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成) - }, - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.总结音视频 import 总结音视频 - function_plugins.update({ - "批量总结音视频(输入路径或上传压缩包)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", - "Info": "批量总结音频或视频 | 输入参数为路径", - "Function": HotReload(总结音视频) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.数学动画生成manim import 动画生成 - function_plugins.update({ - 
"数学动画生成(Manim)": { - "Color": "stop", - "AsButton": False, - "Function": HotReload(动画生成) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 - function_plugins.update({ - "Markdown翻译(手动指定语言)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。", - "Function": HotReload(Markdown翻译指定语言) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.Langchain知识库 import 知识库问答 - function_plugins.update({ - "构建知识库(请先上传文件素材)": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "待注入的知识库名称id, 默认为default", - "Function": HotReload(知识库问答) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.Langchain知识库 import 读取知识库作答 - function_plugins.update({ - "知识库问答": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库", - "Function": HotReload(读取知识库作答) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.交互功能函数模板 import 交互功能模板函数 - function_plugins.update({ - "交互功能模板函数": { - "Color": "stop", - "AsButton": False, - "Function": HotReload(交互功能模板函数) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比 - function_plugins.update({ - "Latex英文纠错+高亮修正位置 [需Latex]": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。", - "Function": HotReload(Latex英文纠错加PDF对比) - } - }) - from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF - function_plugins.update({ - "Arixv论文精细翻译(输入arxivID)[需Latex]": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": - "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + - "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + - 'If the term "agent" is used in this section, it should be translated to 
"智能体". ', - "Function": HotReload(Latex翻译中文并重新编译PDF) - } - }) - function_plugins.update({ - "本地Latex论文精细翻译(上传Latex项目)[需Latex]": { - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": - "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + - "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + - 'If the term "agent" is used in this section, it should be translated to "智能体". ', - "Function": HotReload(Latex翻译中文并重新编译PDF) - } - }) - except: - print('Load function plugin failed') - - try: - from toolbox import get_conf - ENABLE_AUDIO, = get_conf('ENABLE_AUDIO') - if ENABLE_AUDIO: - from crazy_functions.语音助手 import 语音助手 - function_plugins.update({ - "实时音频采集": { - "Color": "stop", - "AsButton": True, - "Function": HotReload(语音助手) - } - }) - except: - print('Load function plugin failed') - - try: - from crazy_functions.虚空终端 import 自动终端 - function_plugins.update({ - "自动终端": { - "Color": "stop", - "AsButton": False, - "Function": HotReload(自动终端) - } - }) - except: - print('Load function plugin failed') - # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 - # function_plugins.update({ + # function_plugins['多功能'].update({ # "黑盒模型学习: 微调数据集生成 (先上传数据集)": { - # "Color": "stop", + # "Color": "primary", # "AsButton": False, # "AdvancedArgs": True, # "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求:100字以内,用第二人称。' --system_prompt=''", @@ -449,4 +369,4 @@ def get_crazy_functions(): # }) # except: # print('Load function plugin failed') - return function_plugins + diff --git a/main.py b/main.py index 0f8ea07..4ecb2f7 100644 --- a/main.py +++ b/main.py @@ -2,7 +2,7 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 def main(): import gradio as gr - if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt" + # if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 
pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt" from request_llm.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 @@ -34,7 +34,13 @@ def main(): # 高级函数插件 from crazy_functional import get_crazy_functions - crazy_fns = get_crazy_functions() + default_plugin, = get_conf('default_plugin') + crazy_fns_role = get_crazy_functions() + crazy_classification = [i for i in crazy_fns_role] + crazy_fns = {} + for role in crazy_fns_role: + for k in crazy_fns_role[role]: + crazy_fns[k] = crazy_fns_role[role][k] # 处理markdown文本格式的转变 gr.Chatbot.postprocess = format_io @@ -86,15 +92,35 @@ def main(): with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn: with gr.Row(): gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") + plugin_dropdown = gr.Dropdown(choices=crazy_classification, label='选择插件分类', + value=default_plugin, + multiselect=True, interactive=True, + elem_classes='normal_mut_select' + ).style(container=False) with gr.Row(): - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - crazy_fns[k]["Button"] = gr.Button(k, variant=variant) - crazy_fns[k]["Button"].style(size="sm") + for role in crazy_fns_role: + for k in crazy_fns_role[role]: + if not crazy_fns_role[role][k].get("AsButton", True): continue + if role not in default_plugin: + variant = crazy_fns_role[role][k]["Color"] if "Color" in crazy_fns_role[role][ + k] else "secondary" + crazy_fns_role[role][k]['Button'] = gr.Button(k, variant=variant, + visible=False).style(size="sm") + else: + variant = crazy_fns[k]["Color"] if "Color" in crazy_fns_role[role][ + k] else "secondary" + crazy_fns_role[role][k]['Button'] = gr.Button(k, variant=variant, + visible=True).style(size="sm") with gr.Row(): with 
gr.Accordion("更多函数插件", open=True): - dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)] + dropdown_fn_list = [] + for role in crazy_fns_role: + if role in default_plugin: + for k in crazy_fns_role[role]: + if not crazy_fns_role[role][k].get("AsButton", True): + dropdown_fn_list.append(k) + elif crazy_fns_role[role][k].get('AdvancedArgs', False): + dropdown_fn_list.append(k) with gr.Row(): dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False) with gr.Row(): @@ -190,6 +216,28 @@ def main(): # 终止按钮的回调函数注册 stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) + fn_btn_dict = {crazy_fns_role[role][k]['Button']: {role: k} for role in crazy_fns_role for k in crazy_fns_role[role] if crazy_fns_role[role][k].get('Button')} + def show_plugin_btn(plu_list): + new_btn_list = [] + fns_list = [] + if not plu_list: + return [*[fns.update(visible=False) for fns in fn_btn_dict], gr.Dropdown.update(choices=[])] + else: + for fns in fn_btn_dict: + if list(fn_btn_dict[fns].keys())[0] in plu_list: + new_btn_list.append(fns.update(visible=True)) + else: + new_btn_list.append(fns.update(visible=False)) + for role in crazy_fns_role: + if role in plu_list: + for k in crazy_fns_role[role]: + if not crazy_fns_role[role][k].get("AsButton", True): + fns_list.append(k) + elif crazy_fns_role[role][k].get('AdvancedArgs', False): + fns_list.append(k) + return [*new_btn_list, gr.Dropdown.update(choices=fns_list)] + plugin_dropdown.select(fn=show_plugin_btn, inputs=[plugin_dropdown], + outputs=[*fn_btn_dict.keys(), dropdown]) if ENABLE_AUDIO: from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution rad = RealtimeAudioDistribution() diff --git a/themes/default.css b/themes/default.css index 07b5383..5fba4af 100644 --- a/themes/default.css +++ b/themes/default.css @@ -16,6 +16,11 @@ .markdown-body 
thead th { padding: .5em .2em; } +.normal_mut_select .svelte-1gfkn6j { + float: left; + width: auto; + line-height: 260% !important; +} .markdown-body ol, .markdown-body ul { padding-inline-start: 2em !important; diff --git a/themes/green.css b/themes/green.css index 950ac2e..dd109d5 100644 --- a/themes/green.css +++ b/themes/green.css @@ -24,7 +24,11 @@ mspace { border-color: yellow; } } - +.normal_mut_select .svelte-1gfkn6j { + float: left; + width: auto; + line-height: 260% !important; +} #highlight_update { animation-name: highlight; animation-duration: 0.75s; From 89de49f31ee5e3ff34d8f9a7f0daa91d2a13847f Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Wed, 30 Aug 2023 16:00:27 +0800 Subject: [PATCH 02/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=8F=98=E9=87=8F?= =?UTF-8?q?=E5=91=BD=E5=90=8D=EF=BC=8C=E6=95=B4=E7=90=86=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E6=B8=85=E5=8D=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 20 +++++++++++++++++--- main.py | 8 ++++---- request_llm/bridge_chatglmft.py | 10 +++++----- toolbox.py | 4 ++-- 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/config.py b/config.py index 3664c15..20e5d8d 100644 --- a/config.py +++ b/config.py @@ -67,8 +67,10 @@ WEB_PORT = -1 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 MAX_RETRY = 2 + # 插件分类默认选项 -default_plugin = ['学术优化', '多功能插件', '代码解析'] +DEFAULT_FN_GROUPS = ['学术优化', '多功能插件', '代码解析'] + # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ @@ -85,7 +87,7 @@ BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot"(文心一言), " # 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径 -ChatGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100" +CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100" # 本地LLM模型如ChatGLM的执行方式 CPU/GPU @@ -101,7 +103,7 @@ CONCURRENT_COUNT = 100 AUTO_CLEAR_TXT = 
False -# 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful"] +# 色彩主题,可选 ["Default", "Chuanhu-Small-and-Beautiful"] THEME = "Chuanhu-Small-and-Beautiful" @@ -215,6 +217,18 @@ ALLOW_RESET_CONFIG = False ├── NEWBING_STYLE └── NEWBING_COOKIES + +用户图形界面布局依赖关系示意图 +│ +├── CHATBOT_HEIGHT 对话窗的高度 +├── CODE_HIGHLIGHT 代码高亮 +├── LAYOUT 窗口布局 +├── DARK_MODE 暗色模式 / 亮色模式 +├── DEFAULT_FN_GROUPS 插件分类默认选项 +├── THEME 色彩主题 +├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框 +├── ADD_WAIFU 加一个live2d装饰 + 插件在线服务配置依赖关系示意图 diff --git a/main.py b/main.py index 4ecb2f7..ced4177 100644 --- a/main.py +++ b/main.py @@ -34,7 +34,7 @@ def main(): # 高级函数插件 from crazy_functional import get_crazy_functions - default_plugin, = get_conf('default_plugin') + DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS') crazy_fns_role = get_crazy_functions() crazy_classification = [i for i in crazy_fns_role] crazy_fns = {} @@ -93,7 +93,7 @@ def main(): with gr.Row(): gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") plugin_dropdown = gr.Dropdown(choices=crazy_classification, label='选择插件分类', - value=default_plugin, + value=DEFAULT_FN_GROUPS, multiselect=True, interactive=True, elem_classes='normal_mut_select' ).style(container=False) @@ -101,7 +101,7 @@ def main(): for role in crazy_fns_role: for k in crazy_fns_role[role]: if not crazy_fns_role[role][k].get("AsButton", True): continue - if role not in default_plugin: + if role not in DEFAULT_FN_GROUPS: variant = crazy_fns_role[role][k]["Color"] if "Color" in crazy_fns_role[role][ k] else "secondary" crazy_fns_role[role][k]['Button'] = gr.Button(k, variant=variant, @@ -115,7 +115,7 @@ def main(): with gr.Accordion("更多函数插件", open=True): dropdown_fn_list = [] for role in crazy_fns_role: - if role in default_plugin: + if role in DEFAULT_FN_GROUPS: for k in crazy_fns_role[role]: if not crazy_fns_role[role][k].get("AsButton", True): dropdown_fn_list.append(k) diff --git a/request_llm/bridge_chatglmft.py b/request_llm/bridge_chatglmft.py index 4e21c98..71af942 100644 --- 
a/request_llm/bridge_chatglmft.py +++ b/request_llm/bridge_chatglmft.py @@ -63,9 +63,9 @@ class GetGLMFTHandle(Process): # if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息') # with open(conf, 'r', encoding='utf8') as f: # model_args = json.loads(f.read()) - ChatGLM_PTUNING_CHECKPOINT, = get_conf('ChatGLM_PTUNING_CHECKPOINT') - assert os.path.exists(ChatGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点" - conf = os.path.join(ChatGLM_PTUNING_CHECKPOINT, "config.json") + CHATGLM_PTUNING_CHECKPOINT, = get_conf('CHATGLM_PTUNING_CHECKPOINT') + assert os.path.exists(CHATGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点" + conf = os.path.join(CHATGLM_PTUNING_CHECKPOINT, "config.json") with open(conf, 'r', encoding='utf8') as f: model_args = json.loads(f.read()) if 'model_name_or_path' not in model_args: @@ -78,9 +78,9 @@ class GetGLMFTHandle(Process): config.pre_seq_len = model_args['pre_seq_len'] config.prefix_projection = model_args['prefix_projection'] - print(f"Loading prefix_encoder weight from {ChatGLM_PTUNING_CHECKPOINT}") + print(f"Loading prefix_encoder weight from {CHATGLM_PTUNING_CHECKPOINT}") model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True) - prefix_state_dict = torch.load(os.path.join(ChatGLM_PTUNING_CHECKPOINT, "pytorch_model.bin")) + prefix_state_dict = torch.load(os.path.join(CHATGLM_PTUNING_CHECKPOINT, "pytorch_model.bin")) new_prefix_state_dict = {} for k, v in prefix_state_dict.items(): if k.startswith("transformer.prefix_encoder."): diff --git a/toolbox.py b/toolbox.py index 73e3e8d..6da2347 100644 --- a/toolbox.py +++ b/toolbox.py @@ -1001,7 +1001,7 @@ def get_plugin_default_kwargs(): chatbot = ChatBotWithCookies(llm_kwargs) # txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port - default_plugin_kwargs = { + DEFAULT_FN_GROUPS_kwargs = { "main_input": "./README.md", "llm_kwargs": llm_kwargs, "plugin_kwargs": {}, @@ -1010,7 +1010,7 @@ def get_plugin_default_kwargs(): "system_prompt": "You are 
a good AI.", "web_port": WEB_PORT } - return default_plugin_kwargs + return DEFAULT_FN_GROUPS_kwargs def get_chat_default_kwargs(): """ From 2d5a1fbc126397c18163911473f4416aa40a92ad Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 31 Aug 2023 00:21:24 +0800 Subject: [PATCH 03/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=89=8D=E7=AB=AF?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 12 +- crazy_functional.py | 669 ++++++++++++++++++++++++++------------------ main.py | 13 +- themes/common.css | 15 + themes/common.js | 5 +- themes/contrast.css | 482 +++++++++++++++++++++++++++++++ themes/contrast.py | 88 ++++++ themes/default.css | 44 +++ themes/default.py | 6 +- themes/green.py | 2 + themes/theme.py | 3 + 11 files changed, 1059 insertions(+), 280 deletions(-) create mode 100644 themes/common.css create mode 100644 themes/contrast.css create mode 100644 themes/contrast.py diff --git a/config.py b/config.py index 20e5d8d..6442ad8 100644 --- a/config.py +++ b/config.py @@ -43,7 +43,11 @@ API_URL_REDIRECT = {} DEFAULT_WORKER_NUM = 3 -# 对话窗的高度 +# 色彩主题,可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"] +THEME = "Default" + + +# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效) CHATBOT_HEIGHT = 1115 @@ -69,7 +73,7 @@ MAX_RETRY = 2 # 插件分类默认选项 -DEFAULT_FN_GROUPS = ['学术优化', '多功能插件', '代码解析'] +DEFAULT_FN_GROUPS = ['对话', '编程', '学术'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) @@ -103,10 +107,6 @@ CONCURRENT_COUNT = 100 AUTO_CLEAR_TXT = False -# 色彩主题,可选 ["Default", "Chuanhu-Small-and-Beautiful"] -THEME = "Chuanhu-Small-and-Beautiful" - - # 加一个live2d装饰 ADD_WAIFU = False diff --git a/crazy_functional.py b/crazy_functional.py index 99aad8a..ee3ed99 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -1,16 +1,8 @@ from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效 -function_plugins = {} - def get_crazy_functions(): - get_functions_学术优化() 
- get_functions_文档读取() - get_functions_代码解析() - get_functions_多功能插件() - return function_plugins - -def get_functions_代码解析(): + from crazy_functions.读文章写摘要 import 读文章写摘要 from crazy_functions.生成函数注释 import 批量生成函数注释 from crazy_functions.解析项目源代码 import 解析项目本身 from crazy_functions.解析项目源代码 import 解析一个Python项目 @@ -22,345 +14,482 @@ def get_functions_代码解析(): from crazy_functions.解析项目源代码 import 解析一个前端项目 from crazy_functions.高级功能函数模板 import 高阶功能模板函数 from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文 + from crazy_functions.Latex全文润色 import Latex英文润色 + from crazy_functions.询问多个大语言模型 import 同时问询 from crazy_functions.解析项目源代码 import 解析一个Lua项目 from crazy_functions.解析项目源代码 import 解析一个CSharp项目 + from crazy_functions.总结word文档 import 总结word文档 from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - from crazy_functions.解析项目源代码 import 解析任意code项目 + from crazy_functions.对话历史存档 import 对话历史存档 + from crazy_functions.对话历史存档 import 载入对话历史存档 + from crazy_functions.对话历史存档 import 删除所有本地对话历史记录 + from crazy_functions.辅助功能 import 清除缓存 + from crazy_functions.批量Markdown翻译 import Markdown英译中 + from crazy_functions.批量总结PDF文档 import 批量总结PDF文档 + from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 + from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 + from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入 + from crazy_functions.Latex全文润色 import Latex中文润色 + from crazy_functions.Latex全文润色 import Latex英文纠错 + from crazy_functions.Latex全文翻译 import Latex中译英 + from crazy_functions.Latex全文翻译 import Latex英译中 from crazy_functions.批量Markdown翻译 import Markdown中译英 - function_plugins['代码解析'] = { + + + function_plugins = { "解析整个Python项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": True, "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径", "Function": HotReload(解析一个Python项目) }, + "载入对话历史存档(先上传存档或输入路径)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "Info": "载入对话历史存档 | 输入参数为路径", + "Function": HotReload(载入对话历史存档) + }, + "删除所有本地对话历史记录(谨慎操作)": { + "Group": "对话", + "AsButton": False, + "Info": 
"删除所有本地对话历史记录,谨慎操作 | 不需要输入参数", + "Function": HotReload(删除所有本地对话历史记录) + }, + "清除所有缓存文件(谨慎操作)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", + "Function": HotReload(清除缓存) + }, + "批量总结Word文档": { + "Group": "学术", + "Color": "stop", + "AsButton": True, + "Info": "批量总结word文档 | 输入参数为路径", + "Function": HotReload(总结word文档) + }, "解析整个C++项目头文件": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径", "Function": HotReload(解析一个C项目的头文件) }, "解析整个C++项目(.cpp/.hpp/.c/.h)": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径", "Function": HotReload(解析一个C项目) }, "解析整个Go项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个Go项目的所有源文件 | 输入参数为路径", "Function": HotReload(解析一个Golang项目) }, "解析整个Rust项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径", "Function": HotReload(解析一个Rust项目) }, "解析整个Java项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个Java项目的所有源文件 | 输入参数为路径", "Function": HotReload(解析一个Java项目) }, "解析整个前端项目(js,ts,css等)": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径", "Function": HotReload(解析一个前端项目) }, "解析整个Lua项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径", "Function": HotReload(解析一个Lua项目) }, "解析整个CSharp项目": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径", "Function": HotReload(解析一个CSharp项目) }, "解析Jupyter Notebook文件": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, "Info": "解析Jupyter Notebook文件 | 
输入参数为路径", "Function": HotReload(解析ipynb文件), "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示 }, - "批量生成函数注释": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(批量生成函数注释) - }, - "[多线程Demo] 解析此项目本身(源码自译解)": { - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(解析项目本身) - }, - "[插件demo] 历史上的今天": { - "AsButton": True, - "Function": HotReload(高阶功能模板函数) - }, - "批量Markdown中译英(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Markdown中译英) - }, - "解析项目源代码(手动指定和筛选源代码文件类型)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", - # 高级参数输入区的显示提示 - "Function": HotReload(解析任意code项目) - }, - - } - - -def get_functions_文档读取(): - from crazy_functions.读文章写摘要 import 读文章写摘要 - from crazy_functions.总结word文档 import 总结word文档 - from crazy_functions.批量总结PDF文档 import 批量总结PDF文档 - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - from crazy_functions.批量Markdown翻译 import Markdown英译中 - from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入 - from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 - function_plugins['文档读取'] = { - "批量总结PDF文档": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(批量总结PDF文档) - }, - "理解PDF文档内容 (模仿ChatPDF)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(理解PDF文档内容标准文件输入) - }, - "精准翻译PDF论文": { - "Color": "primary", - "AsButton": True, # 加入下拉菜单中 - "Function": HotReload(批量翻译PDF文档) - }, - "批量总结Word文档": { - "Color": "primary", - "AsButton": True, - "Info": "批量总结word文档 | 输入参数为路径", - "Function": HotReload(总结word文档) - }, "读Tex论文写摘要": { - "Color": "primary", + "Group": "学术", + "Color": "stop", "AsButton": True, "Function": HotReload(读文章写摘要) }, - "翻译README或.MD": { - "Color": "primary", + "翻译README或MD": { + "Group": "编程", + "Color": "stop", 
"AsButton": True, "Info": "将Markdown翻译为中文 | 输入参数为路径或URL", "Function": HotReload(Markdown英译中) }, "翻译Markdown或README(支持Github链接)": { - "Color": "primary", + "Group": "编程", + "Color": "stop", "AsButton": False, "Function": HotReload(Markdown英译中) }, - "Markdown翻译(手动指定语言)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。", - "Function": HotReload(Markdown翻译指定语言) - }, - } - -def get_functions_学术优化(): - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - from crazy_functions.Latex全文润色 import Latex中文润色 - from crazy_functions.Latex全文润色 import Latex英文纠错 - from crazy_functions.Latex全文翻译 import Latex中译英 - from crazy_functions.Latex全文翻译 import Latex英译中 - from crazy_functions.Latex全文润色 import Latex英文润色 - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比 - from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF - function_plugins['学术优化'] = { - "英文Latex项目全文纠错(输入路径或上传压缩包)": { - "Color": "primary", + "批量生成函数注释": { + "Group": "编程", + "Color": "stop", "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英文纠错) - }, - "中文Latex项目全文润色(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex中文润色) - }, - "Latex项目全文中译英(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex中译英) - }, - "Latex项目全文英译中(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英译中) - }, - "谷歌学术检索助手(输入谷歌学术搜索页url)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(谷歌检索小助手) - }, - "英文Latex项目全文润色(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(Latex英文润色) - }, - "Arixv论文精细翻译(输入arxivID)[需Latex]": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": - "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + - "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + - 'If the term "agent" 
is used in this section, it should be translated to "智能体". ', - "Function": HotReload(Latex翻译中文并重新编译PDF) - }, - "Latex英文纠错+高亮修正位置 [需Latex]": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。", - "Function": HotReload(Latex英文纠错加PDF对比) - }, - - } - function_plugins['学术优化'].update({ - "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(下载arxiv论文并翻译摘要) - } - }) - -def get_functions_多功能插件(): - from crazy_functions.询问多个大语言模型 import 同时问询 - from crazy_functions.对话历史存档 import 对话历史存档 - from crazy_functions.对话历史存档 import 载入对话历史存档 - from crazy_functions.对话历史存档 import 删除所有本地对话历史记录 - from crazy_functions.辅助功能 import 清除缓存 - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题 - from crazy_functions.询问多个大语言模型 import 同时问询_指定模型 - from crazy_functions.图片生成 import 图片生成 - from crazy_functions.总结音视频 import 总结音视频 - from crazy_functions.数学动画生成manim import 动画生成 - from crazy_functions.Langchain知识库 import 知识库问答 - from crazy_functions.Langchain知识库 import 读取知识库作答 - from crazy_functions.交互功能函数模板 import 交互功能模板函数 - from crazy_functions.语音助手 import 语音助手 - from crazy_functions.虚空终端 import 自动终端 - function_plugins['多功能插件'] = { - "询问多个GPT模型": { - "Color": "primary", - "AsButton": True, - "Function": HotReload(同时问询) + "Function": HotReload(批量生成函数注释) }, "保存当前的对话": { + "Group": "对话", "AsButton": True, "Info": "保存当前的对话 | 不需要输入参数", "Function": HotReload(对话历史存档) }, - "载入对话历史存档(先上传存档或输入路径)": { - "Color": "primary", - "AsButton": False, - "Info": "载入对话历史存档 | 输入参数为路径", - "Function": HotReload(载入对话历史存档) - }, - "删除所有本地对话历史记录(谨慎操作)": { - "AsButton": False, - "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数", - "Function": HotReload(删除所有本地对话历史记录) - }, - "清除所有缓存文件(谨慎操作)": { - "Color": "primary", + "[多线程Demo]解析此项目本身(源码自译解)": { + "Group": "对话", "AsButton": False, # 加入下拉菜单中 - "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", - "Function": HotReload(清除缓存) 
+ "Function": HotReload(解析项目本身) }, - "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(连接网络回答问题) - }, - "连接网络回答问题(中文Bing版,输入问题后点击该插件)": { - "Color": "primary", - "AsButton": False, # 加入下拉菜单中 - "Function": HotReload(连接bing搜索回答问题) - }, - "询问多个GPT模型(手动指定询问哪些模型)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示 - "Function": HotReload(同时问询_指定模型) - }, - "图片生成(先切换模型到openai或api2d)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 - "Info": "图片生成 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成) - }, - "批量总结音视频(输入路径或上传压缩包)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", - "Info": "批量总结音频或视频 | 输入参数为路径", - "Function": HotReload(总结音视频) - }, - "数学动画生成(Manim)": { - "Color": "primary", - "AsButton": False, - "Function": HotReload(动画生成) - }, - "构建知识库(请先上传文件素材)": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "待注入的知识库名称id, 默认为default", - "Function": HotReload(知识库问答) - }, - "知识库问答": { - "Color": "primary", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库", - "Function": HotReload(读取知识库作答) - }, - "交互功能模板函数": { - "Color": "primary", - "AsButton": False, - "Function": HotReload(交互功能模板函数) - }, - "实时音频采集": { - "Color": "primary", + "[插件demo]历史上的今天": { + "Group": "对话", "AsButton": True, - "Function": HotReload(语音助手) + "Function": HotReload(高阶功能模板函数) + }, + "精准翻译PDF论文": { + "Group": "学术", + "Color": "stop", + "AsButton": True, # 加入下拉菜单中 + "Function": HotReload(批量翻译PDF文档) + }, + "询问多个GPT模型": { + "Group": "对话", + "Color": "stop", + "AsButton": True, + "Function": 
HotReload(同时问询) + }, + "批量总结PDF文档": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(批量总结PDF文档) + }, + "谷歌学术检索助手(输入谷歌学术搜索页url)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(谷歌检索小助手) + }, + "理解PDF文档内容 (模仿ChatPDF)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(理解PDF文档内容标准文件输入) + }, + "英文Latex项目全文润色(输入路径或上传压缩包)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英文润色) + }, + "英文Latex项目全文纠错(输入路径或上传压缩包)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英文纠错) + }, + "中文Latex项目全文润色(输入路径或上传压缩包)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex中文润色) + }, + "Latex项目全文中译英(输入路径或上传压缩包)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex中译英) + }, + "Latex项目全文英译中(输入路径或上传压缩包)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Latex英译中) + }, + "批量Markdown中译英(输入路径或上传压缩包)": { + "Group": "编程", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(Markdown中译英) }, - "自动终端": { - "Color": "primary", - "AsButton": False, - "Function": HotReload(自动终端) - } } + + # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=- + try: + from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 + function_plugins.update({ + "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(下载arxiv论文并翻译摘要) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.联网的ChatGPT import 连接网络回答问题 + function_plugins.update({ + "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(连接网络回答问题) + } + }) + from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题 + 
function_plugins.update({ + "连接网络回答问题(中文Bing版,输入问题后点击该插件)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, # 加入下拉菜单中 + "Function": HotReload(连接bing搜索回答问题) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.解析项目源代码 import 解析任意code项目 + function_plugins.update({ + "解析项目源代码(手动指定和筛选源代码文件类型)": { + "Group": "编程", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示 + "Function": HotReload(解析任意code项目) + }, + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.询问多个大语言模型 import 同时问询_指定模型 + function_plugins.update({ + "询问多个GPT模型(手动指定询问哪些模型)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示 + "Function": HotReload(同时问询_指定模型) + }, + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.图片生成 import 图片生成 + function_plugins.update({ + "图片生成(先切换模型到openai或api2d)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 + "Info": "图片生成 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成) + }, + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.总结音视频 import 总结音视频 + function_plugins.update({ + "批量总结音视频(输入路径或上传压缩包)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", + "Info": "批量总结音频或视频 | 输入参数为路径", + "Function": HotReload(总结音视频) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.数学动画生成manim import 动画生成 + function_plugins.update({ + "数学动画生成(Manim)": { + 
"Group": "对话", + "Color": "stop", + "AsButton": False, + "Function": HotReload(动画生成) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 + function_plugins.update({ + "Markdown翻译(手动指定语言)": { + "Group": "编程", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。", + "Function": HotReload(Markdown翻译指定语言) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.Langchain知识库 import 知识库问答 + function_plugins.update({ + "构建知识库(请先上传文件素材)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "待注入的知识库名称id, 默认为default", + "Function": HotReload(知识库问答) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.Langchain知识库 import 读取知识库作答 + function_plugins.update({ + "知识库问答": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库", + "Function": HotReload(读取知识库作答) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.交互功能函数模板 import 交互功能模板函数 + function_plugins.update({ + "交互功能模板函数": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "Function": HotReload(交互功能模板函数) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比 + function_plugins.update({ + "Latex英文纠错+高亮修正位置 [需Latex]": { + "Group": "学术", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。", + "Function": HotReload(Latex英文纠错加PDF对比) + } + }) + from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF + function_plugins.update({ + "Arixv论文精细翻译(输入arxivID)[需Latex]": { + "Group": "学术", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": + "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + + "例如当单词'agent'翻译不准确时, 
请尝试把以下指令复制到高级参数区: " + + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "Function": HotReload(Latex翻译中文并重新编译PDF) + } + }) + function_plugins.update({ + "本地Latex论文精细翻译(上传Latex项目)[需Latex]": { + "Group": "学术", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": + "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "Function": HotReload(Latex翻译中文并重新编译PDF) + } + }) + except: + print('Load function plugin failed') + + try: + from toolbox import get_conf + ENABLE_AUDIO, = get_conf('ENABLE_AUDIO') + if ENABLE_AUDIO: + from crazy_functions.语音助手 import 语音助手 + function_plugins.update({ + "实时音频采集": { + "Group": "对话", + "Color": "stop", + "AsButton": True, + "Function": HotReload(语音助手) + } + }) + except: + print('Load function plugin failed') + + try: + from crazy_functions.虚空终端 import 自动终端 + function_plugins.update({ + "自动终端": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "Function": HotReload(自动终端) + } + }) + except: + print('Load function plugin failed') + # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 - # function_plugins['多功能'].update({ + # function_plugins.update({ # "黑盒模型学习: 微调数据集生成 (先上传数据集)": { - # "Color": "primary", + # "Color": "stop", # "AsButton": False, # "AdvancedArgs": True, # "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求:100字以内,用第二人称。' --system_prompt=''", @@ -370,3 +499,13 @@ def get_functions_多功能插件(): # except: # print('Load function plugin failed') + regroupped_functions = {} + for name, function_meta in function_plugins.items(): + if "Group" in function_meta: + groups = function_meta["Group"].split('|') + else: + groups = ['对话'] + for gruop in groups: + regroupped_functions[gruop] = regroupped_functions.get(gruop, {}) + 
regroupped_functions[gruop].update({name: function_meta}) + return regroupped_functions diff --git a/main.py b/main.py index ced4177..f21383b 100644 --- a/main.py +++ b/main.py @@ -89,14 +89,16 @@ def main(): if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" functional[k]["Button"] = gr.Button(k, variant=variant) + functional[k]["Button"].style(size="sm") with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn: with gr.Row(): gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") - plugin_dropdown = gr.Dropdown(choices=crazy_classification, label='选择插件分类', - value=DEFAULT_FN_GROUPS, - multiselect=True, interactive=True, - elem_classes='normal_mut_select' - ).style(container=False) + with gr.Row(elem_id="input-plugin-group"): + plugin_dropdown = gr.Dropdown(choices=crazy_classification, label='', + value=DEFAULT_FN_GROUPS, + multiselect=True, interactive=True, + elem_classes='normal_mut_select', + ).style(container=False) with gr.Row(): for role in crazy_fns_role: for k in crazy_fns_role[role]: @@ -138,7 +140,6 @@ def main(): max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",) checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) - gr.Markdown(description) with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary: with gr.Row(): diff --git a/themes/common.css b/themes/common.css new file mode 100644 index 0000000..33bb4f0 --- /dev/null +++ b/themes/common.css @@ -0,0 +1,15 @@ +/* hide remove all button */ +.remove-all.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + visibility: hidden; +} + +/* hide selector border */ +#input-plugin-group 
.wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + border: 0px; + box-shadow: none; +} + +/* hide selector label */ +#input-plugin-group .svelte-1gfkn6j { + visibility: hidden; +} diff --git a/themes/common.js b/themes/common.js index dcdeb50..a56672d 100644 --- a/themes/common.js +++ b/themes/common.js @@ -39,7 +39,10 @@ function get_elements() { const panel4 = document.querySelector('#interact-panel'); const panel5 = document.querySelector('#input-panel2'); const panel_active = document.querySelector('#state-panel'); - var panel_height_target = (20-panel_active.offsetHeight) + panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight + 21; + // 25.3 是chatbot的label高度, 16 是右侧的gap + var panel_height_target = panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight - 25.5 + 16*3; + // 禁止动态的state-panel高度影响 + panel_height_target = panel_height_target + (21-panel_active.offsetHeight) var panel_height_target = parseInt(panel_height_target); var chatbot_height = chatbot.style.height; var chatbot_height = parseInt(chatbot_height); diff --git a/themes/contrast.css b/themes/contrast.css new file mode 100644 index 0000000..54a1b2b --- /dev/null +++ b/themes/contrast.css @@ -0,0 +1,482 @@ +:root { + --body-text-color: #FFFFFF; + --link-text-color: #FFFFFF; + --link-text-color-active: #FFFFFF; + --link-text-color-hover: #FFFFFF; + --link-text-color-visited: #FFFFFF; + --body-text-color-subdued: #FFFFFF; + --block-info-text-color: #FFFFFF; + --block-label-text-color: #FFFFFF; + --block-title-text-color: #FFFFFF; + --checkbox-label-text-color: #FFFFFF; + --checkbox-label-text-color-selected: #FFFFFF; + --error-text-color: #FFFFFF; + --button-cancel-text-color: #FFFFFF; + --button-cancel-text-color-hover: #FFFFFF; + --button-primary-text-color: #FFFFFF; + --button-primary-text-color-hover: #FFFFFF; + --button-secondary-text-color: #FFFFFF; + --button-secondary-text-color-hover: 
#FFFFFF; + + + --border-bottom-right-radius: 0px; + --border-bottom-left-radius: 0px; + --border-top-right-radius: 0px; + --border-top-left-radius: 0px; + --block-radius: 0px; + --button-large-radius: 0px; + --button-small-radius: 0px; + --block-background-fill: #000000; + + --border-color-accent: #3cff00; + --border-color-primary: #3cff00; + --block-border-color: #3cff00; + --block-label-border-color: #3cff00; + --block-title-border-color: #3cff00; + --panel-border-color: #3cff00; + --checkbox-border-color: #3cff00; + --checkbox-border-color-focus: #3cff00; + --checkbox-border-color-hover: #3cff00; + --checkbox-border-color-selected: #3cff00; + --checkbox-label-border-color: #3cff00; + --checkbox-label-border-color-hover: #3cff00; + --error-border-color: #3cff00; + --input-border-color: #3cff00; + --input-border-color-focus: #3cff00; + --input-border-color-hover: #3cff00; + --table-border-color: #3cff00; + --button-cancel-border-color: #3cff00; + --button-cancel-border-color-hover: #3cff00; + --button-primary-border-color: #3cff00; + --button-primary-border-color-hover: #3cff00; + --button-secondary-border-color: #3cff00; + --button-secondary-border-color-hover: #3cff00; + + + --body-background-fill: #000000; + --background-fill-primary: #000000; + --background-fill-secondary: #000000; + --block-background-fill: #000000; + --block-label-background-fill: #000000; + --block-title-background-fill: #000000; + --panel-background-fill: #000000; + --chatbot-code-background-color: #000000; + --checkbox-background-color: #000000; + --checkbox-background-color-focus: #000000; + --checkbox-background-color-hover: #000000; + --checkbox-background-color-selected: #000000; + --checkbox-label-background-fill: #000000; + --checkbox-label-background-fill-hover: #000000; + --checkbox-label-background-fill-selected: #000000; + --error-background-fill: #000000; + --input-background-fill: #000000; + --input-background-fill-focus: #000000; + --input-background-fill-hover: #000000; + 
--stat-background-fill: #000000; + --table-even-background-fill: #000000; + --table-odd-background-fill: #000000; + --button-cancel-background-fill: #000000; + --button-cancel-background-fill-hover: #000000; + --button-primary-background-fill: #000000; + --button-primary-background-fill-hover: #000000; + --button-secondary-background-fill: #000000; + --button-secondary-background-fill-hover: #000000; + --color-accent-soft: #000000; +} + +.dark { + --body-text-color: #FFFFFF; + --link-text-color: #FFFFFF; + --link-text-color-active: #FFFFFF; + --link-text-color-hover: #FFFFFF; + --link-text-color-visited: #FFFFFF; + --body-text-color-subdued: #FFFFFF; + --block-info-text-color: #FFFFFF; + --block-label-text-color: #FFFFFF; + --block-title-text-color: #FFFFFF; + --checkbox-label-text-color: #FFFFFF; + --checkbox-label-text-color-selected: #FFFFFF; + --error-text-color: #FFFFFF; + --button-cancel-text-color: #FFFFFF; + --button-cancel-text-color-hover: #FFFFFF; + --button-primary-text-color: #FFFFFF; + --button-primary-text-color-hover: #FFFFFF; + --button-secondary-text-color: #FFFFFF; + --button-secondary-text-color-hover: #FFFFFF; + + + + --border-bottom-right-radius: 0px; + --border-bottom-left-radius: 0px; + --border-top-right-radius: 0px; + --border-top-left-radius: 0px; + --block-radius: 0px; + --button-large-radius: 0px; + --button-small-radius: 0px; + --block-background-fill: #000000; + + --border-color-accent: #3cff00; + --border-color-primary: #3cff00; + --block-border-color: #3cff00; + --block-label-border-color: #3cff00; + --block-title-border-color: #3cff00; + --panel-border-color: #3cff00; + --checkbox-border-color: #3cff00; + --checkbox-border-color-focus: #3cff00; + --checkbox-border-color-hover: #3cff00; + --checkbox-border-color-selected: #3cff00; + --checkbox-label-border-color: #3cff00; + --checkbox-label-border-color-hover: #3cff00; + --error-border-color: #3cff00; + --input-border-color: #3cff00; + --input-border-color-focus: #3cff00; + 
--input-border-color-hover: #3cff00; + --table-border-color: #3cff00; + --button-cancel-border-color: #3cff00; + --button-cancel-border-color-hover: #3cff00; + --button-primary-border-color: #3cff00; + --button-primary-border-color-hover: #3cff00; + --button-secondary-border-color: #3cff00; + --button-secondary-border-color-hover: #3cff00; + + + --body-background-fill: #000000; + --background-fill-primary: #000000; + --background-fill-secondary: #000000; + --block-background-fill: #000000; + --block-label-background-fill: #000000; + --block-title-background-fill: #000000; + --panel-background-fill: #000000; + --chatbot-code-background-color: #000000; + --checkbox-background-color: #000000; + --checkbox-background-color-focus: #000000; + --checkbox-background-color-hover: #000000; + --checkbox-background-color-selected: #000000; + --checkbox-label-background-fill: #000000; + --checkbox-label-background-fill-hover: #000000; + --checkbox-label-background-fill-selected: #000000; + --error-background-fill: #000000; + --input-background-fill: #000000; + --input-background-fill-focus: #000000; + --input-background-fill-hover: #000000; + --stat-background-fill: #000000; + --table-even-background-fill: #000000; + --table-odd-background-fill: #000000; + --button-cancel-background-fill: #000000; + --button-cancel-background-fill-hover: #000000; + --button-primary-background-fill: #000000; + --button-primary-background-fill-hover: #000000; + --button-secondary-background-fill: #000000; + --button-secondary-background-fill-hover: #000000; + --color-accent-soft: #000000; +} + + + +.block.svelte-mppz8v { + border-color: #3cff00; +} + +/* 插件下拉菜单 */ +#plugin-panel .wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + box-shadow: var(--input-shadow); + border: var(--input-border-width) dashed var(--border-color-primary); + border-radius: 4px; +} + +#plugin-panel .dropdown-arrow.svelte-p5edak { + width: 50px; +} +#plugin-panel input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + 
padding-left: 5px; +} +.root{ + border-bottom-right-radius: 0px; + border-bottom-left-radius: 0px; + border-top-right-radius: 0px; + border-top-left-radius: 0px; +} + +/* 小按钮 */ +.sm.svelte-1ipelgc { + font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; + --button-small-text-weight: 600; + --button-small-text-size: 16px; + border-bottom-right-radius: 0px; + border-bottom-left-radius: 0px; + border-top-right-radius: 0px; + border-top-left-radius: 0px; +} + +#plugin-panel .sm.svelte-1ipelgc { + font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; + --button-small-text-weight: 400; + --button-small-text-size: 14px; + border-bottom-right-radius: 0px; + border-bottom-left-radius: 0px; + border-top-right-radius: 0px; + border-top-left-radius: 0px; +} + +.wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + padding: 0%; +} + +.markdown-body table { + margin: 1em 0; + border-collapse: collapse; + empty-cells: show; +} + +.markdown-body th, .markdown-body td { + border: 1.2px solid var(--border-color-primary); + padding: 5px; +} + +.markdown-body thead { + background-color: rgb(0, 0, 0); +} + +.markdown-body thead th { + padding: .5em .2em; +} + +.normal_mut_select .svelte-1gfkn6j { + float: left; + width: auto; + line-height: 260% !important; +} + +.markdown-body ol, .markdown-body ul { + padding-inline-start: 2em !important; +} + +/* chat box. 
*/ +[class *= "message"] { + border-radius: var(--radius-xl) !important; + /* padding: var(--spacing-xl) !important; */ + /* font-size: var(--text-md) !important; */ + /* line-height: var(--line-md) !important; */ + /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */ + /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */ +} +[data-testid = "bot"] { + max-width: 95%; + /* width: auto !important; */ + border-bottom-left-radius: 0 !important; +} +[data-testid = "user"] { + max-width: 100%; + /* width: auto !important; */ + border-bottom-right-radius: 0 !important; +} + +/* linein code block. */ +.markdown-body code { + display: inline; + white-space: break-spaces; + border-radius: 6px; + margin: 0 2px 0 2px; + padding: .2em .4em .1em .4em; + background-color: rgba(0, 0, 0, 0.95); + color: #c9d1d9; +} + +.dark .markdown-body code { + display: inline; + white-space: break-spaces; + border-radius: 6px; + margin: 0 2px 0 2px; + padding: .2em .4em .1em .4em; + background-color: rgba(0,0,0,0.2); +} + +/* code block css */ +.markdown-body pre code { + display: block; + overflow: auto; + white-space: pre; + background-color: rgba(0, 0, 0, 0.95); + border-radius: 10px; + padding: 1em; + margin: 1em 2em 1em 0.5em; +} + +.dark .markdown-body pre code { + display: block; + overflow: auto; + white-space: pre; + background-color: rgba(0,0,0,0.2); + border-radius: 10px; + padding: 1em; + margin: 1em 2em 1em 0.5em; +} + +/* .mic-wrap.svelte-1thnwz { + +} */ +.block.svelte-mppz8v > .mic-wrap.svelte-1thnwz{ + justify-content: center; + display: flex; + padding: 0; + +} + +.codehilite .hll { background-color: #6e7681 } +.codehilite .c { color: #8b949e; font-style: italic } /* Comment */ +.codehilite .err { color: #f85149 } /* Error */ +.codehilite .esc { color: #c9d1d9 } /* Escape */ +.codehilite .g { color: #c9d1d9 } /* Generic */ +.codehilite .k { color: #ff7b72 } /* Keyword */ +.codehilite .l { color: #a5d6ff } /* Literal */ 
+.codehilite .n { color: #c9d1d9 } /* Name */ +.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */ +.codehilite .x { color: #c9d1d9 } /* Other */ +.codehilite .p { color: #c9d1d9 } /* Punctuation */ +.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */ +.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */ +.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */ +.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */ +.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */ +.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */ +.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */ +.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */ +.codehilite .gr { color: #ffa198 } /* Generic.Error */ +.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */ +.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */ +.codehilite .go { color: #8b949e } /* Generic.Output */ +.codehilite .gp { color: #8b949e } /* Generic.Prompt */ +.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */ +.codehilite .gu { color: #79c0ff } /* Generic.Subheading */ +.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */ +.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */ +.codehilite .kc { color: #79c0ff } /* Keyword.Constant */ +.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */ +.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */ +.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */ +.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */ +.codehilite .kt { color: #ff7b72 } /* Keyword.Type */ +.codehilite .ld { color: #79c0ff } /* Literal.Date */ +.codehilite .m { color: #a5d6ff } /* Literal.Number */ +.codehilite .s { 
color: #a5d6ff } /* Literal.String */ +.codehilite .na { color: #c9d1d9 } /* Name.Attribute */ +.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */ +.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */ +.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */ +.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */ +.codehilite .ni { color: #ffa657 } /* Name.Entity */ +.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */ +.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */ +.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */ +.codehilite .nn { color: #ff7b72 } /* Name.Namespace */ +.codehilite .nx { color: #c9d1d9 } /* Name.Other */ +.codehilite .py { color: #79c0ff } /* Name.Property */ +.codehilite .nt { color: #7ee787 } /* Name.Tag */ +.codehilite .nv { color: #79c0ff } /* Name.Variable */ +.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */ +.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */ +.codehilite .w { color: #6e7681 } /* Text.Whitespace */ +.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */ +.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */ +.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */ +.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */ +.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */ +.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */ +.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */ +.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */ +.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */ +.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */ +.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */ +.codehilite .se { color: #79c0ff } /* Literal.String.Escape */ +.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */ +.codehilite .si { color: #a5d6ff } /* 
Literal.String.Interpol */ +.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */ +.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */ +.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */ +.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */ +.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */ +.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */ +.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */ +.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */ +.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */ +.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */ +.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */ + +.dark .codehilite .hll { background-color: #2C3B41 } +.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */ +.dark .codehilite .err { color: #FF5370 } /* Error */ +.dark .codehilite .esc { color: #89DDFF } /* Escape */ +.dark .codehilite .g { color: #EEFFFF } /* Generic */ +.dark .codehilite .k { color: #BB80B3 } /* Keyword */ +.dark .codehilite .l { color: #C3E88D } /* Literal */ +.dark .codehilite .n { color: #EEFFFF } /* Name */ +.dark .codehilite .o { color: #89DDFF } /* Operator */ +.dark .codehilite .p { color: #89DDFF } /* Punctuation */ +.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */ +.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */ +.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */ +.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */ +.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */ +.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */ +.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */ +.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */ +.dark .codehilite .gr { color: #FF5370 } 
/* Generic.Error */ +.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */ +.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */ +.dark .codehilite .go { color: #79d618 } /* Generic.Output */ +.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */ +.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */ +.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */ +.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */ +.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */ +.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */ +.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */ +.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */ +.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */ +.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */ +.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */ +.dark .codehilite .m { color: #F78C6C } /* Literal.Number */ +.dark .codehilite .s { color: #C3E88D } /* Literal.String */ +.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */ +.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */ +.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */ +.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */ +.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */ +.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */ +.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */ +.dark .codehilite .nf { color: #82AAFF } /* Name.Function */ +.dark .codehilite .nl { color: #82AAFF } /* Name.Label */ +.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */ +.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */ +.dark .codehilite .py { color: #FFCB6B } /* Name.Property */ +.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */ +.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */ +.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word 
*/ +.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */ +.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */ +.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */ +.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */ +.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */ +.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */ +.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */ +.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */ +.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */ +.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */ +.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */ +.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */ +.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */ +.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */ +.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */ +.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */ +.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */ +.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */ +.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */ +.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */ +.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */ +.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */ +.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */ +.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */ +.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */ +.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */ +.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */ + diff --git a/themes/contrast.py b/themes/contrast.py new file mode 100644 index 0000000..fd4ef04 --- 
/dev/null +++ b/themes/contrast.py @@ -0,0 +1,88 @@ +import gradio as gr +from toolbox import get_conf +CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT') + +def adjust_theme(): + + try: + color_er = gr.themes.utils.colors.fuchsia + set_theme = gr.themes.Default( + primary_hue=gr.themes.utils.colors.orange, + neutral_hue=gr.themes.utils.colors.gray, + font=["Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"], + font_mono=["ui-monospace", "Consolas", "monospace"]) + set_theme.set( + # Colors + input_background_fill_dark="*neutral_800", + # Transition + button_transition="none", + # Shadows + button_shadow="*shadow_drop", + button_shadow_hover="*shadow_drop_lg", + button_shadow_active="*shadow_inset", + input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset", + input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset", + input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset", + checkbox_label_shadow="*shadow_drop", + block_shadow="*shadow_drop", + form_gap_width="1px", + # Button borders + input_border_width="1px", + input_background_fill="white", + # Gradients + stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)", + stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)", + error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)", + error_background_fill_dark="*background_fill_primary", + checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)", + checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)", + checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)", + checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)", + button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)", + 
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)", + button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)", + button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)", + button_primary_border_color_dark="*primary_500", + button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)", + button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)", + button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)", + button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)", + button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})", + button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})", + button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})", + button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})", + button_cancel_border_color=color_er.c200, + button_cancel_border_color_dark=color_er.c600, + button_cancel_text_color=color_er.c600, + button_cancel_text_color_dark="white", + ) + + if LAYOUT=="TOP-DOWN": + js = "" + else: + with open('themes/common.js', 'r', encoding='utf8') as f: + js = f"" + + # 添加一个萌萌的看板娘 + if ADD_WAIFU: + js += """ + + + + """ + gradio_original_template_fn = gr.routes.templates.TemplateResponse + def gradio_new_template_fn(*args, **kwargs): + res = gradio_original_template_fn(*args, **kwargs) + res.body = res.body.replace(b'', f'{js}'.encode("utf8")) + res.init_headers() + return res + gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template + except: + set_theme = None + print('gradio版本较旧, 不能自定义字体和颜色') + return 
set_theme + +with open("themes/contrast.css", "r", encoding="utf-8") as f: + advanced_css = f.read() +with open("themes/common.css", "r", encoding="utf-8") as f: + advanced_css += f.read() diff --git a/themes/default.css b/themes/default.css index 5fba4af..a35cd1d 100644 --- a/themes/default.css +++ b/themes/default.css @@ -1,3 +1,46 @@ +.dark { + --background-fill-primary: #050810; + --body-background-fill: var(--background-fill-primary); +} +/* 插件下拉菜单 */ +#plugin-panel .wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + box-shadow: var(--input-shadow); + border: var(--input-border-width) dashed var(--border-color-primary); + border-radius: 4px; +} + +#plugin-panel .dropdown-arrow.svelte-p5edak { + width: 50px; +} +#plugin-panel input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + padding-left: 5px; +} + +/* 小按钮 */ +.sm.svelte-1ipelgc { + font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; + --button-small-text-weight: 600; + --button-small-text-size: 16px; + border-bottom-right-radius: 6px; + border-bottom-left-radius: 6px; + border-top-right-radius: 6px; + border-top-left-radius: 6px; +} + +#plugin-panel .sm.svelte-1ipelgc { + font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"; + --button-small-text-weight: 400; + --button-small-text-size: 14px; + border-bottom-right-radius: 6px; + border-bottom-left-radius: 6px; + border-top-right-radius: 6px; + border-top-left-radius: 6px; +} + +.wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { + padding: 0%; +} + .markdown-body table { margin: 1em 0; border-collapse: collapse; @@ -16,6 +59,7 @@ .markdown-body thead th { padding: .5em .2em; } + .normal_mut_select .svelte-1gfkn6j { float: left; width: auto; diff --git a/themes/default.py b/themes/default.py index 4efde5b..2611e7a 100644 --- a/themes/default.py +++ b/themes/default.py @@ -9,7 +9,7 @@ def adjust_theme(): set_theme = gr.themes.Default( 
primary_hue=gr.themes.utils.colors.orange, neutral_hue=gr.themes.utils.colors.gray, - font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui"], + font=["Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"], font_mono=["ui-monospace", "Consolas", "monospace"]) set_theme.set( # Colors @@ -83,4 +83,6 @@ def adjust_theme(): return set_theme with open("themes/default.css", "r", encoding="utf-8") as f: - advanced_css = f.read() \ No newline at end of file + advanced_css = f.read() +with open("themes/common.css", "r", encoding="utf-8") as f: + advanced_css += f.read() diff --git a/themes/green.py b/themes/green.py index e14f4b6..5aa9e8b 100644 --- a/themes/green.py +++ b/themes/green.py @@ -106,3 +106,5 @@ def adjust_theme(): with open("themes/green.css", "r", encoding="utf-8") as f: advanced_css = f.read() +with open("themes/common.css", "r", encoding="utf-8") as f: + advanced_css += f.read() diff --git a/themes/theme.py b/themes/theme.py index c08d9bb..5cba541 100644 --- a/themes/theme.py +++ b/themes/theme.py @@ -5,6 +5,9 @@ THEME, = get_conf('THEME') if THEME == 'Chuanhu-Small-and-Beautiful': from .green import adjust_theme, advanced_css theme_declaration = "

[Chuanhu-Small-and-Beautiful主题]

" +elif THEME == 'High-Contrast': + from .contrast import adjust_theme, advanced_css + theme_declaration = "" else: from .default import adjust_theme, advanced_css theme_declaration = "" From b31abbcad352cf773a7b512d129f3be9f97ba2ad Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 31 Aug 2023 15:59:19 +0800 Subject: [PATCH 04/47] =?UTF-8?q?=E6=AF=8F=E4=B8=AA=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E5=BD=92=E5=B1=9E=E5=A4=9A=E4=B8=AAGroup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 30 ++++++++---- main.py | 110 ++++++++++++++++++-------------------------- 2 files changed, 64 insertions(+), 76 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index ee3ed99..e51dec0 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -168,7 +168,7 @@ def get_crazy_functions(): "Function": HotReload(对话历史存档) }, "[多线程Demo]解析此项目本身(源码自译解)": { - "Group": "对话", + "Group": "对话|编程", "AsButton": False, # 加入下拉菜单中 "Function": HotReload(解析项目本身) }, @@ -499,13 +499,23 @@ def get_crazy_functions(): # except: # print('Load function plugin failed') - regroupped_functions = {} + + + """ + 设置默认值: + - 默认 Group = 对话 + - 默认 AsButton = True + - 默认 AdvancedArgs = False + - 默认 Color = secondary + """ for name, function_meta in function_plugins.items(): - if "Group" in function_meta: - groups = function_meta["Group"].split('|') - else: - groups = ['对话'] - for gruop in groups: - regroupped_functions[gruop] = regroupped_functions.get(gruop, {}) - regroupped_functions[gruop].update({name: function_meta}) - return regroupped_functions + if "Group" not in function_meta: + function_plugins[name]["Group"] = '对话' + if "AsButton" not in function_meta: + function_plugins[name]["AsButton"] = True + if "AdvancedArgs" not in function_meta: + function_plugins[name]["AdvancedArgs"] = False + if "Color" not in function_meta: + function_plugins[name]["Color"] = 'secondary' + + return 
function_plugins diff --git a/main.py b/main.py index f21383b..50028c2 100644 --- a/main.py +++ b/main.py @@ -5,19 +5,20 @@ def main(): # if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt" from request_llm.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith + # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') + proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') + CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT') + # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT - if not AUTHENTICATION: AUTHENTICATION = None - from check_proxy import get_current_version from themes.theme import adjust_theme, advanced_css, theme_declaration initial_prompt = "Serve me as a writing and programming assistant." title_html = f"

GPT 学术优化 {get_current_version()}

{theme_declaration}" - description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)""" + description = "代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic)," + description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)" # 问询记录, python 版本建议3.9+(越新越好) import logging, uuid @@ -35,12 +36,9 @@ def main(): # 高级函数插件 from crazy_functional import get_crazy_functions DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS') - crazy_fns_role = get_crazy_functions() - crazy_classification = [i for i in crazy_fns_role] - crazy_fns = {} - for role in crazy_fns_role: - for k in crazy_fns_role[role]: - crazy_fns[k] = crazy_fns_role[role][k] + plugins = get_crazy_functions() + all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')])) + match_group = lambda tags, groups: any([g in groups for g in tags.split('|')]) # 处理markdown文本格式的转变 gr.Chatbot.postprocess = format_io @@ -94,35 +92,21 @@ def main(): with gr.Row(): gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") with gr.Row(elem_id="input-plugin-group"): - plugin_dropdown = gr.Dropdown(choices=crazy_classification, label='', - value=DEFAULT_FN_GROUPS, - multiselect=True, interactive=True, - elem_classes='normal_mut_select', - ).style(container=False) + plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS, + multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False) with gr.Row(): - for role in crazy_fns_role: - for k in crazy_fns_role[role]: - if not crazy_fns_role[role][k].get("AsButton", True): continue - if role not in DEFAULT_FN_GROUPS: - variant = crazy_fns_role[role][k]["Color"] if "Color" in crazy_fns_role[role][ - k] else "secondary" - crazy_fns_role[role][k]['Button'] = gr.Button(k, variant=variant, - visible=False).style(size="sm") - else: - variant = 
crazy_fns[k]["Color"] if "Color" in crazy_fns_role[role][ - k] else "secondary" - crazy_fns_role[role][k]['Button'] = gr.Button(k, variant=variant, - visible=True).style(size="sm") + for k, plugin in plugins.items(): + if not plugin.get("AsButton", True): continue + visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False + variant = plugins[k]["Color"] if "Color" in plugin else "secondary" + plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible).style(size="sm") with gr.Row(): with gr.Accordion("更多函数插件", open=True): dropdown_fn_list = [] - for role in crazy_fns_role: - if role in DEFAULT_FN_GROUPS: - for k in crazy_fns_role[role]: - if not crazy_fns_role[role][k].get("AsButton", True): - dropdown_fn_list.append(k) - elif crazy_fns_role[role][k].get('AdvancedArgs', False): - dropdown_fn_list.append(k) + for k, plugin in plugins.items(): + if not match_group(plugin['Group'], DEFAULT_FN_GROUPS): continue + if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件 + elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示 with gr.Row(): dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False) with gr.Row(): @@ -150,6 +134,7 @@ def main(): resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm") + # 功能区显示开关与功能区的互动 def fn_area_visibility(a): ret = {} @@ -189,17 +174,17 @@ def main(): # 文件上传区,接收文件后与chatbot的互动 file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2]) # 函数插件-固定按钮区 - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], 
output_combo) + for k in plugins: + if not plugins[k].get("AsButton", True): continue + click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo) click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]) cancel_handles.append(click_handle) # 函数插件-下拉菜单与随变按钮的互动 def on_dropdown_changed(k): - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" + variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary" ret = {switchy_bt: gr.update(value=k, variant=variant)} - if crazy_fns[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区 - ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + crazy_fns[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))}) + if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区 + ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))}) else: ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")}) return ret @@ -210,35 +195,26 @@ def main(): # 随变按钮的回调函数注册 def route(request: gr.Request, k, *args, **kwargs): if k in [r"打开插件列表", r"请先从插件列表中选择"]: return - yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(request, *args, **kwargs) + yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs) click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo) click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]) cancel_handles.append(click_handle) # 终止按钮的回调函数注册 stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - fn_btn_dict = {crazy_fns_role[role][k]['Button']: {role: k} for role in crazy_fns_role for k in crazy_fns_role[role] if crazy_fns_role[role][k].get('Button')} - def 
show_plugin_btn(plu_list): - new_btn_list = [] + plugins_as_btn = {name:plugin for name, plugin in plugins.items() if plugin.get('Button', None)} + def on_group_change(group_list): + btn_list = [] fns_list = [] - if not plu_list: - return [*[fns.update(visible=False) for fns in fn_btn_dict], gr.Dropdown.update(choices=[])] - else: - for fns in fn_btn_dict: - if list(fn_btn_dict[fns].keys())[0] in plu_list: - new_btn_list.append(fns.update(visible=True)) - else: - new_btn_list.append(fns.update(visible=False)) - for role in crazy_fns_role: - if role in plu_list: - for k in crazy_fns_role[role]: - if not crazy_fns_role[role][k].get("AsButton", True): - fns_list.append(k) - elif crazy_fns_role[role][k].get('AdvancedArgs', False): - fns_list.append(k) - return [*new_btn_list, gr.Dropdown.update(choices=fns_list)] - plugin_dropdown.select(fn=show_plugin_btn, inputs=[plugin_dropdown], - outputs=[*fn_btn_dict.keys(), dropdown]) + if not group_list: # 处理特殊情况:没有选择任何插件组 + return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])] + for k, plugin in plugins.items(): + if plugin.get("AsButton", True): + btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮 + if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示 + elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表 + return [*btn_list, gr.Dropdown.update(choices=fns_list)] + plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown]) if ENABLE_AUDIO: from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution rad = RealtimeAudioDistribution() @@ -270,8 +246,10 @@ def main(): auto_opentab_delay() demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", server_port=PORT, - favicon_path="docs/logo.png", auth=AUTHENTICATION, + 
server_name="0.0.0.0", + server_port=PORT, + favicon_path="docs/logo.png", + auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) # 如果需要在二级路径下运行 From b69140307bccaa486e8d6449528b99e55ad5aef8 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 31 Aug 2023 16:24:00 +0800 Subject: [PATCH 05/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=AF=B9=E8=AF=9D?= =?UTF-8?q?=E6=A1=86=E5=AF=B9=E9=BD=90=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- themes/common.js | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/themes/common.js b/themes/common.js index a56672d..7733c7b 100644 --- a/themes/common.js +++ b/themes/common.js @@ -1,6 +1,6 @@ function ChatBotHeight() { function update_height(){ - var { panel_height_target, chatbot_height, chatbot } = get_elements(); + var { panel_height_target, chatbot_height, chatbot } = get_elements(true); if (panel_height_target!=chatbot_height) { var pixelString = panel_height_target.toString() + 'px'; @@ -28,21 +28,24 @@ function ChatBotHeight() { }, 50); // 每100毫秒执行一次 } -function get_elements() { +function get_elements(consider_state_panel=false) { var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq'); if (!chatbot) { chatbot = document.querySelector('#gpt-chatbot'); } - const panel1 = document.querySelector('#input-panel'); - const panel2 = document.querySelector('#basic-panel'); - const panel3 = document.querySelector('#plugin-panel'); - const panel4 = document.querySelector('#interact-panel'); - const panel5 = document.querySelector('#input-panel2'); - const panel_active = document.querySelector('#state-panel'); - // 25.3 是chatbot的label高度, 16 是右侧的gap - var panel_height_target = panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight - 25.5 + 16*3; + 
const panel1 = document.querySelector('#input-panel').getBoundingClientRect(); + const panel2 = document.querySelector('#basic-panel').getBoundingClientRect() + const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect(); + const panel4 = document.querySelector('#interact-panel').getBoundingClientRect(); + const panel5 = document.querySelector('#input-panel2').getBoundingClientRect(); + const panel_active = document.querySelector('#state-panel').getBoundingClientRect(); + if (consider_state_panel || panel_active.height < 25){ + document.state_panel_height = panel_active.height; + } + // 25 是chatbot的label高度, 16 是右侧的gap + var panel_height_target = panel1.height + panel2.height + panel3.height + panel4.height + panel5.height - 25 + 16*3; // 禁止动态的state-panel高度影响 - panel_height_target = panel_height_target + (21-panel_active.offsetHeight) + panel_height_target = panel_height_target + (document.state_panel_height-panel_active.height) var panel_height_target = parseInt(panel_height_target); var chatbot_height = chatbot.style.height; var chatbot_height = parseInt(chatbot_height); From 8b0905c076d23336ef2839c54626756b44911bd9 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 31 Aug 2023 18:04:31 +0800 Subject: [PATCH 06/47] =?UTF-8?q?=E6=8F=90=E9=AB=98=E8=99=9A=E7=A9=BA?= =?UTF-8?q?=E7=BB=88=E7=AB=AF=E7=9A=84=E6=88=90=E5=8A=9F=E7=8E=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 30 ++++++++++++++++--- crazy_functions/json_fns/pydantic_io.py | 18 +++++++++--- crazy_functions/vt_fns/vt_call_plugin.py | 37 +++++++++++++++++------- crazy_functions/虚空终端.py | 37 +++++++++--------------- tests/test_plugins.py | 4 +-- version | 4 +-- 6 files changed, 83 insertions(+), 47 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index e51dec0..1388e34 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -140,6 +140,7 @@ def get_crazy_functions(): 
"Group": "学术", "Color": "stop", "AsButton": True, + "Info": "读取Tex论文并写摘要 | 输入参数为路径", "Function": HotReload(读文章写摘要) }, "翻译README或MD": { @@ -153,12 +154,14 @@ def get_crazy_functions(): "Group": "编程", "Color": "stop", "AsButton": False, + "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL", "Function": HotReload(Markdown英译中) }, "批量生成函数注释": { "Group": "编程", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "批量生成函数的注释 | 输入参数为路径", "Function": HotReload(批量生成函数注释) }, "保存当前的对话": { @@ -170,17 +173,20 @@ def get_crazy_functions(): "[多线程Demo]解析此项目本身(源码自译解)": { "Group": "对话|编程", "AsButton": False, # 加入下拉菜单中 + "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数", "Function": HotReload(解析项目本身) }, "[插件demo]历史上的今天": { "Group": "对话", "AsButton": True, + "Info": "查看历史上的今天事件 | 不需要输入参数", "Function": HotReload(高阶功能模板函数) }, "精准翻译PDF论文": { "Group": "学术", "Color": "stop", - "AsButton": True, # 加入下拉菜单中 + "AsButton": True, + "Info": "精准翻译PDF论文为中文 | 输入参数为路径", "Function": HotReload(批量翻译PDF文档) }, "询问多个GPT模型": { @@ -193,54 +199,63 @@ def get_crazy_functions(): "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "批量总结PDF文档的内容 | 输入参数为路径", "Function": HotReload(批量总结PDF文档) }, "谷歌学术检索助手(输入谷歌学术搜索页url)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL", "Function": HotReload(谷歌检索小助手) }, "理解PDF文档内容 (模仿ChatPDF)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径", "Function": HotReload(理解PDF文档内容标准文件输入) }, "英文Latex项目全文润色(输入路径或上传压缩包)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包", "Function": HotReload(Latex英文润色) }, "英文Latex项目全文纠错(输入路径或上传压缩包)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包", "Function": HotReload(Latex英文纠错) }, "中文Latex项目全文润色(输入路径或上传压缩包)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包", 
"Function": HotReload(Latex中文润色) }, "Latex项目全文中译英(输入路径或上传压缩包)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包", "Function": HotReload(Latex中译英) }, "Latex项目全文英译中(输入路径或上传压缩包)": { "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包", "Function": HotReload(Latex英译中) }, "批量Markdown中译英(输入路径或上传压缩包)": { "Group": "编程", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包", "Function": HotReload(Markdown中译英) }, } @@ -253,6 +268,7 @@ def get_crazy_functions(): "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695", "Function": HotReload(下载arxiv论文并翻译摘要) } }) @@ -266,6 +282,7 @@ def get_crazy_functions(): "Group": "对话", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题", "Function": HotReload(连接网络回答问题) } }) @@ -275,6 +292,7 @@ def get_crazy_functions(): "Group": "对话", "Color": "stop", "AsButton": False, # 加入下拉菜单中 + "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题", "Function": HotReload(连接bing搜索回答问题) } }) @@ -350,6 +368,7 @@ def get_crazy_functions(): "Group": "对话", "Color": "stop", "AsButton": False, + "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话", "Function": HotReload(动画生成) } }) @@ -437,6 +456,7 @@ def get_crazy_functions(): "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695", "Function": HotReload(Latex翻译中文并重新编译PDF) } }) @@ -450,6 +470,7 @@ def get_crazy_functions(): "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". 
', + "Info": "本地Latex论文精细翻译 | 输入参数是路径", "Function": HotReload(Latex翻译中文并重新编译PDF) } }) @@ -466,6 +487,7 @@ def get_crazy_functions(): "Group": "对话", "Color": "stop", "AsButton": True, + "Info": "开始语言对话 | 没有输入参数", "Function": HotReload(语音助手) } }) @@ -473,13 +495,13 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.虚空终端 import 自动终端 + from crazy_functions.虚空终端 import 虚空终端 function_plugins.update({ - "自动终端": { + "虚空终端": { "Group": "对话", "Color": "stop", "AsButton": False, - "Function": HotReload(自动终端) + "Function": HotReload(虚空终端) } }) except: diff --git a/crazy_functions/json_fns/pydantic_io.py b/crazy_functions/json_fns/pydantic_io.py index db92412..9bd39c3 100644 --- a/crazy_functions/json_fns/pydantic_io.py +++ b/crazy_functions/json_fns/pydantic_io.py @@ -37,10 +37,18 @@ Here is the output schema: {schema} ```""" + +PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSON instance that conforms to the JSON schema below. +``` +{schema} +```""" + + class GptJsonIO(): - def __init__(self, schema): + def __init__(self, schema, example_instruction=True): self.pydantic_object = schema + self.example_instruction = example_instruction self.format_instructions = self.generate_format_instructions() def generate_format_instructions(self): @@ -53,9 +61,11 @@ class GptJsonIO(): if "type" in reduced_schema: del reduced_schema["type"] # Ensure json in context is well-formed with double quotes. - schema_str = json.dumps(reduced_schema) - - return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) + if self.example_instruction: + schema_str = json.dumps(reduced_schema) + return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) + else: + return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str) def generate_output(self, text): # Greedy search for 1st json candidate. 
diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index c1c1976..8ee88b1 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -11,32 +11,47 @@ def read_avail_plugin_enum(): plugin_arr = get_crazy_functions() # remove plugins with out explaination plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v} - plugin_arr_info = {"F{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)} - plugin_arr_dict = {"F{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} + plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)} + plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2) prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt return prompt, plugin_arr_dict +def wrap_code(txt): + return f"\n```\n{txt}\n```\n" def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum() class Plugin(BaseModel): - plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F0000000000000") - plugin_arg: str = Field(description="The argument of the plugin. 
A path or url or empty.", default="") - + plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000") + reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfy user requirement most") # ⭐ ⭐ ⭐ 选择插件 yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0) gpt_json_io = GptJsonIO(Plugin) + gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n" + gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfy user requirement most"}\n""" + gpt_json_io.format_instructions += "The plugins you are authorized to use are listed below:\n" gpt_json_io.format_instructions += plugin_arr_enum_prompt - inputs = "Choose the correct plugin and extract plugin_arg, the user requirement is: \n\n" + \ - ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ - gpt_json_io.format_instructions + inputs = "Choose the correct plugin according to user requirements, the user requirement is: \n\n" + \ + ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions + run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) - + try: + gpt_reply = run_gpt_fn(inputs, "") + plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn) + except: + msg = "抱歉,当前的大语言模型无法理解您的需求。" + msg += "请求的Prompt为:\n" + wrap_code(inputs) + msg += "语言模型回复为:\n" + wrap_code(gpt_reply) + msg += "但您可以尝试再试一次\n" + yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) + return if plugin_sel.plugin_selection not in plugin_arr_dict: - msg = 
f'找不到合适插件执行该任务' + msg = "抱歉, 找不到合适插件执行该任务, 当前的大语言模型可能无法理解您的需求。" + msg += "请求的Prompt为:\n" + wrap_code(inputs) + msg += "语言模型回复为:\n" + wrap_code(gpt_reply) + msg += "但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) return diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index da16527..ef8efd5 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -46,7 +46,7 @@ def analyze_with_rule(txt): return is_certain, user_intention @CatchException -def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): +def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 @@ -57,7 +57,7 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt web_port 当前软件运行的端口号 """ history = [] # 清空历史,以免输入溢出 - chatbot.append(("自动终端状态: ", f"正在执行任务: {txt}")) + chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 初始化插件状态 @@ -67,21 +67,29 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt def update_vt_state(): # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数 - chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->自动终端' + chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端' chatbot._cookies['vt_state'] = pickle.dumps(state) # ⭐ ⭐ ⭐ 分析用户意图 is_certain, user_intention = analyze_with_rule(txt) if not is_certain: - yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0) + yield from update_ui_lastest_msg( + lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0) gpt_json_io = GptJsonIO(UserIntention) inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( 
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) + try: + user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) + except: + yield from update_ui_lastest_msg( + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型不能理解您的意图", chatbot=chatbot, history=history, delay=0) + return else: pass + yield from update_ui_lastest_msg( + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: intention_type={user_intention.intention_type}", chatbot=chatbot, history=history, delay=0) # 用户意图: 修改本项目的配置 if user_intention.intention_type == 'ModifyConfiguration': yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) @@ -97,22 +105,3 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt return - - - - # # if state == 'wait_user_keyword': - # # chatbot._cookies['lock_plugin'] = None # 解除插件锁定,避免遗忘导致死锁 - # # chatbot._cookies['plugin_state_0001'] = None # 解除插件状态,避免遗忘导致死锁 - - # # # 解除插件锁定 - # # chatbot.append((f"获取关键词:{txt}", "")) - # # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # # inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}" - # # gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - # # inputs=inputs, inputs_show_user=inputs_show_user, - # # llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - # # sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). 
If there are no image url provided, answer 'no image url provided'" - # # ) - # # chatbot[-1] = [chatbot[-1][0], gpt_say] - # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # return diff --git a/tests/test_plugins.py b/tests/test_plugins.py index 2207f71..2780ed2 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -9,9 +9,9 @@ validate_path() # 返回项目根路径 from tests.test_utils import plugin_test if __name__ == "__main__": - # plugin_test(plugin='crazy_functions.虚空终端->自动终端', main_input='修改api-key为sk-jhoejriotherjep') + # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep') - plugin_test(plugin='crazy_functions.虚空终端->自动终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') + plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') # plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表') diff --git a/version b/version index 303a44b..2034d6d 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.49, + "version": 3.50, "show_feature": true, - "new_feature": "支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率 <-> 支持自定义APIKEY格式 <-> 临时修复theme的文件丢失问题 <-> 新增实时语音对话插件(自动断句,脱手对话) <-> 支持加载自定义的ChatGLM2微调模型 <-> 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块" + "new_feature": "支持自然语言插件调度(虚空终端) <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率 <-> 支持自定义APIKEY格式 <-> 临时修复theme的文件丢失问题 <-> 新增实时语音对话插件(自动断句,脱手对话) <-> 支持加载自定义的ChatGLM2微调模型 <-> 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块" } From d0af07422583710b86351302f0202bcc76e31526 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 2 Sep 2023 18:19:19 +0800 Subject: [PATCH 07/47] change layout --- core_functional.py | 2 ++ crazy_functional.py | 2 +- 2 files 
changed, 3 insertions(+), 1 deletion(-) diff --git a/core_functional.py b/core_functional.py index b04e1e0..c4519ef 100644 --- a/core_functional.py +++ b/core_functional.py @@ -63,6 +63,7 @@ def get_core_functions(): "英译中": { "Prefix": r"翻译成地道的中文:" + "\n\n", "Suffix": r"", + "Visible": False, }, "找图片": { "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," + @@ -78,6 +79,7 @@ def get_core_functions(): "Prefix": r"Here are some bibliography items, please transform them into bibtex style." + r"Note that, reference styles maybe more than one kind, you should transform each item correctly." + r"Items need to be transformed:", + "Visible": False, "Suffix": r"", } } diff --git a/crazy_functional.py b/crazy_functional.py index 1388e34..8fed5f8 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -500,7 +500,7 @@ def get_crazy_functions(): "虚空终端": { "Group": "对话", "Color": "stop", - "AsButton": False, + "AsButton": True, "Function": HotReload(虚空终端) } }) From 1639796041b4354ce3927776650f44a9c7453671 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 2 Sep 2023 22:22:41 +0800 Subject: [PATCH 08/47] support file implementation --- crazy_functions/Latex输出PDF结果.py | 4 +-- crazy_functions/vt_fns/vt_call_plugin.py | 26 ++++++++++++++++-- main.py | 4 +-- themes/common.css | 6 ++++ toolbox.py | 35 ++++++++++++++++++------ 5 files changed, 61 insertions(+), 14 deletions(-) diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index e79cf82..0ba9f19 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -6,7 +6,7 @@ pj = os.path.join ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") # =================================== 工具函数 =============================================== -专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". ' +# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". 
' def switch_prompt(pfg, mode, more_requirement): """ Generate prompts and system prompts based on the mode for proofreading or translating. @@ -291,7 +291,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) else: - chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...')) + chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 8ee88b1..09b136f 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -3,7 +3,7 @@ from typing import List from toolbox import update_ui_lastest_msg, get_conf from request_llm.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO -import copy, json, pickle, os, sys +import copy, json, pickle, os, sys, time def read_avail_plugin_enum(): @@ -20,6 +20,23 @@ def read_avail_plugin_enum(): def wrap_code(txt): return f"\n```\n{txt}\n```\n" +def have_any_recent_upload_files(chatbot): + _5min = 5 * 60 + if not chatbot: return False # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new + else: return False # most_recent_uploaded is too old + +def get_recent_file_prompt_support(chatbot): + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + path = most_recent_uploaded['path'] + prompt = "\nAdditional 
Information:\n" + prompt = "In case that this plugin requires a path or a file as argument," + prompt += f"it is important for you to know that the user has recently uploaded a file, located at: `{path}`" + prompt += f"Only use it when necessary, otherwise, you can ignore this file." + return prompt + def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum() class Plugin(BaseModel): @@ -56,6 +73,11 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom return # ⭐ ⭐ ⭐ 确认插件参数 + if not have_any_recent_upload_files(chatbot): + appendix_info = "" + else: + appendix_info = get_recent_file_prompt_support(chatbot) + plugin = plugin_arr_dict[plugin_sel.plugin_selection] yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0) class PluginExplicit(BaseModel): @@ -65,7 +87,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom gpt_json_io.format_instructions += "The information about this plugin is:" + plugin["Info"] inputs = f"A plugin named {plugin_sel.plugin_selection} is selected, " + \ "you should extract plugin_arg from the user requirement, the user requirement is: \n\n" + \ - ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ + ">> " + (txt + appendix_info).rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ gpt_json_io.format_instructions run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) diff --git a/main.py b/main.py index 50028c2..feebfb6 100644 --- a/main.py +++ b/main.py @@ -113,7 +113,7 @@ def main(): plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, placeholder="这里是特殊函数插件的高级参数输入区").style(container=False) with gr.Row(): - switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary") + 
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm") with gr.Row(): with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up: file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple") @@ -172,7 +172,7 @@ def main(): click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) cancel_handles.append(click_handle) # 文件上传区,接收文件后与chatbot的互动 - file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2]) + file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]) # 函数插件-固定按钮区 for k in plugins: if not plugins[k].get("AsButton", True): continue diff --git a/themes/common.css b/themes/common.css index 33bb4f0..c43e54d 100644 --- a/themes/common.css +++ b/themes/common.css @@ -13,3 +13,9 @@ #input-plugin-group .svelte-1gfkn6j { visibility: hidden; } + + +/* height of the upload box */ +.wrap.svelte-xwlu1w { + min-height: var(--size-32); +} \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index 6da2347..9a06895 100644 --- a/toolbox.py +++ b/toolbox.py @@ -24,6 +24,19 @@ pj = os.path.join class ChatBotWithCookies(list): def __init__(self, cookie): + """ + cookies = { + 'top_p': top_p, + 'temperature': temperature, + 'lock_plugin': bool, + "files_to_promote": ["file1", "file2"], + "most_recent_uploaded": { + "path": "uploaded_path", + "time": time.time(), + "time_str": "timestr", + } + } + """ self._cookies = cookie def write_list(self, list): @@ -503,15 +516,15 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): if not os.path.exists(new_path): shutil.copyfile(file, new_path) # 将文件添加到chatbot cookie中,避免多用户干扰 if chatbot: - if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote'] + if 'files_to_promote' in chatbot._cookies: current = 
chatbot._cookies['files_to_promote'] else: current = [] - chatbot._cookies.update({'file_to_promote': [new_path] + current}) + chatbot._cookies.update({'files_to_promote': [new_path] + current}) def disable_auto_promotion(chatbot): - chatbot._cookies.update({'file_to_promote': []}) + chatbot._cookies.update({'files_to_promote': []}) return -def on_file_uploaded(files, chatbot, txt, txt2, checkboxes): +def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies): """ 当文件被上传时的回调函数 """ @@ -546,14 +559,20 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes): f'[Local Message] 收到以下文件: \n\n{moved_files_str}' + f'\n\n调用路径参数已自动修正到: \n\n{txt}' + f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数'+err_msg]) - return chatbot, txt, txt2 + cookies.update({ + 'most_recent_uploaded': { + 'path': f'private_upload/{time_tag}', + 'time': time.time(), + 'time_str': time_tag + }}) + return chatbot, txt, txt2, cookies def on_report_generated(cookies, files, chatbot): from toolbox import find_recent_files - if 'file_to_promote' in cookies: - report_files = cookies['file_to_promote'] - cookies.pop('file_to_promote') + if 'files_to_promote' in cookies: + report_files = cookies['files_to_promote'] + cookies.pop('files_to_promote') else: report_files = find_recent_files('gpt_log') if len(report_files) == 0: From 0b70e9df7b5635f598ad32af917a2cabccb9c27f Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 2 Sep 2023 23:49:56 +0800 Subject: [PATCH 09/47] =?UTF-8?q?=E4=BC=98=E5=8C=96=E8=99=9A=E7=A9=BA?= =?UTF-8?q?=E7=BB=88=E7=AB=AF=E8=B0=83=E7=94=A8=E6=B5=81=E7=A8=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/vt_fns/vt_state.py | 28 +++++++++ crazy_functions/虚空终端.py | 95 ++++++++++++++++++++++-------- toolbox.py | 2 +- 3 files changed, 100 insertions(+), 25 deletions(-) create mode 100644 crazy_functions/vt_fns/vt_state.py diff --git a/crazy_functions/vt_fns/vt_state.py 
b/crazy_functions/vt_fns/vt_state.py new file mode 100644 index 0000000..1818728 --- /dev/null +++ b/crazy_functions/vt_fns/vt_state.py @@ -0,0 +1,28 @@ +import pickle + +class VoidTerminalState(): + def __init__(self): + self.reset_state() + + def reset_state(self): + self.has_provided_explaination = False + + def lock_plugin(self, chatbot): + chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端' + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def unlock_plugin(self, chatbot): + self.reset_state() + chatbot._cookies['lock_plugin'] = None + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def set_state(self, chatbot, key, value): + setattr(self, key, value) + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def get_state(chatbot): + state = chatbot._cookies.get('plugin_state', None) + if state is not None: state = pickle.loads(state) + else: state = VoidTerminalState() + state.chatbot = chatbot + return state \ No newline at end of file diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index ef8efd5..6304bd7 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -6,6 +6,7 @@ from request_llm.bridge_all import predict_no_ui_long_connection from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from crazy_functions.crazy_utils import input_clipping from crazy_functions.json_fns.pydantic_io import GptJsonIO +from crazy_functions.vt_fns.vt_state import VoidTerminalState from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot from crazy_functions.vt_fns.vt_call_plugin import execute_plugin @@ -13,12 +14,14 @@ from enum import Enum import copy, json, pickle, os, sys + class UserIntention(BaseModel): user_prompt: str = Field(description="the content of user input", default="") - intention_type: str = Field(description="the type of user intention, choose from 
['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="Chat") + intention_type: str = Field(description="the type of user intention, choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="ExecutePlugin") user_provide_file: bool = Field(description="whether the user provides a path to a file", default=False) user_provide_url: bool = Field(description="whether the user provides a url", default=False) + def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=txt, inputs_show_user=txt, @@ -30,11 +33,23 @@ def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_i yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 pass -def analyze_with_rule(txt): + +explain_intention_to_user = { + 'Chat': "聊天对话", + 'ExecutePlugin': "调用插件", + 'ModifyConfiguration': "修改配置", +} + + +def analyze_intention_with_simple_rules(txt): user_intention = UserIntention() user_intention.user_prompt = txt is_certain = False + if '请问' in txt: + is_certain = True + user_intention.intention_type = 'Chat' + if '调用插件' in txt: is_certain = True user_intention.intention_type = 'ExecutePlugin' @@ -45,33 +60,63 @@ def analyze_with_rule(txt): return is_certain, user_intention + + + + + + +explain_msg = """ +## 虚空终端插件说明: + +请用**自然语言**描述您需要做什么。 + +1. 如果涉及文件处理, 请务必描述文件所在路径(把文件拖拽到文件上传区亦可)。 + +2. 您可以打开插件下拉菜单以了解本项目的各种能力。 + +3. 如果您使用“调用插件xxx”、“修改配置xxx”、“请问”等关键词,您的意图可以被识别的更准确。 + +4. 使用GPT4等强模型时,您的意图可以被识别的更准确。 + +5. 
现在,请您给出指令(或先上传文件,再给指令)。 +""" + + + + @CatchException def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数, 如温度和top_p等, 一般原样传递下去就行 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 + # 获取当前虚空终端状态 + state = VoidTerminalState.get_state(chatbot) + + # 用简单的关键词检测用户意图 + is_certain, _ = analyze_intention_with_simple_rules(txt) + + if is_certain or (state.has_provided_explaination): + # 如果意图明确,跳过提示环节 + state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) + state.unlock_plugin(chatbot=chatbot) + yield from update_ui(chatbot=chatbot, history=history) + yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port) + return + else: + # 如果意图模糊,提示 + state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) + state.lock_plugin(chatbot=chatbot) + chatbot.append(("虚空终端状态:", explain_msg)) + yield from update_ui(chatbot=chatbot, history=history) + return + + + +def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + history = [] chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # 初始化插件状态 - state = chatbot._cookies.get('plugin_state', None) - if state is not None: state = pickle.loads(state) - else: state = {} - - def update_vt_state(): - # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数 - chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端' - chatbot._cookies['vt_state'] = pickle.dumps(state) - # ⭐ ⭐ ⭐ 分析用户意图 - is_certain, user_intention = analyze_with_rule(txt) + is_certain, user_intention = analyze_intention_with_simple_rules(txt) if not is_certain: yield from update_ui_lastest_msg( lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0) @@ -89,7 +134,9 @@ def 
虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt pass yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: intention_type={user_intention.intention_type}", chatbot=chatbot, history=history, delay=0) + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", + chatbot=chatbot, history=history, delay=0) + # 用户意图: 修改本项目的配置 if user_intention.intention_type == 'ModifyConfiguration': yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) diff --git a/toolbox.py b/toolbox.py index 9a06895..0d598d8 100644 --- a/toolbox.py +++ b/toolbox.py @@ -82,7 +82,7 @@ def ArgsGeneralWrapper(f): # 处理个别特殊插件的锁定状态 module, fn_name = cookies['lock_plugin'].split('->') f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name) - yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args) + yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request) return decorated From e666b5269e1c861b0160c91b1c1016415cefc8d5 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 00:53:57 +0800 Subject: [PATCH 10/47] =?UTF-8?q?=E6=94=B9=E8=BF=9B=E8=99=9A=E7=A9=BA?= =?UTF-8?q?=E7=BB=88=E7=AB=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- crazy_functional.py | 2 +- crazy_functions/vt_fns/vt_call_plugin.py | 13 ++++++++++-- crazy_functions/虚空终端.py | 27 +++++++++++++++++------- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/config.py b/config.py index 6442ad8..d2f35c4 100644 --- a/config.py +++ b/config.py @@ -228,7 +228,7 @@ ALLOW_RESET_CONFIG = False ├── THEME 色彩主题 ├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框 ├── ADD_WAIFU 加一个live2d装饰 - +├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性 插件在线服务配置依赖关系示意图 diff --git a/crazy_functional.py 
b/crazy_functional.py index 8fed5f8..05e4466 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -268,7 +268,7 @@ def get_crazy_functions(): "Group": "学术", "Color": "stop", "AsButton": False, # 加入下拉菜单中 - "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695", + # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695", "Function": HotReload(下载arxiv论文并翻译摘要) } }) diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 09b136f..1f84d23 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -37,6 +37,15 @@ def get_recent_file_prompt_support(chatbot): prompt += f"Only use it when necessary, otherwise, you can ignore this file." return prompt +def get_inputs_show_user(inputs, plugin_arr_enum_prompt): + # remove plugin_arr_enum_prompt from inputs string + inputs_show_user = inputs.replace(plugin_arr_enum_prompt, "") + inputs_show_user += plugin_arr_enum_prompt[:200] + '...' + inputs_show_user += '\n...\n' + inputs_show_user += '...\n' + inputs_show_user += '...}' + return inputs_show_user + def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum() class Plugin(BaseModel): @@ -59,14 +68,14 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn) except: msg = "抱歉,当前的大语言模型无法理解您的需求。" - msg += "请求的Prompt为:\n" + wrap_code(inputs) + msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) msg += "语言模型回复为:\n" + wrap_code(gpt_reply) msg += "但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) return if plugin_sel.plugin_selection not in plugin_arr_dict: msg = "抱歉, 找不到合适插件执行该任务, 当前的大语言模型可能无法理解您的需求。" - msg += "请求的Prompt为:\n" + wrap_code(inputs) + msg += "请求的Prompt为:\n" + 
wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) msg += "语言模型回复为:\n" + wrap_code(gpt_reply) msg += "但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index 6304bd7..c4f2b2f 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -50,7 +50,7 @@ def analyze_intention_with_simple_rules(txt): is_certain = True user_intention.intention_type = 'Chat' - if '调用插件' in txt: + if '用插件' in txt: is_certain = True user_intention.intention_type = 'ExecutePlugin' @@ -71,15 +71,22 @@ explain_msg = """ 请用**自然语言**描述您需要做什么。 -1. 如果涉及文件处理, 请务必描述文件所在路径(把文件拖拽到文件上传区亦可)。 +1. 您可以打开插件下拉菜单以了解本项目的各种能力,然后用自然语言描述您的需要,例如: + - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」 + - 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」 + - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」 + - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」 + - 「给爷翻译Arxiv论文,arxiv论文的ID是1812.10695,记得用插件,不要自己瞎搞!」 + - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」 + - 「请问Transformer网络的结构是怎样的?」 -2. 您可以打开插件下拉菜单以了解本项目的各种能力。 +2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 -3. 如果您使用“调用插件xxx”、“修改配置xxx”、“请问”等关键词,您的意图可以被识别的更准确。 +3. 使用GPT4等强模型时,您的意图可以被识别的更准确。 -4. 使用GPT4等强模型时,您的意图可以被识别的更准确。 +4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 -5. 现在,请您给出指令(或先上传文件,再给指令)。 +5. 
如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 """ @@ -89,10 +96,14 @@ explain_msg = """ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): # 获取当前虚空终端状态 state = VoidTerminalState.get_state(chatbot) + appendix_msg = "" # 用简单的关键词检测用户意图 is_certain, _ = analyze_intention_with_simple_rules(txt) - + if txt.startswith('private_upload/') and len(txt) == 34: + state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False) + appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。" + if is_certain or (state.has_provided_explaination): # 如果意图明确,跳过提示环节 state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) @@ -104,7 +115,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt # 如果意图模糊,提示 state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) state.lock_plugin(chatbot=chatbot) - chatbot.append(("虚空终端状态:", explain_msg)) + chatbot.append(("虚空终端状态:", explain_msg+appendix_msg)) yield from update_ui(chatbot=chatbot, history=history) return From 1dd165a3cd1f74b2f643f2b94948b790c6806d55 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 14:47:22 +0800 Subject: [PATCH 11/47] ui layout improve --- crazy_functional.py | 21 ++++------ crazy_functions/虚空终端.py | 81 ++++++++++++++++++++----------------- toolbox.py | 3 +- 3 files changed, 55 insertions(+), 50 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 05e4466..f06cd40 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -34,9 +34,16 @@ def get_crazy_functions(): from crazy_functions.Latex全文翻译 import Latex中译英 from crazy_functions.Latex全文翻译 import Latex英译中 from crazy_functions.批量Markdown翻译 import Markdown中译英 + from crazy_functions.虚空终端 import 虚空终端 function_plugins = { + "虚空终端": { + "Group": "对话|编程|学术", + "Color": "stop", + "AsButton": True, + "Function": HotReload(虚空终端) + }, "解析整个Python项目": { "Group": "编程", "Color": "stop", @@ -139,7 +146,7 @@ def get_crazy_functions(): "读Tex论文写摘要": { 
"Group": "学术", "Color": "stop", - "AsButton": True, + "AsButton": False, "Info": "读取Tex论文并写摘要 | 输入参数为路径", "Function": HotReload(读文章写摘要) }, @@ -494,18 +501,6 @@ def get_crazy_functions(): except: print('Load function plugin failed') - try: - from crazy_functions.虚空终端 import 虚空终端 - function_plugins.update({ - "虚空终端": { - "Group": "对话", - "Color": "stop", - "AsButton": True, - "Function": HotReload(虚空终端) - } - }) - except: - print('Load function plugin failed') # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index c4f2b2f..cdf0605 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -1,3 +1,48 @@ +""" +Explanation of the Void Terminal Plugin: + +Please describe in natural language what you want to do. + +1. You can open the plugin's dropdown menu to explore various capabilities of this project, and then describe your needs in natural language, for example: +- "Please call the plugin to translate a PDF paper for me. I just uploaded the paper to the upload area." +- "Please use the plugin to translate a PDF paper, with the address being https://www.nature.com/articles/s41586-019-1724-z.pdf." +- "Generate an image with blooming flowers and lush green grass using the plugin." +- "Translate the README using the plugin. The GitHub URL is https://github.com/facebookresearch/co-tracker." +- "Translate an Arxiv paper for me. The Arxiv ID is 1812.10695. Remember to use the plugin and don't do it manually!" +- "I don't like the current interface color. Modify the configuration and change the theme to THEME="High-Contrast"." +- "Could you please explain the structure of the Transformer network?" + +2. If you use keywords like "call the plugin xxx", "modify the configuration xxx", "please", etc., your intention can be recognized more accurately. + +3. Your intention can be recognized more accurately when using powerful models like GPT4. 
This plugin is relatively new, so please feel free to provide feedback on GitHub. + +4. Now, if you need to process a file, please upload the file (drag the file to the file upload area) or describe the path to the file. + +5. If you don't need to upload a file, you can simply repeat your command again. +""" +explain_msg = """ +## 虚空终端插件说明: + +请用**自然语言**描述您需要做什么。 + +1. 您可以打开插件下拉菜单以了解本项目的各种能力,然后用自然语言描述您的需要,例如: + - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」 + - 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」 + - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」 + - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」 + - 「给爷翻译Arxiv论文,arxiv论文的ID是1812.10695,记得用插件,不要自己瞎搞!」 + - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」 + - 「请问Transformer网络的结构是怎样的?」 + +2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 + +3. 使用GPT4等强力模型时,您的意图可以被识别的更准确。该插件诞生时间不长,欢迎您前往Github反馈问题。 + +4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 + +5. 如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 +""" + from pydantic import BaseModel, Field from typing import List from toolbox import CatchException, update_ui, gen_time_str @@ -10,10 +55,6 @@ from crazy_functions.vt_fns.vt_state import VoidTerminalState from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot from crazy_functions.vt_fns.vt_call_plugin import execute_plugin -from enum import Enum -import copy, json, pickle, os, sys - - class UserIntention(BaseModel): user_prompt: str = Field(description="the content of user input", default="") @@ -61,37 +102,6 @@ def analyze_intention_with_simple_rules(txt): return is_certain, user_intention - - - - - -explain_msg = """ -## 虚空终端插件说明: - -请用**自然语言**描述您需要做什么。 - -1. 
您可以打开插件下拉菜单以了解本项目的各种能力,然后用自然语言描述您的需要,例如: - - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」 - - 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」 - - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」 - - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」 - - 「给爷翻译Arxiv论文,arxiv论文的ID是1812.10695,记得用插件,不要自己瞎搞!」 - - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」 - - 「请问Transformer网络的结构是怎样的?」 - -2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 - -3. 使用GPT4等强模型时,您的意图可以被识别的更准确。 - -4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 - -5. 如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 -""" - - - - @CatchException def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): # 获取当前虚空终端状态 @@ -162,4 +172,3 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst return - diff --git a/toolbox.py b/toolbox.py index 0d598d8..1242fab 100644 --- a/toolbox.py +++ b/toolbox.py @@ -492,7 +492,8 @@ def find_recent_files(directory): current_time = time.time() one_minute_ago = current_time - 60 recent_files = [] - + if not os.path.exists(directory): + os.makedirs(directory, exist_ok=True) for filename in os.listdir(directory): file_path = os.path.join(directory, filename) if file_path.endswith('.log'): From a3e6fc0141ba5afaffd1d9da0a0f9bf317a53b09 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 15:32:39 +0800 Subject: [PATCH 12/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=96=87=E5=BF=83?= =?UTF-8?q?=E4=B8=80=E8=A8=80=E7=9A=84=E6=8E=A5=E5=8F=A3=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/vt_fns/vt_call_plugin.py | 26 +++++++++++++----------- crazy_functions/虚空终端.py | 12 +++++++---- request_llm/bridge_qianfan.py | 5 +++-- 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 1f84d23..614e9fe 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ 
b/crazy_functions/vt_fns/vt_call_plugin.py @@ -1,6 +1,6 @@ from pydantic import BaseModel, Field from typing import List -from toolbox import update_ui_lastest_msg, get_conf +from toolbox import update_ui_lastest_msg, disable_auto_promotion from request_llm.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO import copy, json, pickle, os, sys, time @@ -13,11 +13,14 @@ def read_avail_plugin_enum(): plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v} plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)} plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} + plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} + plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)}) prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2) prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt - return prompt, plugin_arr_dict + return prompt, plugin_arr_dict, plugin_arr_dict_parse def wrap_code(txt): + txt = txt.replace('```','') return f"\n```\n{txt}\n```\n" def have_any_recent_upload_files(chatbot): @@ -47,7 +50,7 @@ def get_inputs_show_user(inputs, plugin_arr_enum_prompt): return inputs_show_user def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum() + plugin_arr_enum_prompt, plugin_arr_dict, plugin_arr_dict_parse = read_avail_plugin_enum() class Plugin(BaseModel): plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000") reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfy user requirement most") @@ -67,17 +70,16 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, 
history, system_prom gpt_reply = run_gpt_fn(inputs, "") plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn) except: - msg = "抱歉,当前的大语言模型无法理解您的需求。" + msg = f"抱歉, {llm_kwargs['llm_model']}无法理解您的需求。" msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) msg += "语言模型回复为:\n" + wrap_code(gpt_reply) - msg += "但您可以尝试再试一次\n" + msg += "\n但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) return - if plugin_sel.plugin_selection not in plugin_arr_dict: - msg = "抱歉, 找不到合适插件执行该任务, 当前的大语言模型可能无法理解您的需求。" - msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) - msg += "语言模型回复为:\n" + wrap_code(gpt_reply) - msg += "但您可以尝试再试一次\n" + if plugin_sel.plugin_selection not in plugin_arr_dict_parse: + msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。" + msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply) + msg += "\n但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) return @@ -87,7 +89,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom else: appendix_info = get_recent_file_prompt_support(chatbot) - plugin = plugin_arr_dict[plugin_sel.plugin_selection] + plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection] yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0) class PluginExplicit(BaseModel): plugin_selection: str = plugin_sel.plugin_selection @@ -106,7 +108,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom # ⭐ ⭐ ⭐ 执行插件 fn = plugin['Function'] fn_name = fn.__name__ - msg = f'正在调用插件: {fn_name}\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}' + msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。' yield from update_ui_lastest_msg(lastmsg=msg, 
chatbot=chatbot, history=history, delay=2) yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1) return \ No newline at end of file diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index cdf0605..caf241c 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -36,7 +36,7 @@ explain_msg = """ 2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 -3. 使用GPT4等强力模型时,您的意图可以被识别的更准确。该插件诞生时间不长,欢迎您前往Github反馈问题。 +3. 建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。 4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 @@ -46,7 +46,7 @@ explain_msg = """ from pydantic import BaseModel, Field from typing import List from toolbox import CatchException, update_ui, gen_time_str -from toolbox import update_ui_lastest_msg +from toolbox import update_ui_lastest_msg, disable_auto_promotion from request_llm.bridge_all import predict_no_ui_long_connection from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from crazy_functions.crazy_utils import input_clipping @@ -104,6 +104,7 @@ def analyze_intention_with_simple_rules(txt): @CatchException def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + disable_auto_promotion(chatbot=chatbot) # 获取当前虚空终端状态 state = VoidTerminalState.get_state(chatbot) appendix_msg = "" @@ -142,14 +143,17 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst yield from update_ui_lastest_msg( lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0) gpt_json_io = GptJsonIO(UserIntention) - inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions + rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']" + inputs = "Analyze the intention of the user according to following user input: \n\n" + \ + ">> " + (txt+rf_req).rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions 
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) try: user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", except: yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型不能理解您的意图", chatbot=chatbot, history=history, delay=0) + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0) return else: pass diff --git a/request_llm/bridge_qianfan.py b/request_llm/bridge_qianfan.py index e2cdb0e..be73976 100644 --- a/request_llm/bridge_qianfan.py +++ b/request_llm/bridge_qianfan.py @@ -49,16 +49,17 @@ def get_access_token(): def generate_message_payload(inputs, llm_kwargs, history, system_prompt): conversation_cnt = len(history) // 2 + if system_prompt == "": system_prompt = "Hello" messages = [{"role": "user", "content": system_prompt}] messages.append({"role": "assistant", "content": 'Certainly!'}) if conversation_cnt: for index in range(0, 2*conversation_cnt, 2): what_i_have_asked = {} what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] + what_i_have_asked["content"] = history[index] if history[index]!="" else "Hello" what_gpt_answer = {} what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] + what_gpt_answer["content"] = history[index+1] if history[index]!="" else "Hello" if what_i_have_asked["content"] != "": if what_gpt_answer["content"] == "": continue if what_gpt_answer["content"] == timeout_bot_msg: continue From 0d6e32d31a812744d837fcbba1f5fbe16d955c18 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 15:38:10 +0800 Subject: [PATCH 13/47] version 3.5 release --- README.md | 3 ++- version | 2 +- 2 files changed, 3 insertions(+), 2 
deletions(-) diff --git a/README.md b/README.md index 761e303..43b38dd 100644 --- a/README.md +++ b/README.md @@ -300,7 +300,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: -- version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级) +- version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件 +- version 3.50: 使用自然语言调用本项目的所有函数插件(虚空终端),支持插件分类,改进UI,设计新主题 - version 3.49: 支持百度千帆平台和文心一言 - version 3.48: 支持阿里达摩院通义千问,上海AI-Lab书生,讯飞星火 - version 3.46: 支持完全脱手操作的实时语音对话 diff --git a/version b/version index 2034d6d..369a800 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { "version": 3.50, "show_feature": true, - "new_feature": "支持自然语言插件调度(虚空终端) <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率 <-> 支持自定义APIKEY格式 <-> 临时修复theme的文件丢失问题 <-> 新增实时语音对话插件(自动断句,脱手对话) <-> 支持加载自定义的ChatGLM2微调模型 <-> 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块" + "new_feature": "支持插件分类! <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率" } From 9bd2023a8e820cd068ffce124f2bc17442ae3c1d Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 15:40:41 +0800 Subject: [PATCH 14/47] revise version check --- main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/main.py b/main.py index feebfb6..d475253 100644 --- a/main.py +++ b/main.py @@ -2,10 +2,9 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 def main(): import gradio as gr - # if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt" + if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt" from request_llm.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith - # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 
避免不小心传github被别人看到 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') From 843113ba0f3825e496c2b98c25c6249a2ab87e30 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 16:20:05 +0800 Subject: [PATCH 15/47] fix minor bugs --- crazy_functions/json_fns/pydantic_io.py | 3 ++- crazy_functions/vt_fns/vt_call_plugin.py | 4 ++-- crazy_functions/虚空终端.py | 21 +++++++++++---------- toolbox.py | 2 ++ 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/crazy_functions/json_fns/pydantic_io.py b/crazy_functions/json_fns/pydantic_io.py index 9bd39c3..4e300d6 100644 --- a/crazy_functions/json_fns/pydantic_io.py +++ b/crazy_functions/json_fns/pydantic_io.py @@ -43,6 +43,7 @@ PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSO {schema} ```""" +class JsonStringError(Exception): ... 
class GptJsonIO(): @@ -105,6 +106,6 @@ class GptJsonIO(): except Exception as e: # 没辙了,放弃治疗 logging.info('Repaire json fail.') - raise RuntimeError('Cannot repair json.', str(e)) + raise JsonStringError('Cannot repair json.', str(e)) return result diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 614e9fe..455ac88 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -2,7 +2,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import update_ui_lastest_msg, disable_auto_promotion from request_llm.bridge_all import predict_no_ui_long_connection -from crazy_functions.json_fns.pydantic_io import GptJsonIO +from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError import copy, json, pickle, os, sys, time @@ -69,7 +69,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: gpt_reply = run_gpt_fn(inputs, "") plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn) - except: + except JsonStringError: msg = f"抱歉, {llm_kwargs['llm_model']}无法理解您的需求。" msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) msg += "语言模型回复为:\n" + wrap_code(gpt_reply) diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index caf241c..a51e79b 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -23,9 +23,7 @@ Please describe in natural language what you want to do. explain_msg = """ ## 虚空终端插件说明: -请用**自然语言**描述您需要做什么。 - -1. 您可以打开插件下拉菜单以了解本项目的各种能力,然后用自然语言描述您的需要,例如: +1. 请用**自然语言**描述您需要做什么。例如: - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」 - 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」 - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」 @@ -34,13 +32,15 @@ explain_msg = """ - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」 - 「请问Transformer网络的结构是怎样的?」 -2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 +2. 您可以打开插件下拉菜单以了解本项目的各种能力。 -3. 
建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。 +3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 -4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 +4. 建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。 -5. 如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 +5. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 + +6. 如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 """ from pydantic import BaseModel, Field @@ -50,7 +50,7 @@ from toolbox import update_ui_lastest_msg, disable_auto_promotion from request_llm.bridge_all import predict_no_ui_long_connection from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from crazy_functions.crazy_utils import input_clipping -from crazy_functions.json_fns.pydantic_io import GptJsonIO +from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError from crazy_functions.vt_fns.vt_state import VoidTerminalState from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot @@ -148,10 +148,11 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst ">> " + (txt+rf_req).rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) + analyze_res = run_gpt_fn(inputs, "") try: - user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) + user_intention = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn) lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", - except: + except JsonStringError as e: yield from update_ui_lastest_msg( lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0) return diff --git a/toolbox.py b/toolbox.py index 1242fab..273ecee 100644 --- a/toolbox.py 
+++ b/toolbox.py @@ -60,6 +60,8 @@ def ArgsGeneralWrapper(f): # 引入一个有cookie的chatbot cookies.update({ 'top_p':top_p, + 'api_key': cookies['api_key'], + 'llm_model': llm_model, 'temperature':temperature, }) llm_kwargs = { From 0ef06683dc75062cdbfef0ca198422cf0ea76e15 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Sun, 3 Sep 2023 16:35:03 +0800 Subject: [PATCH 16/47] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 43b38dd..fda0f9e 100644 --- a/README.md +++ b/README.md @@ -298,6 +298,11 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h +12. 虚空终端 +
+ +
+ ### II:版本: - version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件 From 23a42d93dffdef633447cce5157c9a36cdaa4485 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 3 Sep 2023 16:46:27 +0800 Subject: [PATCH 17/47] update translation matrix --- docs/translate_english.json | 289 +++++++++++++++++++++++++++++++++++- docs/translate_std.json | 7 +- multi_language.py | 2 + 3 files changed, 296 insertions(+), 2 deletions(-) diff --git a/docs/translate_english.json b/docs/translate_english.json index 9721dcf..667956c 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -2161,5 +2161,292 @@ "在运行过程中动态地修改配置": "Dynamically modify configurations during runtime", "请先把模型切换至gpt-*或者api2d-*": "Please switch the model to gpt-* or api2d-* first", "获取简单聊天的句柄": "Get handle of simple chat", - "获取插件的默认参数": "Get default parameters of plugin" + "获取插件的默认参数": "Get default parameters of plugin", + "GROBID服务不可用": "GROBID service is unavailable", + "请问": "May I ask", + "如果等待时间过长": "If the waiting time is too long", + "编程": "programming", + "5. 现在": "5. 
Now", + "您不必读这个else分支": "You don't have to read this else branch", + "用插件实现": "Implement with plugins", + "插件分类默认选项": "Default options for plugin classification", + "填写多个可以均衡负载": "Filling in multiple can balance the load", + "色彩主题": "Color theme", + "可能附带额外依赖 -=-=-=-=-=-=-": "May come with additional dependencies -=-=-=-=-=-=-", + "讯飞星火认知大模型": "Xunfei Xinghuo cognitive model", + "ParsingLuaProject的所有源文件 | 输入参数为路径": "All source files of ParsingLuaProject | Input parameter is path", + "复制以下空间https": "Copy the following space https", + "如果意图明确": "If the intention is clear", + "如系统是Linux": "If the system is Linux", + "├── 语音功能": "├── Voice function", + "见Github wiki": "See Github wiki", + "⭐ ⭐ ⭐ 立即应用配置": "⭐ ⭐ ⭐ Apply configuration immediately", + "现在您只需要再次重复一次您的指令即可": "Now you just need to repeat your command again", + "没辙了": "No way", + "解析Jupyter Notebook文件 | 输入参数为路径": "Parse Jupyter Notebook file | Input parameter is path", + "⭐ ⭐ ⭐ 确认插件参数": "⭐ ⭐ ⭐ Confirm plugin parameters", + "找不到合适插件执行该任务": "Cannot find a suitable plugin to perform this task", + "接驳VoidTerminal": "Connect to VoidTerminal", + "**很好": "**Very good", + "对话|编程": "Conversation|Programming", + "对话|编程|学术": "Conversation|Programming|Academic", + "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model", + "「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper", + "3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. 
If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.", + "以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper", + "GROBID服务器地址": "GROBID server address", + "修改配置": "Modify configuration", + "理解PDF文档的内容并进行回答 | 输入参数为路径": "Understand the content of the PDF document and answer | Input parameter is path", + "对于需要高级参数的插件": "For plugins that require advanced parameters", + "🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行": "Main process execution 🏃‍♂️🏃‍♂️🏃‍♂️", + "没有填写 HUGGINGFACE_ACCESS_TOKEN": "HUGGINGFACE_ACCESS_TOKEN not filled in", + "调度插件": "Scheduling plugin", + "语言模型": "Language model", + "├── ADD_WAIFU 加一个live2d装饰": "├── ADD_WAIFU Add a live2d decoration", + "初始化": "Initialization", + "选择了不存在的插件": "Selected a non-existent plugin", + "修改本项目的配置": "Modify the configuration of this project", + "如果输入的文件路径是正确的": "If the input file path is correct", + "2. 您可以打开插件下拉菜单以了解本项目的各种能力": "2. You can open the plugin dropdown menu to learn about various capabilities of this project", + "VoidTerminal插件说明": "VoidTerminal plugin description", + "无法理解您的需求": "Unable to understand your requirements", + "默认 AdvancedArgs = False": "Default AdvancedArgs = False", + "「请问Transformer网络的结构是怎样的": "What is the structure of the Transformer network?", + "比如1812.10695": "For example, 1812.10695", + "翻译README或MD": "Translate README or MD", + "读取新配置中": "Reading new configuration", + "假如偏离了您的要求": "If it deviates from your requirements", + "├── THEME 色彩主题": "├── THEME color theme", + "如果还找不到": "If still not found", + "问": "Ask", + "请检查系统字体": "Please check system fonts", + "如果错误": "If there is an error", + "作为替代": "As an alternative", + "ParseJavaProject的所有源文件 | 输入参数为路径": "All source files of ParseJavaProject | Input parameter is path", + "比对相同参数时生成的url与自己代码生成的url是否一致": "Check if the generated URL matches the one generated by your code when comparing the same parameters", + "清除本地缓存数据": "Clear local cache data", + "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL": "Use Google 
Scholar search assistant to search for results of a specific URL | Input parameter is the URL of Google Scholar search page", + "运行方法": "Running method", + "您已经上传了文件**": "You have uploaded the file **", + "「给爷翻译Arxiv论文": "Translate Arxiv papers for me", + "请修改config中的GROBID_URL": "Please modify GROBID_URL in the config", + "处理特殊情况": "Handling special cases", + "不要自己瞎搞!」": "Don't mess around by yourself!", + "LoadConversationHistoryArchive | 输入参数为路径": "LoadConversationHistoryArchive | Input parameter is a path", + "| 输入参数是一个问题": "| Input parameter is a question", + "├── CHATBOT_HEIGHT 对话窗的高度": "├── CHATBOT_HEIGHT Height of the chat window", + "对C": "To C", + "默认关闭": "Default closed", + "当前进度": "Current progress", + "HUGGINGFACE的TOKEN": "HUGGINGFACE's TOKEN", + "查找可用插件中": "Searching for available plugins", + "下载LLAMA时起作用 https": "Works when downloading LLAMA https", + "使用 AK": "Using AK", + "正在执行任务": "Executing task", + "保存当前的对话 | 不需要输入参数": "Save current conversation | No input parameters required", + "对话": "Conversation", + "图中鲜花怒放": "Flowers blooming in the picture", + "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包": "Batch translate Chinese to English in Markdown files | Input parameter is a path or upload a compressed package", + "ParsingCSharpProject的所有源文件 | 输入参数为路径": "ParsingCSharpProject's all source files | Input parameter is a path", + "为我翻译PDF论文": "Translate PDF papers for me", + "聊天对话": "Chat conversation", + "拼接鉴权参数": "Concatenate authentication parameters", + "请检查config中的GROBID_URL": "Please check the GROBID_URL in the config", + "拼接字符串": "Concatenate strings", + "您的意图可以被识别的更准确": "Your intent can be recognized more accurately", + "该模型有七个 bin 文件": "The model has seven bin files", + "但思路相同": "But the idea is the same", + "你需要翻译": "You need to translate", + "或者描述文件所在的路径": "Or the path of the description file", + "请您上传文件": "Please upload the file", + "不常用": "Not commonly used", + "尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-": "Experimental plugins that have not been fully tested 
& plugins that require additional dependencies -=--=-", + "⭐ ⭐ ⭐ 选择插件": "⭐ ⭐ ⭐ Select plugin", + "当前配置不允许被修改!如需激活本功能": "The current configuration does not allow modification! To activate this feature", + "正在连接GROBID服务": "Connecting to GROBID service", + "用户图形界面布局依赖关系示意图": "Diagram of user interface layout dependencies", + "是否允许通过自然语言描述修改本页的配置": "Allow modifying the configuration of this page through natural language description", + "self.chatbot被序列化": "self.chatbot is serialized", + "本地Latex论文精细翻译 | 输入参数是路径": "Locally translate Latex papers with fine-grained translation | Input parameter is the path", + "抱歉": "Sorry", + "以下这部分是最早加入的最稳定的模型 -=-=-=-=-=-=-": "The following section is the earliest and most stable model added", + "「用插件翻译README": "Translate README with plugins", + "如果不正确": "If incorrect", + "⭐ ⭐ ⭐ 读取可配置项目条目": "⭐ ⭐ ⭐ Read configurable project entries", + "开始语言对话 | 没有输入参数": "Start language conversation | No input parameters", + "谨慎操作 | 不需要输入参数": "Handle with caution | No input parameters required", + "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包": "Correct the entire English Latex project | Input parameter is the path or upload compressed package", + "如果需要处理文件": "If file processing is required", + "提供图像的内容": "Provide the content of the image", + "查看历史上的今天事件 | 不需要输入参数": "View historical events of today | No input parameters required", + "这个稍微啰嗦一点": "This is a bit verbose", + "多线程解析并翻译此项目的源码 | 不需要输入参数": "Parse and translate the source code of this project in multi-threading | No input parameters required", + "此处打印出建立连接时候的url": "Print the URL when establishing the connection here", + "精准翻译PDF论文为中文 | 输入参数为路径": "Translate PDF papers accurately into Chinese | Input parameter is the path", + "检测到操作错误!当您上传文档之后": "Operation error detected! 
After you upload the document", + "在线大模型配置关联关系示意图": "Online large model configuration relationship diagram", + "你的填写的空间名如grobid": "Your filled space name such as grobid", + "获取方法": "Get method", + "| 输入参数为路径": "| Input parameter is the path", + "⭐ ⭐ ⭐ 执行插件": "⭐ ⭐ ⭐ Execute plugin", + "├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "├── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description", + "重新页面即可生效": "Refresh the page to take effect", + "设为public": "Set as public", + "并在此处指定模型路径": "And specify the model path here", + "分析用户意图中": "Analyzing user intent", + "刷新下拉列表": "Refresh the drop-down list", + "失败 当前语言模型": "Failed current language model", + "1. 请用**自然语言**描述您需要做什么": "1. Please describe what you need to do in **natural language**", + "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包": "Translate the full text of Latex projects from Chinese to English | Input parameter is the path or upload a compressed package", + "没有配置BAIDU_CLOUD_API_KEY": "No configuration for BAIDU_CLOUD_API_KEY", + "设置默认值": "Set default value", + "如果太多了会导致gpt无法理解": "If there are too many, it will cause GPT to be unable to understand", + "绿草如茵": "Green grass", + "├── LAYOUT 窗口布局": "├── LAYOUT window layout", + "用户意图理解": "User intent understanding", + "生成RFC1123格式的时间戳": "Generate RFC1123 formatted timestamp", + "欢迎您前往Github反馈问题": "Welcome to go to Github to provide feedback", + "排除已经是按钮的插件": "Exclude plugins that are already buttons", + "亦在下拉菜单中显示": "Also displayed in the dropdown menu", + "导致无法反序列化": "Causing deserialization failure", + "意图=": "Intent =", + "章节": "Chapter", + "调用插件": "Invoke plugin", + "ParseRustProject的所有源文件 | 输入参数为路径": "All source files of ParseRustProject | Input parameter is path", + "需要点击“函数插件区”按钮进行处理": "Need to click the 'Function Plugin Area' button for processing", + "默认 AsButton = True": "Default AsButton = True", + "收到websocket错误的处理": "Handling websocket errors", + "用插件": "Use Plugin", + "没有选择任何插件组": "No plugin group 
selected", + "答": "Answer", + "可修改成本地GROBID服务": "Can modify to local GROBID service", + "用户意图": "User intent", + "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包": "Polish the full text of English Latex projects | Input parameters are paths or uploaded compressed packages", + "「我不喜欢当前的界面颜色": "I don't like the current interface color", + "「请调用插件": "Please call the plugin", + "VoidTerminal状态": "VoidTerminal status", + "新配置": "New configuration", + "支持Github链接": "Support Github links", + "没有配置BAIDU_CLOUD_SECRET_KEY": "No BAIDU_CLOUD_SECRET_KEY configured", + "获取当前VoidTerminal状态": "Get the current VoidTerminal status", + "刷新按钮": "Refresh button", + "为了防止pickle.dumps": "To prevent pickle.dumps", + "放弃治疗": "Give up treatment", + "可指定不同的生成长度、top_p等相关超参": "Can specify different generation lengths, top_p and other related hyperparameters", + "请将题目和摘要翻译为": "Translate the title and abstract into", + "通过appid和用户的提问来生成请参数": "Generate request parameters through appid and user's question", + "ImageGeneration | 输入参数字符串": "ImageGeneration | Input parameter string", + "将文件拖动到文件上传区": "Drag and drop the file to the file upload area", + "如果意图模糊": "If the intent is ambiguous", + "星火认知大模型": "Spark Cognitive Big Model", + "执行中. 删除 gpt_log & private_upload": "Executing. 
Delete gpt_log & private_upload", + "默认 Color = secondary": "Default Color = secondary", + "此处也不需要修改": "No modification is needed here", + "⭐ ⭐ ⭐ 分析用户意图": "⭐ ⭐ ⭐ Analyze user intent", + "再试一次": "Try again", + "请写bash命令实现以下功能": "Please write a bash command to implement the following function", + "批量SummarizingWordDocuments | 输入参数为路径": "Batch SummarizingWordDocuments | Input parameter is the path", + "/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析": "Parse the python file in /Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns", + "当我要求你写bash命令时": "When I ask you to write a bash command", + "├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框": "├── AUTO_CLEAR_TXT Whether to automatically clear the input box when submitting", + "按停止键终止": "Press the stop key to terminate", + "文心一言": "ERNIE Bot", + "不能理解您的意图": "Cannot understand your intention", + "用简单的关键词检测用户意图": "Detect user intention with simple keywords", + "中文": "Chinese", + "解析一个C++项目的所有源文件": "Parse all source files of a C++ project", + "请求的Prompt为": "Requested prompt is", + "参考本demo的时候可取消上方打印的注释": "You can remove the comments above when referring to this demo", + "开始接收回复": "Start receiving replies", + "接入讯飞星火大模型 https": "Access to Xunfei Xinghuo large model https", + "用该压缩包进行反馈": "Use this compressed package for feedback", + "翻译Markdown或README": "Translate Markdown or README", + "SK 生成鉴权签名": "SK generates authentication signature", + "插件参数": "Plugin parameters", + "需要访问中文Bing": "Need to access Chinese Bing", + "ParseFrontendProject的所有源文件": "Parse all source files of ParseFrontendProject", + "现在将执行效果稍差的旧版代码": "Now execute the older version code with slightly worse performance", + "您需要明确说明并在指令中提到它": "You need to specify and mention it in the command", + "请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件": "Please set ALLOW_RESET_CONFIG=True in config.py and restart the software", + "按照自然语言描述生成一个动画 | 输入参数是一段话": "Generate an animation based on natural language description | 
Input parameter is a sentence", + "你的hf用户名如qingxu98": "Your hf username, such as qingxu98", + "Arixv论文精细翻译 | 输入参数arxiv论文的ID": "Fine translation of Arixv paper | Input parameter is the ID of arxiv paper", + "无法获取 abstract": "Unable to retrieve abstract", + "尽可能地仅用一行命令解决我的要求": "Try to solve my request using only one command", + "提取插件参数": "Extract plugin parameters", + "配置修改完成": "Configuration modification completed", + "正在修改配置中": "Modifying configuration", + "ParsePythonProject的所有源文件": "All source files of ParsePythonProject", + "请求错误": "Request error", + "精准翻译PDF论文": "Accurate translation of PDF paper", + "无法获取 authors": "Unable to retrieve authors", + "该插件诞生时间不长": "This plugin has not been around for long", + "返回项目根路径": "Return project root path", + "BatchSummarizePDFDocuments的内容 | 输入参数为路径": "Content of BatchSummarizePDFDocuments | Input parameter is a path", + "百度千帆": "Baidu Qianfan", + "解析一个C++项目的所有头文件": "Parse all header files of a C++ project", + "现在请您描述您的需求": "Now please describe your requirements", + "该功能具有一定的危险性": "This feature has a certain level of danger", + "收到websocket关闭的处理": "Processing when receiving websocket closure", + "读取Tex论文并写摘要 | 输入参数为路径": "Read Tex paper and write abstract | Input parameter is the path", + "地址为https": "The address is https", + "限制最多前10个配置项": "Limit up to 10 configuration items", + "6. 如果不需要上传文件": "6. If file upload is not needed", + "默认 Group = 对话": "Default Group = Conversation", + "五秒后即将重启!若出现报错请无视即可": "Restarting in five seconds! 
Please ignore if there is an error", + "收到websocket连接建立的处理": "Processing when receiving websocket connection establishment", + "批量生成函数的注释 | 输入参数为路径": "Batch generate function comments | Input parameter is the path", + "聊天": "Chat", + "但您可以尝试再试一次": "But you can try again", + "千帆大模型平台": "Qianfan Big Model Platform", + "直接运行 python tests/test_plugins.py": "Run python tests/test_plugins.py directly", + "或是None": "Or None", + "进行hmac-sha256进行加密": "Perform encryption using hmac-sha256", + "批量总结音频或视频 | 输入参数为路径": "Batch summarize audio or video | Input parameter is path", + "插件在线服务配置依赖关系示意图": "Plugin online service configuration dependency diagram", + "开始初始化模型": "Start initializing model", + "弱模型可能无法理解您的想法": "Weak model may not understand your ideas", + "解除大小写限制": "Remove case sensitivity restriction", + "跳过提示环节": "Skip prompt section", + "接入一些逆向工程https": "Access some reverse engineering https", + "执行完成": "Execution completed", + "如果需要配置": "If configuration is needed", + "此处不修改;如果使用本地或无地域限制的大模型时": "Do not modify here; if using local or region-unrestricted large models", + "你是一个Linux大师级用户": "You are a Linux master-level user", + "arxiv论文的ID是1812.10695": "The ID of the arxiv paper is 1812.10695", + "而不是点击“提交”按钮": "Instead of clicking the 'Submit' button", + "解析一个Go项目的所有源文件 | 输入参数为路径": "Parse all source files of a Go project | Input parameter is path", + "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包": "Polish the entire text of a Chinese Latex project | Input parameter is path or upload compressed package", + "「生成一张图片": "Generate an image", + "将Markdown或README翻译为中文 | 输入参数为路径或URL": "Translate Markdown or README to Chinese | Input parameters are path or URL", + "训练时间": "Training time", + "将请求的鉴权参数组合为字典": "Combine the requested authentication parameters into a dictionary", + "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包": "Translate the entire text of Latex project from English to Chinese | Input parameters are path or uploaded compressed package", + "内容如下": "The content is as follows", + 
"用于高质量地读取PDF文档": "Used for high-quality reading of PDF documents", + "上下文太长导致 token 溢出": "The context is too long, causing token overflow", + "├── DARK_MODE 暗色模式 / 亮色模式": "├── DARK_MODE Dark mode / Light mode", + "语言模型回复为": "The language model replies as", + "from crazy_functions.chatglm微调工具 import 微调数据集生成": "from crazy_functions.chatglm fine-tuning tool import fine-tuning dataset generation", + "为您选择了插件": "Selected plugin for you", + "无法获取 title": "Unable to get title", + "收到websocket消息的处理": "Processing of received websocket messages", + "2023年": "2023", + "清除所有缓存文件": "Clear all cache files", + "├── PDF文档精准解析": "├── Accurate parsing of PDF documents", + "论文我刚刚放到上传区了": "I just put the paper in the upload area", + "生成url": "Generate URL", + "以下部分是新加入的模型": "The following section is the newly added model", + "学术": "Academic", + "├── DEFAULT_FN_GROUPS 插件分类默认选项": "├── DEFAULT_FN_GROUPS Plugin classification default options", + "不推荐使用": "Not recommended for use", + "正在同时咨询": "Consulting simultaneously", + "将Markdown翻译为中文 | 输入参数为路径或URL": "Translate Markdown to Chinese | Input parameters are path or URL", + "Github网址是https": "The Github URL is https", + "试着加上.tex后缀试试": "Try adding the .tex suffix", + "对项目中的各个插件进行测试": "Test each plugin in the project", + "插件说明": "Plugin description", + "├── CODE_HIGHLIGHT 代码高亮": "├── CODE_HIGHLIGHT Code highlighting", + "记得用插件": "Remember to use the plugin", + "谨慎操作": "Handle with caution" } \ No newline at end of file diff --git a/docs/translate_std.json b/docs/translate_std.json index 8c47de9..63b6baa 100644 --- a/docs/translate_std.json +++ b/docs/translate_std.json @@ -83,5 +83,10 @@ "图片生成": "ImageGeneration", "动画生成": "AnimationGeneration", "语音助手": "VoiceAssistant", - "启动微调": "StartFineTuning" + "启动微调": "StartFineTuning", + "清除缓存": "ClearCache", + "辅助功能": "Accessibility", + "虚空终端": "VoidTerminal", + "解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID", + "虚空终端主路由": "VoidTerminalMainRoute" } \ No newline at end of file diff --git 
a/multi_language.py b/multi_language.py index 7786b86..c4ed36e 100644 --- a/multi_language.py +++ b/multi_language.py @@ -478,6 +478,8 @@ def step_2_core_key_translate(): up = trans_json(need_translate, language=LANG, special=False) map_to_json(up, language=LANG) cached_translation = read_map_from_json(language=LANG) + LANG_STD = 'std' + cached_translation.update(read_map_from_json(language=LANG_STD)) cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0]))) # =============================================== From 51e809c09e3cf8d678bd64727f0f77313b4ee1a9 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:34:46 +0800 Subject: [PATCH 18/47] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index fda0f9e..ea629dd 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,8 @@ Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法 [多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧? 
⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) -⭐[虚空终端](https://github.com/binary-husky/void-terminal)pip包 | 脱离GUI,在Python中直接调用本项目的函数插件(开发中) +⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) +⭐虚空终端插件 | 用自然语言,直接调度本项目其他插件 更多新功能展示 (图像生成等) …… | 见本文档结尾处 …… From 82936f71b6b25a605a7902183f45a06a6f567e30 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:37:47 +0800 Subject: [PATCH 19/47] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index ea629dd..e60397c 100644 --- a/README.md +++ b/README.md @@ -300,6 +300,11 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h 12. 虚空终端 + +步骤一:“请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf” + +步骤二:点击“虚空终端” +
From 3344ffcb8b9a49bcc66414f8d1535253541c6fa6 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:41:52 +0800 Subject: [PATCH 20/47] Update README.md --- README.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index e60397c..59f4d3d 100644 --- a/README.md +++ b/README.md @@ -250,10 +250,13 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h -3. 生成报告。大部分插件都会在执行结束后,生成工作报告 +3. 虚空终端(从自然语言输入中,理解用户意图+自动调用其他插件) + +- 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf ” +- 步骤二:点击“虚空终端” +
- - +
4. 模块化功能设计,简单的接口却能支持强大的功能 @@ -299,15 +302,6 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h -12. 虚空终端 - -步骤一:“请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf” - -步骤二:点击“虚空终端” - -
- -
### II:版本: From b6ac3d0d6cc0203507d09120b3403ae3bc5559c7 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 4 Sep 2023 12:34:55 +0800 Subject: [PATCH 21/47] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 59f4d3d..dcee102 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,9 @@ To translate this project to arbitary language with GPT, read and run [`multi_la > **Note** > -> 1.请注意只有 **高亮(如红色)** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 +> 1.请注意只有 **高亮** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > -> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。[安装方法](#installation)。 +> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[安装方法](#installation) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。 > > 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM和Moss等等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 From f0482d3baeca7bcb066ccf231c2cd61a89a641cc Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 4 Sep 2023 12:39:25 +0800 Subject: [PATCH 22/47] Update docker-compose.yml --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index cf753b5..2387527 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ 
#【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line ## =================================================== -## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务) +## 【方案一】 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务) ## =================================================== version: '3' services: @@ -13,7 +13,7 @@ services: USE_PROXY: ' True ' proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"] ' + AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "sparkv2", "qianfan"] ' WEB_PORT: ' 22303 ' ADD_WAIFU: ' True ' # THEME: ' Chuanhu-Small-and-Beautiful ' From 1092031d77996e14725fbcbd157ec2f8b78ff975 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Wed, 6 Sep 2023 10:15:52 +0800 Subject: [PATCH 23/47] Create stale.yml --- .github/workflows/stale.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000..05a33b7 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,24 @@ +# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time. +# +# You can adjust the behavior by modifying this file. +# For more information, see: +# https://github.com/actions/stale + +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: read + + steps: + - uses: actions/stale@v8 + with: + stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. 
Remove stale label or comment or this will be closed in 1 days.' + days-before-stale: 100 + days-before-close: 1 From fda1e8727843b430646f0ef0a03a69f7a1acbcd8 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Wed, 6 Sep 2023 10:19:21 +0800 Subject: [PATCH 24/47] Update stale.yml --- .github/workflows/stale.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 05a33b7..717f254 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ name: 'Close stale issues and PRs' on: schedule: - - cron: '30 1 * * *' + - cron: '*/5 * * * *' jobs: stale: @@ -22,3 +22,4 @@ jobs: stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 1 days.' days-before-stale: 100 days-before-close: 1 + debug-only: true From 0e21e3e2e75f14309318ef56eb2888ab769fee7c Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Wed, 6 Sep 2023 10:24:11 +0800 Subject: [PATCH 25/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=B2=A1=E5=A1=AB?= =?UTF-8?q?=E5=86=99=E8=AE=AF=E9=A3=9EAPPID=E6=97=A0=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E6=8F=90=E7=A4=BA=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llm/bridge_spark.py | 15 ++++++++++++++- request_llm/com_sparkapi.py | 2 +- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/request_llm/bridge_spark.py b/request_llm/bridge_spark.py index 1a3d43d..0fe925f 100644 --- a/request_llm/bridge_spark.py +++ b/request_llm/bridge_spark.py @@ -2,11 +2,17 @@ import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, update_ui_lastest_msg from multiprocessing import Process, Pipe model_name = '星火认知大模型' +def validate_key(): + XFYUN_APPID, = get_conf('XFYUN_APPID', ) + 
if XFYUN_APPID == '00000000' or XFYUN_APPID == '': + return False + return True + def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 @@ -15,6 +21,9 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 response = "" + if validate_key() is False: + raise RuntimeError('请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET') + from .com_sparkapi import SparkRequestInstance sri = SparkRequestInstance() for response in sri.generate(inputs, llm_kwargs, history, sys_prompt): @@ -32,6 +41,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history) + if validate_key() is False: + yield from update_ui_lastest_msg(lastmsg="[Local Message]: 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0) + return + if additional_fn is not None: from core_functional import handle_core_functionality inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) diff --git a/request_llm/com_sparkapi.py b/request_llm/com_sparkapi.py index 308aa64..0b8d655 100644 --- a/request_llm/com_sparkapi.py +++ b/request_llm/com_sparkapi.py @@ -58,7 +58,7 @@ class Ws_Param(object): class SparkRequestInstance(): def __init__(self): XFYUN_APPID, XFYUN_API_SECRET, XFYUN_API_KEY = get_conf('XFYUN_APPID', 'XFYUN_API_SECRET', 'XFYUN_API_KEY') - + if XFYUN_APPID == '00000000' or XFYUN_APPID == '': raise RuntimeError('请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET') self.appid = XFYUN_APPID self.api_secret = XFYUN_API_SECRET self.api_key = XFYUN_API_KEY From 2f83b60fb3be02a8ed56a9b11ee35d47bb954ce3 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 6 Sep 2023 12:36:59 +0800 Subject: [PATCH 26/47] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=90=9C=E7=B4=A2?= 
=?UTF-8?q?=E5=A4=B1=E8=B4=A5=E6=97=B6=E7=9A=84=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/联网的ChatGPT.py | 6 +++++- crazy_functions/联网的ChatGPT_bing版.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/crazy_functions/联网的ChatGPT.py b/crazy_functions/联网的ChatGPT.py index 6a7d118..4ed9aeb 100644 --- a/crazy_functions/联网的ChatGPT.py +++ b/crazy_functions/联网的ChatGPT.py @@ -75,7 +75,11 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s proxies, = get_conf('proxies') urls = google(txt, proxies) history = [] - + if len(urls) == 0: + chatbot.append((f"结论:{txt}", + "[Local Message] 受到google限制,无法从google获取信息!")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + return # ------------- < 第2步:依次访问网页 > ------------- max_search_result = 5 # 最多收纳多少个网页的结果 for index, url in enumerate(urls[:max_search_result]): diff --git a/crazy_functions/联网的ChatGPT_bing版.py b/crazy_functions/联网的ChatGPT_bing版.py index 93a84a0..db5adb7 100644 --- a/crazy_functions/联网的ChatGPT_bing版.py +++ b/crazy_functions/联网的ChatGPT_bing版.py @@ -75,7 +75,11 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor proxies, = get_conf('proxies') urls = bing_search(txt, proxies) history = [] - + if len(urls) == 0: + chatbot.append((f"结论:{txt}", + "[Local Message] 受到bing限制,无法从bing获取信息!")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + return # ------------- < 第2步:依次访问网页 > ------------- max_search_result = 8 # 最多收纳多少个网页的结果 for index, url in enumerate(urls[:max_search_result]): From b2d03bf2a3e370c3f7c3d49ceb8472614d20b2de Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 6 Sep 2023 15:30:12 +0900 Subject: [PATCH 27/47] Update README.md arbitary -> arbitrary --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dcee102..0451262 
100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ **如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或函数插件,欢迎发pull requests!** If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself. -To translate this project to arbitary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). +To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). > **Note** > From d6698db25713f0f2a16558787c120b7db4e82133 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 6 Sep 2023 15:32:11 +0800 Subject: [PATCH 28/47] =?UTF-8?q?nougat=E7=BF=BB=E8=AF=91PDF=E8=AE=BA?= =?UTF-8?q?=E6=96=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 12 + crazy_functions/批量翻译PDF文档_NOUGAT.py | 271 ++++++++++++++++++++++ tests/test_plugins.py | 3 +- 3 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 crazy_functions/批量翻译PDF文档_NOUGAT.py diff --git a/crazy_functional.py b/crazy_functional.py index f06cd40..4c12e92 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -501,6 +501,18 @@ def get_crazy_functions(): except: print('Load function plugin failed') + try: + from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档 + function_plugins.update({ + "精准翻译PDF文档(NOUGAT)": { + "Group": "学术", + "Color": "stop", + "AsButton": False, + "Function": HotReload(批量翻译PDF文档) + } + }) + except: + print('Load function plugin failed') # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py new file mode 100644 index 
0000000..33e127a --- /dev/null +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -0,0 +1,271 @@ +from toolbox import CatchException, report_execption, gen_time_str +from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion +from toolbox import write_history_to_file, get_log_folder +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency +from .crazy_utils import read_and_clean_pdf_text +from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url +from colorful import * +import os +import math +import logging + +def markdown_to_dict(article_content): + import markdown + from bs4 import BeautifulSoup + cur_t = "" + cur_c = "" + results = {} + for line in article_content: + if line.startswith('#'): + if cur_t!="": + if cur_t not in results: + results.update({cur_t:cur_c.lstrip('\n')}) + else: + # 处理重名的章节 + results.update({cur_t + " " + gen_time_str():cur_c.lstrip('\n')}) + cur_t = line.rstrip('\n') + cur_c = "" + else: + cur_c += line + results_final = {} + for k in list(results.keys()): + if k.startswith('# '): + results_final['title'] = k.split('# ')[-1] + results_final['authors'] = results.pop(k).lstrip('\n') + if k.startswith('###### Abstract'): + results_final['abstract'] = results.pop(k).lstrip('\n') + + results_final_sections = [] + for k,v in results.items(): + results_final_sections.append({ + 'heading':k.lstrip("# "), + 'text':v if len(v) > 0 else f"The beginning of {k.lstrip('# ')} section." 
+ }) + results_final['sections'] = results_final_sections + return results_final + + +@CatchException +def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + + disable_auto_promotion(chatbot) + # 基本信息:功能、贡献者 + chatbot.append([ + "函数插件功能?", + "批量翻译PDF文档。函数插件贡献者: Binary-Husky"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import nougat + import tiktoken + except: + report_execption(chatbot, history, + a=f"解析项目: {txt}", + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 清空历史,以免输入溢出 + history = [] + + from .crazy_utils import get_files_from_everything + success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') + # 检测输入参数,如没有给定输入参数,直接退出 + if not success: + if txt == "": txt = '空空如也的输入栏' + + # 如果没找到任何文件 + if len(file_manifest) == 0: + report_execption(chatbot, history, + a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 开始正式执行任务 + yield from 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + + +def nougat_with_timeout(command, cwd, timeout=3600): + import subprocess + process = subprocess.Popen(command, shell=True, cwd=cwd) + try: + stdout, stderr = process.communicate(timeout=timeout) + except subprocess.TimeoutExpired: + process.kill() + stdout, stderr = process.communicate() + print("Process timed out!") + return False + return True + + +def NOUGAT_parse_pdf(fp): + import glob + from toolbox import get_log_folder, gen_time_str + dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str()) + os.makedirs(dst) + nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd()) + res = glob.glob(os.path.join(dst,'*.mmd')) + if len(res) == 0: + raise 
RuntimeError("Nougat解析论文失败。") + return res[0] + + +def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): + import copy + import tiktoken + TOKEN_LIMIT_PER_FRAGMENT = 1280 + generated_conclusion_files = [] + generated_html_files = [] + DST_LANG = "中文" + for index, fp in enumerate(file_manifest): + chatbot.append(["当前进度:", f"正在解析论文,请稍候"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + fpp = NOUGAT_parse_pdf(fp) + + with open(fpp, 'r', encoding='utf8') as f: + article_content = f.readlines() + article_dict = markdown_to_dict(article_content) + logging.info(article_dict) + + prompt = "以下是一篇学术论文的基本信息:\n" + # title + title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' + # authors + authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n' + # abstract + abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n' + # command + prompt += f"请将题目和摘要翻译为{DST_LANG}。" + meta = [f'# Title:\n\n', title, f'# Abstract:\n\n', abstract ] + + # 单线,获取文章meta信息 + paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=prompt, + inputs_show_user=prompt, + llm_kwargs=llm_kwargs, + chatbot=chatbot, history=[], + sys_prompt="You are an academic paper reader。", + ) + + # 多线,翻译 + inputs_array = [] + inputs_show_user_array = [] + + # get_token_num + from request_llm.bridge_all import model_info + enc = model_info[llm_kwargs['llm_model']]['tokenizer'] + def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) + from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf + + def break_down(txt): + raw_token_num = get_token_num(txt) + if raw_token_num <= TOKEN_LIMIT_PER_FRAGMENT: + return [txt] + else: + # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT + # find a smooth token limit to achieve even seperation + count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT)) + 
token_limit_smooth = raw_token_num // count + count + return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth) + + for section in article_dict.get('sections'): + if len(section['text']) == 0: continue + section_frags = break_down(section['text']) + for i, fragment in enumerate(section_frags): + heading = section['heading'] + if len(section_frags) > 1: heading += f' Part-{i+1}' + inputs_array.append( + f"你需要翻译{heading}章节,内容如下: \n\n{fragment}" + ) + inputs_show_user_array.append( + f"# {heading}\n\n{fragment}" + ) + + gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( + inputs_array=inputs_array, + inputs_show_user_array=inputs_show_user_array, + llm_kwargs=llm_kwargs, + chatbot=chatbot, + history_array=[meta for _ in inputs_array], + sys_prompt_array=[ + "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array], + ) + res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + gpt_response_collection, file_basename=None, file_fullname=None) + promote_file_to_downloadzone(res_path, rename_file=os.path.basename(fp)+'.md', chatbot=chatbot) + generated_conclusion_files.append(res_path) + + ch = construct_html() + orig = "" + trans = "" + gpt_response_collection_html = copy.deepcopy(gpt_response_collection) + for i,k in enumerate(gpt_response_collection_html): + if i%2==0: + gpt_response_collection_html[i] = inputs_show_user_array[i//2] + else: + gpt_response_collection_html[i] = gpt_response_collection_html[i] + + final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] + final.extend(gpt_response_collection_html) + for i, k in enumerate(final): + if i%2==0: + orig = k + if i%2==1: + trans = k + ch.add_row(a=orig, b=trans) + create_report_file_name = f"{os.path.basename(fp)}.trans.html" + html_file = ch.save_file(create_report_file_name) + generated_html_files.append(html_file) + 
promote_file_to_downloadzone(html_file, rename_file=os.path.basename(html_file), chatbot=chatbot) + + chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + + +class construct_html(): + def __init__(self) -> None: + self.css = """ +.row { + display: flex; + flex-wrap: wrap; +} + +.column { + flex: 1; + padding: 10px; +} + +.table-header { + font-weight: bold; + border-bottom: 1px solid black; +} + +.table-row { + border-bottom: 1px solid lightgray; +} + +.table-cell { + padding: 5px; +} + """ + self.html_string = f'翻译结果' + + + def add_row(self, a, b): + tmp = """ +
+
REPLACE_A
+
REPLACE_B
+
+ """ + from toolbox import markdown_convertion + tmp = tmp.replace('REPLACE_A', markdown_convertion(a)) + tmp = tmp.replace('REPLACE_B', markdown_convertion(b)) + self.html_string += tmp + + + def save_file(self, file_name): + with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f: + f.write(self.html_string.encode('utf-8', 'ignore').decode()) + return os.path.join(get_log_folder(), file_name) diff --git a/tests/test_plugins.py b/tests/test_plugins.py index 2780ed2..ec28af1 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -10,8 +10,9 @@ from tests.test_utils import plugin_test if __name__ == "__main__": # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep') + plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf') - plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') + # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析') # plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表') From 7855325ff966d7936d399744273ba2739841751b Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 6 Sep 2023 23:33:15 +0800 Subject: [PATCH 29/47] update dockerfiles --- docs/Dockerfile+ChatGLM | 62 +-------------------------------- docs/Dockerfile+JittorLLM | 60 +------------------------------ docs/Dockerfile+NoLocal+Latex | 28 +-------------- docs/GithubAction+NoLocal+Latex | 5 ++- 4 files changed, 7 insertions(+), 148 deletions(-) diff --git a/docs/Dockerfile+ChatGLM b/docs/Dockerfile+ChatGLM index 75c1fa7..f0d7c75 100644 --- a/docs/Dockerfile+ChatGLM +++ b/docs/Dockerfile+ChatGLM @@ -1,62 +1,2 @@ -# How to build | 如何构建: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . 
-# How to run | (1) 我想直接一键运行(选择0号GPU): docker run --rm -it --net=host --gpus \"device=0\" gpt-academic -# How to run | (2) 我想运行之前进容器做一些调整(选择1号GPU): docker run --rm -it --net=host --gpus \"device=1\" gpt-academic bash - -# 从NVIDIA源,从而支持显卡运损(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -ARG useProxyNetwork='' -RUN apt-get update -RUN apt-get install -y curl proxychains curl -RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing +# 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss -# 配置代理网络(构建Docker镜像时使用) -# # comment out below if you do not need proxy network | 如果不需要翻墙 - 从此行向下删除 -RUN $useProxyNetwork curl cip.cc -RUN sed -i '$ d' /etc/proxychains.conf -RUN sed -i '$ d' /etc/proxychains.conf -# 在这里填写主机的代理协议(用于从github拉取代码) -RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf -ARG useProxyNetwork=proxychains -# # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除 - - -# use python3 as the system default python -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 -# 下载pytorch -RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 -# 下载分支 -WORKDIR /gpt -RUN $useProxyNetwork git clone https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN $useProxyNetwork python3 -m pip install -r requirements.txt -RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_newbing.txt - -# 预热CHATGLM参数(非必要 可选步骤) -RUN echo ' \n\ -from transformers import AutoModel, AutoTokenizer \n\ -chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\ -chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py -RUN python3 -u warm_up_chatglm.py - -# 禁用缓存,确保更新代码 -ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache 
-RUN $useProxyNetwork git pull - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 为chatgpt-academic配置代理和API-KEY (非必要 可选步骤) -# 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........" -# LLM_MODEL 是选择初始的模型 -# LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda -# [说明: 以下内容与`config.py`一一对应,请查阅config.py来完成一下配置的填写] -RUN echo ' \n\ -API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\ -USE_PROXY = True \n\ -LLM_MODEL = "chatglm" \n\ -LOCAL_MODEL_DEVICE = "cuda" \n\ -proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/Dockerfile+JittorLLM b/docs/Dockerfile+JittorLLM index ea53402..2bd1237 100644 --- a/docs/Dockerfile+JittorLLM +++ b/docs/Dockerfile+JittorLLM @@ -1,59 +1 @@ -# How to build | 如何构建: docker build -t gpt-academic-jittor --network=host -f Dockerfile+ChatGLM . 
-# How to run | (1) 我想直接一键运行(选择0号GPU): docker run --rm -it --net=host --gpus \"device=0\" gpt-academic-jittor bash -# How to run | (2) 我想运行之前进容器做一些调整(选择1号GPU): docker run --rm -it --net=host --gpus \"device=1\" gpt-academic-jittor bash - -# 从NVIDIA源,从而支持显卡运损(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -ARG useProxyNetwork='' -RUN apt-get update -RUN apt-get install -y curl proxychains curl g++ -RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing - -# 配置代理网络(构建Docker镜像时使用) -# # comment out below if you do not need proxy network | 如果不需要翻墙 - 从此行向下删除 -RUN $useProxyNetwork curl cip.cc -RUN sed -i '$ d' /etc/proxychains.conf -RUN sed -i '$ d' /etc/proxychains.conf -# 在这里填写主机的代理协议(用于从github拉取代码) -RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf -ARG useProxyNetwork=proxychains -# # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除 - - -# use python3 as the system default python -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 -# 下载pytorch -RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 -# 下载分支 -WORKDIR /gpt -RUN $useProxyNetwork git clone https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN $useProxyNetwork python3 -m pip install -r requirements.txt -RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_newbing.txt -RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I - -# 下载JittorLLMs -RUN $useProxyNetwork git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms - -# 禁用缓存,确保更新代码 -ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache -RUN $useProxyNetwork git pull - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' 
- -# 为chatgpt-academic配置代理和API-KEY (非必要 可选步骤) -# 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........" -# LLM_MODEL 是选择初始的模型 -# LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda -# [说明: 以下内容与`config.py`一一对应,请查阅config.py来完成一下配置的填写] -RUN echo ' \n\ -API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\ -USE_PROXY = True \n\ -LLM_MODEL = "chatglm" \n\ -LOCAL_MODEL_DEVICE = "cuda" \n\ -proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py - -# 启动 -CMD ["python3", "-u", "main.py"] +# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs \ No newline at end of file diff --git a/docs/Dockerfile+NoLocal+Latex b/docs/Dockerfile+NoLocal+Latex index 0f9ac8a..a0f162a 100644 --- a/docs/Dockerfile+NoLocal+Latex +++ b/docs/Dockerfile+NoLocal+Latex @@ -1,27 +1 @@ -# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM -# - 1 修改 `config.py` -# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/Dockerfile+NoLocal+Latex . -# - 3 运行 docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex - -FROM fuqingxu/python311_texlive_ctex:latest - -# 指定路径 -WORKDIR /gpt - -ARG useProxyNetwork='' - -RUN $useProxyNetwork pip3 install gradio openai numpy arxiv rich -i https://pypi.douban.com/simple/ -RUN $useProxyNetwork pip3 install colorama Markdown pygments pymupdf -i https://pypi.douban.com/simple/ - -# 装载项目文件 -COPY . . 
- - -# 安装依赖 -RUN $useProxyNetwork pip3 install -r requirements.txt -i https://pypi.douban.com/simple/ - -# 可选步骤,用于预热模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] +# 此Dockerfile不再维护,请前往docs/GithubAction+NoLocal+Latex diff --git a/docs/GithubAction+NoLocal+Latex b/docs/GithubAction+NoLocal+Latex index 5ff9bb8..6b41b63 100644 --- a/docs/GithubAction+NoLocal+Latex +++ b/docs/GithubAction+NoLocal+Latex @@ -1,6 +1,6 @@ # 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM # - 1 修改 `config.py` -# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/Dockerfile+NoLocal+Latex . +# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex . # - 3 运行 docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex FROM fuqingxu/python311_texlive_ctex:latest @@ -10,6 +10,9 @@ WORKDIR /gpt RUN pip3 install gradio openai numpy arxiv rich RUN pip3 install colorama Markdown pygments pymupdf +RUN pip3 install python-docx moviepy pdfminer +RUN pip3 install nougat-ocr +RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git # 装载项目文件 COPY . . 
From dd9e624761ad2958704e98a18d7f73f6e71ef0c4 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 00:40:11 +0800 Subject: [PATCH 30/47] add new dockerfile --- docs/GithubAction+AllCapacity | 41 +++++++++++++++++++++++++++++++++ docs/GithubAction+ChatGLM+Moss | 1 - docs/GithubAction+NoLocal+Latex | 1 + 3 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 docs/GithubAction+AllCapacity diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity new file mode 100644 index 0000000..ed20312 --- /dev/null +++ b/docs/GithubAction+AllCapacity @@ -0,0 +1,41 @@ + +# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3) +FROM fuqingxu/python311_texlive_ctex:latest as texdocker +FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 +COPY --from=texdocker /usr/local/texlive/ /usr/local/texlive/ + +RUN apt-get update +RUN apt-get install -y curl proxychains curl gcc +RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing + +ENV PATH="${PATH}:/usr/local/texlive/2023/bin/x86_64-linux:/usr/local/texlive/2024/bin/x86_64-linux:/usr/local/texlive/2025/bin/x86_64-linux:/usr/local/texlive/2026/bin/x86_64-linux" + + +# use python3 as the system default python +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 +# 下载pytorch +RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 +# 下载分支 +WORKDIR /gpt +RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +WORKDIR /gpt/gpt_academic +RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN python3 -m pip install openai numpy arxiv rich +RUN python3 -m pip install colorama Markdown pygments pymupdf +RUN python3 -m pip install python-docx moviepy pdfminer +RUN python3 -m pip install zh_langchain==0.2.1 +RUN python3 -m pip install nougat-ocr +RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git +RUN python3 -m pip install -r 
requirements.txt +RUN python3 -m pip install -r request_llm/requirements_moss.txt +RUN python3 -m pip install -r request_llm/requirements_qwen.txt +RUN python3 -m pip install -r request_llm/requirements_chatglm.txt +RUN python3 -m pip install -r request_llm/requirements_newbing.txt + + + +# 预热Tiktoken模块 +RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' + +# 启动 +CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss index 7bb11a2..3087d55 100644 --- a/docs/GithubAction+ChatGLM+Moss +++ b/docs/GithubAction+ChatGLM+Moss @@ -1,7 +1,6 @@ # 从NVIDIA源,从而支持显卡运损(检查宿主的nvidia-smi中的cuda版本必须>=11.3) FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -ARG useProxyNetwork='' RUN apt-get update RUN apt-get install -y curl proxychains curl gcc RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing diff --git a/docs/GithubAction+NoLocal+Latex b/docs/GithubAction+NoLocal+Latex index 6b41b63..2f2608c 100644 --- a/docs/GithubAction+NoLocal+Latex +++ b/docs/GithubAction+NoLocal+Latex @@ -11,6 +11,7 @@ WORKDIR /gpt RUN pip3 install gradio openai numpy arxiv rich RUN pip3 install colorama Markdown pygments pymupdf RUN pip3 install python-docx moviepy pdfminer +RUN pip3 install zh_langchain==0.2.1 RUN pip3 install nougat-ocr RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git From bfec29e9bc6edd5071176c4a775927b13d6ce07c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 00:43:31 +0800 Subject: [PATCH 31/47] new docker file --- .github/workflows/build-with-all-capacity.yml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/build-with-all-capacity.yml diff --git a/.github/workflows/build-with-all-capacity.yml b/.github/workflows/build-with-all-capacity.yml new file mode 100644 index 0000000..d81e43f --- /dev/null +++ b/.github/workflows/build-with-all-capacity.yml 
@@ -0,0 +1,44 @@ +# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages +name: build-with-all-capacity + +on: + push: + branches: + - 'master' + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}_with_all_capacity + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Log in to the Container registry + uses: docker/login-action@v2 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + push: true + file: docs/GithubAction+NoLocal+AllCapacity + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} From 24f41b0a75921f30c6d3296ca106de5aca586567 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 00:45:03 +0800 Subject: [PATCH 32/47] new docker file --- .github/workflows/build-with-all-capacity.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-with-all-capacity.yml b/.github/workflows/build-with-all-capacity.yml index d81e43f..7b2ee6a 100644 --- a/.github/workflows/build-with-all-capacity.yml +++ b/.github/workflows/build-with-all-capacity.yml @@ -39,6 +39,6 @@ jobs: with: context: . 
push: true - file: docs/GithubAction+NoLocal+AllCapacity + file: docs/GithubAction+AllCapacity tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} From 97cb9a4adc71726f9806405207a2103a5f5186d4 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 15:09:38 +0800 Subject: [PATCH 33/47] full capacity docker file --- docs/GithubAction+AllCapacity | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity index ed20312..3e506f5 100644 --- a/docs/GithubAction+AllCapacity +++ b/docs/GithubAction+AllCapacity @@ -1,17 +1,10 @@ +# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 . # 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM fuqingxu/python311_texlive_ctex:latest as texdocker -FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -COPY --from=texdocker /usr/local/texlive/ /usr/local/texlive/ - -RUN apt-get update -RUN apt-get install -y curl proxychains curl gcc -RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing - -ENV PATH="${PATH}:/usr/local/texlive/2023/bin/x86_64-linux:/usr/local/texlive/2024/bin/x86_64-linux:/usr/local/texlive/2025/bin/x86_64-linux:/usr/local/texlive/2026/bin/x86_64-linux" - +FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest # use python3 as the system default python +WORKDIR /gpt RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 # 下载pytorch RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 From 63219baa217617b0702002f8dc128152f8e57d3d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 17:04:40 +0800 Subject: [PATCH 34/47] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E8=AF=AD=E9=9F=B3?= =?UTF-8?q?=E5=AF=B9=E8=AF=9D=E6=97=B6=20=E5=8F=A5=E5=AD=90=E6=9C=AB?= 
=?UTF-8?q?=E5=B0=BE=E6=98=BE=E7=A4=BA=E5=BC=82=E5=B8=B8=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/live_audio/aliyunASR.py | 7 +++---- crazy_functions/语音助手.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/crazy_functions/live_audio/aliyunASR.py b/crazy_functions/live_audio/aliyunASR.py index 9641057..ed67fcd 100644 --- a/crazy_functions/live_audio/aliyunASR.py +++ b/crazy_functions/live_audio/aliyunASR.py @@ -1,4 +1,4 @@ -import time, threading, json +import time, logging, json class AliyunASR(): @@ -12,14 +12,14 @@ class AliyunASR(): message = json.loads(message) self.parsed_sentence = message['payload']['result'] self.event_on_entence_end.set() - print(self.parsed_sentence) + # print(self.parsed_sentence) def test_on_start(self, message, *args): # print("test_on_start:{}".format(message)) pass def test_on_error(self, message, *args): - print("on_error args=>{}".format(args)) + logging.error("on_error args=>{}".format(args)) pass def test_on_close(self, *args): @@ -36,7 +36,6 @@ class AliyunASR(): # print("on_completed:args=>{} message=>{}".format(args, message)) pass - def audio_convertion_thread(self, uuid): # 在一个异步线程中采集音频 import nls # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git diff --git a/crazy_functions/语音助手.py b/crazy_functions/语音助手.py index 6fe1afc..b1c8c41 100644 --- a/crazy_functions/语音助手.py +++ b/crazy_functions/语音助手.py @@ -80,9 +80,9 @@ class InterviewAssistant(AliyunASR): def __init__(self): self.capture_interval = 0.5 # second self.stop = False - self.parsed_text = "" - self.parsed_sentence = "" - self.buffered_sentence = "" + self.parsed_text = "" # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入 + self.parsed_sentence = "" # 某段话的整个句子,由 test_on_sentence_end() 写入 + self.buffered_sentence = "" # self.event_on_result_chg = threading.Event() self.event_on_entence_end = threading.Event() 
self.event_on_commit_question = threading.Event() @@ -132,7 +132,7 @@ class InterviewAssistant(AliyunASR): self.plugin_wd.feed() if self.event_on_result_chg.is_set(): - # update audio decode result + # called when some words have finished self.event_on_result_chg.clear() chatbot[-1] = list(chatbot[-1]) chatbot[-1][0] = self.buffered_sentence + self.parsed_text @@ -144,7 +144,11 @@ class InterviewAssistant(AliyunASR): # called when a sentence has ended self.event_on_entence_end.clear() self.parsed_text = self.parsed_sentence - self.buffered_sentence += self.parsed_sentence + self.buffered_sentence += self.parsed_text + chatbot[-1] = list(chatbot[-1]) + chatbot[-1][0] = self.buffered_sentence + history = chatbot2history(chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 if self.event_on_commit_question.is_set(): # called when a question should be commited From d7331befc12a7975198168a4ce02606427b00461 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 17:42:47 +0800 Subject: [PATCH 35/47] add note --- crazy_functions/批量翻译PDF文档_NOUGAT.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 33e127a..ed15121 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -120,7 +120,7 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa generated_html_files = [] DST_LANG = "中文" for index, fp in enumerate(file_manifest): - chatbot.append(["当前进度:", f"正在解析论文,请稍候"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 fpp = NOUGAT_parse_pdf(fp) with open(fpp, 'r', encoding='utf8') as f: From 4c6f3aa42797658f0b48c2989477a21da6799cba Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 17:45:44 +0800 Subject: [PATCH 36/47] CodeInterpreter --- 
crazy_functions/CodeInterpreter.py | 213 +++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 crazy_functions/CodeInterpreter.py diff --git a/crazy_functions/CodeInterpreter.py b/crazy_functions/CodeInterpreter.py new file mode 100644 index 0000000..73e142d --- /dev/null +++ b/crazy_functions/CodeInterpreter.py @@ -0,0 +1,213 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any +from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone, clear_file_downloadzone +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +from .crazy_utils import input_clipping, try_install_deps +from multiprocessing import Process, Pipe +import os + +templete = """ +```python +import ... # Put dependencies here, e.g. import numpy as np + +class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction` + + def run(self, path): # The name of the function must be `run`, it takes only a positional argument. + # rewrite the function you have just written here + ... + return generated_file_path +``` +""" + +def inspect_dependency(chatbot, history): + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return True + +def get_code_block(reply): + import re + pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks + matches = re.findall(pattern, reply) # find all code blocks in text + if len(matches) == 1: + return matches[0].strip('python') # code block + for match in matches: + if 'class TerminalFunction' in match: + return match.strip('python') # code block + raise RuntimeError("GPT is not generating proper code.") + +def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): + # 输入 + prompt_compose = [ + f'Your job:\n' + f'1. 
write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n', + f"2. You should write this function to perform following task: " + txt + "\n", + f"3. Wrap the output python function with markdown codeblock." + ] + i_say = "".join(prompt_compose) + demo = [] + + # 第一步 + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, + sys_prompt= r"You are a programmer." + ) + history.extend([i_say, gpt_say]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + + # 第二步 + prompt_compose = [ + "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n", + templete + ] + i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. " + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, inputs_show_user=inputs_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + sys_prompt= r"You are a programmer." + ) + code_to_return = gpt_say + history.extend([i_say, gpt_say]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + + # # 第三步 + # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them." + # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`' + # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( + # inputs=i_say, inputs_show_user=inputs_show_user, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # sys_prompt= r"You are a programmer." + # ) + # # # 第三步 + # i_say = "Show me how to use `pip` to install packages to run the code above. " + # i_say += 'For instance. 
`pip install -r opencv-python scipy numpy`' + # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( + # inputs=i_say, inputs_show_user=i_say, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # sys_prompt= r"You are a programmer." + # ) + installation_advance = "" + + return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history + +def make_module(code): + module_file = 'gpt_fn_' + gen_time_str().replace('-','_') + with open(f'gpt_log/{module_file}.py', 'w', encoding='utf8') as f: + f.write(code) + + def get_class_name(class_string): + import re + # Use regex to extract the class name + class_name = re.search(r'class (\w+)\(', class_string).group(1) + return class_name + + class_name = get_class_name(code) + return f"gpt_log.{module_file}->{class_name}" + +def init_module_instance(module): + import importlib + module_, class_ = module.split('->') + init_f = getattr(importlib.import_module(module_), class_) + return init_f() + +def for_immediate_show_off_when_possible(file_type, fp, chatbot): + if file_type in ['png', 'jpg']: + image_path = os.path.abspath(fp) + chatbot.append(['这是一张图片, 展示如下:', + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' + ]) + return chatbot + +def subprocess_worker(instance, file_path, return_dict): + return_dict['result'] = instance.run(file_path) + +@CatchException +def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + """ + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,暂时没有用武之地 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 + system_prompt 给gpt的静默提醒 + web_port 当前软件运行的端口号 + """ + # 清空历史,以免输入溢出 + history = []; clear_file_downloadzone(chatbot) + + # 基本信息:功能、贡献者 + chatbot.append([ + "函数插件功能?", + "CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 插件初始化中 ..." + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # 尝试导入依赖, 如果缺少依赖, 则给出安装建议 + dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面 + if not dep_ok: return + + # 读取文件 + if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files") + recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None) + file_path = recently_uploaded_files[-1] + file_type = file_path.split('.')[-1] + + # 粗心检查 + if 'private_upload' in txt: + chatbot.append([ + "...", + f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)" + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 开始干正事 + for j in range(5): # 最多重试5次 + try: + code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \ + yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history) + code = get_code_block(code) + res = make_module(code) + instance = init_module_instance(res) + break + except Exception as e: + chatbot.append([f"第{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # 代码生成结束, 开始执行 + try: + import multiprocessing + manager = multiprocessing.Manager() + return_dict = manager.dict() + + p = 
multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict)) + # only has 10 seconds to run + p.start(); p.join(timeout=10) + if p.is_alive(): p.terminate(); p.join() + p.close() + res = return_dict['result'] + # res = instance.run(file_path) + except Exception as e: + chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) + # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 顺利完成,收尾 + res = str(res) + if os.path.exists(res): + chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res]) + new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot) + chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + else: + chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + +""" +测试: + 裁剪图像,保留下半部分 + 交换图像的蓝色通道和红色通道 + 将图像转为灰度图像 + 将csv文件转excel表格 +""" \ No newline at end of file From 5e0dc9b9adf9303d63cb56c9c5e30faadcfbcbe4 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 7 Sep 2023 18:51:09 +0800 Subject: [PATCH 37/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8DPDF=E4=B8=8B=E8=BD=BD?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E6=97=B6=E9=97=B4=E6=88=B3=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 14 ++++++++++++++ crazy_functions/CodeInterpreter.py | 26 ++++++++++++++++++++++---- crazy_functions/crazy_utils.py | 8 +++++--- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 4c12e92..c657855 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -514,6 +514,20 @@ def get_crazy_functions(): except: print('Load function plugin failed') + + # try: + # from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter + # 
function_plugins.update({ + # "CodeInterpreter(开发中,仅供测试)": { + # "Group": "编程|对话", + # "Color": "stop", + # "AsButton": False, + # "Function": HotReload(虚空终端CodeInterpreter) + # } + # }) + # except: + # print('Load function plugin failed') + # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 # function_plugins.update({ diff --git a/crazy_functions/CodeInterpreter.py b/crazy_functions/CodeInterpreter.py index 73e142d..3c970f3 100644 --- a/crazy_functions/CodeInterpreter.py +++ b/crazy_functions/CodeInterpreter.py @@ -5,6 +5,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import input_clipping, try_install_deps from multiprocessing import Process, Pipe import os +import time templete = """ ```python @@ -121,6 +122,19 @@ def for_immediate_show_off_when_possible(file_type, fp, chatbot): def subprocess_worker(instance, file_path, return_dict): return_dict['result'] = instance.run(file_path) +def have_any_recent_upload_files(chatbot): + _5min = 5 * 60 + if not chatbot: return False # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new + else: return False # most_recent_uploaded is too old + +def get_recent_file_prompt_support(chatbot): + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + path = most_recent_uploaded['path'] + return path + @CatchException def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ @@ -132,6 +146,8 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history system_prompt 给gpt的静默提醒 web_port 当前软件运行的端口号 """ + raise NotImplementedError + # 清空历史,以免输入溢出 history = []; clear_file_downloadzone(chatbot) @@ -142,10 +158,12 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history ]) yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # 尝试导入依赖, 如果缺少依赖, 则给出安装建议 - dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面 - if not dep_ok: return - + if have_any_recent_upload_files(chatbot): + file_path = get_recent_file_prompt_support(chatbot) + else: + chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + # 读取文件 if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files") recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index ffe95e2..8069703 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -591,11 +591,13 @@ def get_files_from_everything(txt, type): # type='.md' # 网络的远程文件 import requests from toolbox import get_conf + from toolbox import get_log_folder, gen_time_str proxies, = get_conf('proxies') r = requests.get(txt, proxies=proxies) - with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content) - project_folder = './gpt_log/' - file_manifest = ['./gpt_log/temp'+type] + path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type) + with open(path, 'wb+') as f: f.write(r.content) + project_folder = get_log_folder(plugin_name='web_download') + file_manifest = [path] elif txt.endswith(type): # 直接给定文件 file_manifest = [txt] From 31d5ee6cccd8cf370fef5fe92a8becd18773aad6 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Thu, 7 Sep 2023 23:05:54 +0800 Subject: [PATCH 38/47] Update README.md --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0451262..5a055d9 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法 ⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件 
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) -⭐虚空终端插件 | 用自然语言,直接调度本项目其他插件 +⭐虚空终端插件 | [函数插件] 用自然语言,直接调度本项目其他插件 更多新功能展示 (图像生成等) …… | 见本文档结尾处 …… @@ -149,11 +149,14 @@ python main.py ### 安装方法II:使用Docker +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + 1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1) [![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) [![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + ``` sh git clone --depth=1 https://github.com/binary-husky/gpt_academic.git # 下载项目 cd gpt_academic # 进入路径 From c176187d245b1a42dc1de4381cc7575f1cfaca7a Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Thu, 7 Sep 2023 23:46:54 +0800 Subject: [PATCH 39/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9B=A0=E4=B8=BA?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E8=BF=94=E5=9B=9E=E5=80=BC=E5=AF=BC=E8=87=B4?= =?UTF-8?q?=E7=9A=84=E4=B8=8D=E5=87=86=E7=A1=AE=E9=94=99=E8=AF=AF=E6=8F=90?= =?UTF-8?q?=E7=A4=BA?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/Latex输出PDF结果.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 0ba9f19..8686f7e 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -109,7 +109,7 @@ def arxiv_download(chatbot, history, txt): url_ = txt # https://arxiv.org/abs/1707.06690 if not txt.startswith('https://arxiv.org/abs/'): - msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}" + msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。" yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面 return msg, None # <-------------- set format -------------> @@ -255,7 +255,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return From bac6810e759b81f6a5ee54ad12939dc621581296 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Fri, 8 Sep 2023 09:38:16 +0800 Subject: [PATCH 40/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=93=8D=E4=BD=9C?= =?UTF-8?q?=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/虚空终端.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index a51e79b..2e1b523 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -24,12 +24,12 @@ explain_msg = """ ## 虚空终端插件说明: 1. 
请用**自然语言**描述您需要做什么。例如: - - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」 - - 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」 - - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」 + - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了」 + - 「请调用插件翻译PDF论文,地址为https://aaa/bbb/ccc.pdf」 + - 「把Arxiv论文翻译成中文PDF,arxiv论文的ID是1812.10695,记得用插件!」 + - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现」 - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」 - - 「给爷翻译Arxiv论文,arxiv论文的ID是1812.10695,记得用插件,不要自己瞎搞!」 - - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」 + - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"」 - 「请问Transformer网络的结构是怎样的?」 2. 您可以打开插件下拉菜单以了解本项目的各种能力。 From 13c9606af7cdee8f95eca79641bf6968a494c79e Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 8 Sep 2023 09:47:29 +0800 Subject: [PATCH 41/47] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E4=B8=8B=E8=BD=BDPDF?= =?UTF-8?q?=E5=A4=B1=E8=B4=A5=E6=97=B6=E4=BA=A7=E7=94=9F=E7=9A=84=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/crazy_utils.py | 5 ++++- crazy_functions/pdf_fns/parse_pdf.py | 7 ++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 8069703..5a314b3 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -593,7 +593,10 @@ def get_files_from_everything(txt, type): # type='.md' from toolbox import get_conf from toolbox import get_log_folder, gen_time_str proxies, = get_conf('proxies') - r = requests.get(txt, proxies=proxies) + try: + r = requests.get(txt, proxies=proxies) + except: + raise ConnectionRefusedError(f"无法下载资源{txt},请检查。") path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type) with open(path, 'wb+') as f: f.write(r.content) project_folder = get_log_folder(plugin_name='web_download') diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 
00016be..8a7117a 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -20,6 +20,11 @@ def get_avail_grobid_url(): def parse_pdf(pdf_path, grobid_url): import scipdf # pip install scipdf_parser if grobid_url.endswith('/'): grobid_url = grobid_url.rstrip('/') - article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) + try: + article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) + except GROBID_OFFLINE_EXCEPTION: + raise GROBID_OFFLINE_EXCEPTION("GROBID服务不可用,请修改config中的GROBID_URL,可修改成本地GROBID服务。") + except: + raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") return article_dict From 77a6105a9a60b8aa6cc02a7ec5edc35eebb320ef Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 8 Sep 2023 09:52:29 +0800 Subject: [PATCH 42/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9demo=E6=A1=88=E4=BE=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5a055d9..ef463bd 100644 --- a/README.md +++ b/README.md @@ -255,7 +255,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h 3. 虚空终端(从自然语言输入中,理解用户意图+自动调用其他插件) -- 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf ” +- 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://storage.googleapis.com/deepmind-media/alphago/AlphaGoNaturePaper.pdf ” - 步骤二:点击“虚空终端”
From 347124c96724e09471a2d7f8526be8de016b14ec Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 8 Sep 2023 10:43:20 +0800 Subject: [PATCH 43/47] update scipdf_parser dep --- crazy_functions/批量翻译PDF文档_多线程.py | 4 ++-- requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index 456c773..0949f7e 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -24,10 +24,11 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz import tiktoken + import scipdf except: report_execption(chatbot, history, a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken```。") + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -58,7 +59,6 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url): import copy - import tiktoken TOKEN_LIMIT_PER_FRAGMENT = 1280 generated_conclusion_files = [] generated_html_files = [] diff --git a/requirements.txt b/requirements.txt index e6d27d2..5ff40cc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,4 @@ arxiv rich pypdf2==2.12.1 websocket-client -scipdf_parser==0.3 +scipdf_parser>=0.3 From cce69beee9102a901d6208d7c3fa295aa2e5ff09 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 8 Sep 2023 11:08:02 +0800 Subject: [PATCH 44/47] update error message --- crazy_functions/批量翻译PDF文档_多线程.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index 0949f7e..6e9fe6a 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -66,7 +66,7 @@ def 
解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa for index, fp in enumerate(file_manifest): chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 article_dict = parse_pdf(fp, grobid_url) - print(article_dict) + if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") prompt = "以下是一篇学术论文的基本信息:\n" # title title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' From 2202cf3701ca3c5414145bbf389b174f2468a2b0 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 8 Sep 2023 11:11:53 +0800 Subject: [PATCH 45/47] remove proxy message --- check_proxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/check_proxy.py b/check_proxy.py index 474988c..b6fe99f 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -5,7 +5,7 @@ def check_proxy(proxies): try: response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4) data = response.json() - print(f'查询代理的地理位置,返回的结果是{data}') + # print(f'查询代理的地理位置,返回的结果是{data}') if 'country_name' in data: country = data['country_name'] result = f"代理配置 {proxies_https}, 代理所在地:{country}" From 74f8cb351126305886a003999055d5bd4b3bdbfc Mon Sep 17 00:00:00 2001 From: binary-husky Date: Fri, 8 Sep 2023 12:10:16 +0800 Subject: [PATCH 46/47] update dockerfile --- docs/GithubAction+AllCapacity | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity index 3e506f5..0841644 100644 --- a/docs/GithubAction+AllCapacity +++ b/docs/GithubAction+AllCapacity @@ -15,9 +15,10 @@ WORKDIR /gpt/gpt_academic RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss RUN python3 -m pip install openai numpy arxiv rich RUN python3 -m pip install colorama Markdown pygments pymupdf -RUN python3 -m pip install python-docx moviepy pdfminer +RUN python3 -m pip install python-docx 
moviepy pdfminer RUN python3 -m pip install zh_langchain==0.2.1 RUN python3 -m pip install nougat-ocr +RUN python3 -m pip install rarfile py7zr manim manimgl RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git RUN python3 -m pip install -r requirements.txt RUN python3 -m pip install -r request_llm/requirements_moss.txt From e70b63651359f94f181e7870572f368e87903035 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 9 Sep 2023 17:50:38 +0800 Subject: [PATCH 47/47] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=95=B0=E5=AD=A6?= =?UTF-8?q?=E5=85=AC=E5=BC=8F=E5=88=A4=E5=AE=9A=E7=9A=84Bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- toolbox.py | 48 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/toolbox.py b/toolbox.py index 273ecee..ab8d046 100644 --- a/toolbox.py +++ b/toolbox.py @@ -281,8 +281,7 @@ def report_execption(chatbot, history, a, b): 向chatbot中添加错误信息 """ chatbot.append((a, b)) - history.append(a) - history.append(b) + history.extend([a, b]) def text_divide_paragraph(text): @@ -305,6 +304,7 @@ def text_divide_paragraph(text): text = "
".join(lines) return pre + text + suf + @lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度 def markdown_convertion(txt): """ @@ -359,19 +359,41 @@ def markdown_convertion(txt): content = content.replace('\n', '') return content - def no_code(txt): - if '```' not in txt: - return True - else: - if '```reference' in txt: return True # newbing - else: return False + def is_equation(txt): + """ + 判定是否为公式 | 测试1 写出洛伦兹定律,使用tex格式公式 测试2 给出柯西不等式,使用latex格式 测试3 写出麦克斯韦方程组 + """ + if '```' in txt and '```reference' not in txt: return False + if '$' not in txt and '\\[' not in txt: return False + mathpatterns = { + r'(?