Merge pull request #1074 from Kilig947/plugin_classification

插件分区新增插件分类选择 (add plugin category selection to the plugin panel)

commit 79080290c6
@@ -300,7 +300,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h

### II:版本:
- version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)
- version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件
- version 3.50: 使用自然语言调用本项目的所有函数插件(虚空终端),支持插件分类,改进UI,设计新主题
- version 3.49: 支持百度千帆平台和文心一言
- version 3.48: 支持阿里达摩院通义千问,上海AI-Lab书生,讯飞星火
- version 3.46: 支持完全脱手操作的实时语音对话
config.py (28 changed lines)
@@ -43,7 +43,11 @@ API_URL_REDIRECT = {}
DEFAULT_WORKER_NUM = 3

# 对话窗的高度
# 色彩主题,可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
THEME = "Default"

# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效)
CHATBOT_HEIGHT = 1115

@@ -68,6 +72,10 @@ WEB_PORT = -1
MAX_RETRY = 2

# 插件分类默认选项
DEFAULT_FN_GROUPS = ['对话', '编程', '学术']

# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",

@@ -83,7 +91,7 @@ BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot"(文心一言), "
# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径
ChatGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"

# 本地LLM模型如ChatGLM的执行方式 CPU/GPU

@@ -99,10 +107,6 @@ CONCURRENT_COUNT = 100
AUTO_CLEAR_TXT = False

# 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
THEME = "Default"

# 加一个live2d装饰
ADD_WAIFU = False

@@ -214,6 +218,18 @@ ALLOW_RESET_CONFIG = False
└── NEWBING_COOKIES

用户图形界面布局依赖关系示意图
│
├── CHATBOT_HEIGHT 对话窗的高度
├── CODE_HIGHLIGHT 代码高亮
├── LAYOUT 窗口布局
├── DARK_MODE 暗色模式 / 亮色模式
├── DEFAULT_FN_GROUPS 插件分类默认选项
├── THEME 色彩主题
├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框
├── ADD_WAIFU 加一个live2d装饰
├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性

插件在线服务配置依赖关系示意图
│
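An illustrative sketch (not part of the commit) of how the new DEFAULT_FN_GROUPS option interacts with each plugin's "Group" tag: a plugin is shown by default when any of its "|"-separated group tags appears in DEFAULT_FN_GROUPS. `match_group` is the helper this PR adds to main.py; the plugin names and groups come from crazy_functional.py below, except the "测试" group, which is a hypothetical category added here only for contrast.

```python
# Sketch of the category-matching rule introduced by this PR.
DEFAULT_FN_GROUPS = ['对话', '编程', '学术']

plugins = {
    "虚空终端":          {"Group": "对话|编程|学术"},
    "解析整个Python项目": {"Group": "编程"},
    "某个假想插件":       {"Group": "测试"},   # hypothetical group, not in the defaults
}

# Helper added to main.py in this PR: any tag matching any selected group counts.
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])

for name, plugin in plugins.items():
    print(name, "visible by default:", match_group(plugin['Group'], DEFAULT_FN_GROUPS))
# 虚空终端 True, 解析整个Python项目 True, 某个假想插件 False
```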
@@ -63,6 +63,7 @@ def get_core_functions():
"英译中": {
"Prefix": r"翻译成地道的中文:" + "\n\n",
"Suffix": r"",
"Visible": False,
},
"找图片": {
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +

@@ -78,6 +79,7 @@ def get_core_functions():
"Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
r"Items need to be transformed:",
"Visible": False,
"Suffix": r"",
}
}
@@ -34,87 +34,108 @@ def get_crazy_functions():
from crazy_functions.Latex全文翻译 import Latex中译英
from crazy_functions.Latex全文翻译 import Latex英译中
from crazy_functions.批量Markdown翻译 import Markdown中译英
from crazy_functions.虚空终端 import 虚空终端

function_plugins = {
"虚空终端": {
"Group": "对话|编程|学术",
"Color": "stop",
"AsButton": True,
"Function": HotReload(虚空终端)
},
"解析整个Python项目": {
"Group": "编程",
"Color": "stop",
"AsButton": True,
"Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
"Function": HotReload(解析一个Python项目)
},
"载入对话历史存档(先上传存档或输入路径)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "载入对话历史存档 | 输入参数为路径",
"Function": HotReload(载入对话历史存档)
},
"删除所有本地对话历史记录(谨慎操作)": {
"Group": "对话",
"AsButton": False,
"Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
"Function": HotReload(删除所有本地对话历史记录)
},
"清除所有缓存文件(谨慎操作)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
"Function": HotReload(清除缓存)
},
"批量总结Word文档": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"Info": "批量总结word文档 | 输入参数为路径",
"Function": HotReload(总结word文档)
},
"解析整个C++项目头文件": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
"Function": HotReload(解析一个C项目的头文件)
},
"解析整个C++项目(.cpp/.hpp/.c/.h)": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径",
"Function": HotReload(解析一个C项目)
},
"解析整个Go项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Golang项目)
},
"解析整个Rust项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Rust项目)
},
"解析整个Java项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Java项目)
},
"解析整个前端项目(js,ts,css等)": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径",
"Function": HotReload(解析一个前端项目)
},
"解析整个Lua项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个Lua项目)
},
"解析整个CSharp项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
"Function": HotReload(解析一个CSharp项目)
},
"解析Jupyter Notebook文件": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "解析Jupyter Notebook文件 | 输入参数为路径",
@@ -123,92 +144,125 @@ def get_crazy_functions():
"ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
},
"读Tex论文写摘要": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"AsButton": False,
"Info": "读取Tex论文并写摘要 | 输入参数为路径",
"Function": HotReload(读文章写摘要)
},
"翻译README或.MD": {
"翻译README或MD": {
"Group": "编程",
"Color": "stop",
"AsButton": True,
"Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
"Function": HotReload(Markdown英译中)
},
"翻译Markdown或README(支持Github链接)": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
"Function": HotReload(Markdown英译中)
},
"批量生成函数注释": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量生成函数的注释 | 输入参数为路径",
"Function": HotReload(批量生成函数注释)
},
"保存当前的对话": {
"Group": "对话",
"AsButton": True,
"Info": "保存当前的对话 | 不需要输入参数",
"Function": HotReload(对话历史存档)
},
"[多线程Demo] 解析此项目本身(源码自译解)": {
"[多线程Demo]解析此项目本身(源码自译解)": {
"Group": "对话|编程",
"AsButton": False, # 加入下拉菜单中
"Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
"Function": HotReload(解析项目本身)
},
"[插件demo] 历史上的今天": {
"[插件demo]历史上的今天": {
"Group": "对话",
"AsButton": True,
"Info": "查看历史上的今天事件 | 不需要输入参数",
"Function": HotReload(高阶功能模板函数)
},
"精准翻译PDF论文": {
"Group": "学术",
"Color": "stop",
"AsButton": True, # 加入下拉菜单中
"AsButton": True,
"Info": "精准翻译PDF论文为中文 | 输入参数为路径",
"Function": HotReload(批量翻译PDF文档)
},
"询问多个GPT模型": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Function": HotReload(同时问询)
},
"批量总结PDF文档": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量总结PDF文档的内容 | 输入参数为路径",
"Function": HotReload(批量总结PDF文档)
},
"谷歌学术检索助手(输入谷歌学术搜索页url)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
"Function": HotReload(谷歌检索小助手)
},
"理解PDF文档内容 (模仿ChatPDF)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
"Function": HotReload(理解PDF文档内容标准文件输入)
},
"英文Latex项目全文润色(输入路径或上传压缩包)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英文润色)
},
"英文Latex项目全文纠错(输入路径或上传压缩包)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英文纠错)
},
"中文Latex项目全文润色(输入路径或上传压缩包)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex中文润色)
},
"Latex项目全文中译英(输入路径或上传压缩包)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex中译英)
},
"Latex项目全文英译中(输入路径或上传压缩包)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
"Function": HotReload(Latex英译中)
},
"批量Markdown中译英(输入路径或上传压缩包)": {
"Group": "编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
"Function": HotReload(Markdown中译英)
},
}
@@ -218,8 +272,10 @@ def get_crazy_functions():
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
function_plugins.update({
"一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
"Group": "学术",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
"Function": HotReload(下载arxiv论文并翻译摘要)
}
})

@@ -230,16 +286,20 @@ def get_crazy_functions():
from crazy_functions.联网的ChatGPT import 连接网络回答问题
function_plugins.update({
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
"Function": HotReload(连接网络回答问题)
}
})
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
function_plugins.update({
"连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
"Function": HotReload(连接bing搜索回答问题)
}
})

@@ -250,6 +310,7 @@ def get_crazy_functions():
from crazy_functions.解析项目源代码 import 解析任意code项目
function_plugins.update({
"解析项目源代码(手动指定和筛选源代码文件类型)": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)

@@ -264,6 +325,7 @@ def get_crazy_functions():
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
function_plugins.update({
"询问多个GPT模型(手动指定询问哪些模型)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)

@@ -278,6 +340,7 @@ def get_crazy_functions():
from crazy_functions.图片生成 import 图片生成
function_plugins.update({
"图片生成(先切换模型到openai或api2d)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)

@@ -293,6 +356,7 @@ def get_crazy_functions():
from crazy_functions.总结音视频 import 总结音视频
function_plugins.update({
"批量总结音视频(输入路径或上传压缩包)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -308,8 +372,10 @@ def get_crazy_functions():
from crazy_functions.数学动画生成manim import 动画生成
function_plugins.update({
"数学动画生成(Manim)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
"Function": HotReload(动画生成)
}
})
@@ -320,6 +386,7 @@ def get_crazy_functions():
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
function_plugins.update({
"Markdown翻译(手动指定语言)": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -334,6 +401,7 @@ def get_crazy_functions():
from crazy_functions.Langchain知识库 import 知识库问答
function_plugins.update({
"构建知识库(请先上传文件素材)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -348,6 +416,7 @@ def get_crazy_functions():
from crazy_functions.Langchain知识库 import 读取知识库作答
function_plugins.update({
"知识库问答": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -362,6 +431,7 @@ def get_crazy_functions():
from crazy_functions.交互功能函数模板 import 交互功能模板函数
function_plugins.update({
"交互功能模板函数": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Function": HotReload(交互功能模板函数)

@@ -374,6 +444,7 @@ def get_crazy_functions():
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
function_plugins.update({
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -384,6 +455,7 @@ def get_crazy_functions():
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
function_plugins.update({
"Arixv论文精细翻译(输入arxivID)[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -391,11 +463,13 @@ def get_crazy_functions():
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
function_plugins.update({
"本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,

@@ -403,6 +477,7 @@ def get_crazy_functions():
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
@@ -416,25 +491,16 @@ def get_crazy_functions():
from crazy_functions.语音助手 import 语音助手
function_plugins.update({
"实时音频采集": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Info": "开始语言对话 | 没有输入参数",
"Function": HotReload(语音助手)
}
})
except:
print('Load function plugin failed')

try:
from crazy_functions.虚空终端 import 自动终端
function_plugins.update({
"自动终端": {
"Color": "stop",
"AsButton": False,
"Function": HotReload(自动终端)
}
})
except:
print('Load function plugin failed')

# try:
# from crazy_functions.chatglm微调工具 import 微调数据集生成

@@ -449,4 +515,24 @@ def get_crazy_functions():
# })
# except:
# print('Load function plugin failed')

"""
设置默认值:
- 默认 Group = 对话
- 默认 AsButton = True
- 默认 AdvancedArgs = False
- 默认 Color = secondary
"""
for name, function_meta in function_plugins.items():
if "Group" not in function_meta:
function_plugins[name]["Group"] = '对话'
if "AsButton" not in function_meta:
function_plugins[name]["AsButton"] = True
if "AdvancedArgs" not in function_meta:
function_plugins[name]["AdvancedArgs"] = False
if "Color" not in function_meta:
function_plugins[name]["Color"] = 'secondary'

return function_plugins
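A minimal sketch (not part of the commit) of what the default-filling loop above implies for plugin authors: an entry only needs a "Function"; the other fields fall back to Group='对话', AsButton=True, AdvancedArgs=False, Color='secondary'. The plugin name and callable below are hypothetical placeholders.

```python
# Hypothetical minimal plugin entry; the loop mirrors the one added in crazy_functional.py.
def 我的插件函数(*args, **kwargs):   # placeholder callable for illustration
    pass

function_plugins = {"我的插件": {"Function": 我的插件函数}}

for name, function_meta in function_plugins.items():
    if "Group" not in function_meta: function_plugins[name]["Group"] = '对话'
    if "AsButton" not in function_meta: function_plugins[name]["AsButton"] = True
    if "AdvancedArgs" not in function_meta: function_plugins[name]["AdvancedArgs"] = False
    if "Color" not in function_meta: function_plugins[name]["Color"] = 'secondary'

print(function_plugins["我的插件"])
# {'Function': <...>, 'Group': '对话', 'AsButton': True, 'AdvancedArgs': False, 'Color': 'secondary'}
```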
@@ -6,7 +6,7 @@ pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

# =================================== 工具函数 ===============================================
专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
def switch_prompt(pfg, mode, more_requirement):
"""
Generate prompts and system prompts based on the mode for proofreading or translating.

@@ -291,7 +291,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
@@ -37,10 +37,18 @@ Here is the output schema:
{schema}
```"""

PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
```
{schema}
```"""

class GptJsonIO():

def __init__(self, schema):
def __init__(self, schema, example_instruction=True):
self.pydantic_object = schema
self.example_instruction = example_instruction
self.format_instructions = self.generate_format_instructions()

def generate_format_instructions(self):

@@ -53,9 +61,11 @@ class GptJsonIO():
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
if self.example_instruction:
schema_str = json.dumps(reduced_schema)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
else:
return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str)

def generate_output(self, text):
# Greedy search for 1st json candidate.
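A usage sketch (assumption-based, not part of the commit) of the new example_instruction switch, using only the names visible in this hunk and assuming the repository root is on the import path: the default keeps the detailed schema prompt, while example_instruction=False selects the shorter PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE text, as the 虚空终端 helpers do when they overwrite format_instructions themselves.

```python
# Sketch: constructing GptJsonIO with and without the example-style instructions.
from pydantic import BaseModel, Field
from crazy_functions.json_fns.pydantic_io import GptJsonIO

class Plugin(BaseModel):
    plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
    reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="")

io_full = GptJsonIO(Plugin)                               # example_instruction=True (default)
io_simple = GptJsonIO(Plugin, example_instruction=False)  # schema-only prompt
print(io_simple.format_instructions)
```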
@@ -1,9 +1,9 @@
from pydantic import BaseModel, Field
from typing import List
from toolbox import update_ui_lastest_msg, get_conf
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from request_llm.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO
import copy, json, pickle, os, sys
import copy, json, pickle, os, sys, time

def read_avail_plugin_enum():

@@ -11,37 +11,85 @@ def read_avail_plugin_enum():
plugin_arr = get_crazy_functions()
# remove plugins with out explaination
plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v}
plugin_arr_info = {"F{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)})
prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt
return prompt, plugin_arr_dict
return prompt, plugin_arr_dict, plugin_arr_dict_parse

def wrap_code(txt):
txt = txt.replace('```','')
return f"\n```\n{txt}\n```\n"

def have_any_recent_upload_files(chatbot):
_5min = 5 * 60
if not chatbot: return False # chatbot is None
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
if not most_recent_uploaded: return False # most_recent_uploaded is None
if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new
else: return False # most_recent_uploaded is too old

def get_recent_file_prompt_support(chatbot):
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
path = most_recent_uploaded['path']
prompt = "\nAdditional Information:\n"
prompt = "In case that this plugin requires a path or a file as argument,"
prompt += f"it is important for you to know that the user has recently uploaded a file, located at: `{path}`"
prompt += f"Only use it when necessary, otherwise, you can ignore this file."
return prompt

def get_inputs_show_user(inputs, plugin_arr_enum_prompt):
# remove plugin_arr_enum_prompt from inputs string
inputs_show_user = inputs.replace(plugin_arr_enum_prompt, "")
inputs_show_user += plugin_arr_enum_prompt[:200] + '...'
inputs_show_user += '\n...\n'
inputs_show_user += '...\n'
inputs_show_user += '...}'
return inputs_show_user
def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum()
plugin_arr_enum_prompt, plugin_arr_dict, plugin_arr_dict_parse = read_avail_plugin_enum()
class Plugin(BaseModel):
plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F0000000000000")
plugin_arg: str = Field(description="The argument of the plugin. A path or url or empty.", default="")
plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfy user requirement most")

# ⭐ ⭐ ⭐ 选择插件
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(Plugin)
gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n"
gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfy user requirement most"}\n"""
gpt_json_io.format_instructions += "The plugins you are authorized to use are listed below:\n"
gpt_json_io.format_instructions += plugin_arr_enum_prompt
inputs = "Choose the correct plugin and extract plugin_arg, the user requirement is: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
gpt_json_io.format_instructions
inputs = "Choose the correct plugin according to user requirements, the user requirement is: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions

run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)

if plugin_sel.plugin_selection not in plugin_arr_dict:
msg = f'找不到合适插件执行该任务'
try:
gpt_reply = run_gpt_fn(inputs, "")
plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn)
except:
msg = f"抱歉, {llm_kwargs['llm_model']}无法理解您的需求。"
msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt))
msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
msg += "\n但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return
if plugin_sel.plugin_selection not in plugin_arr_dict_parse:
msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。"
msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply)
msg += "\n但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return

# ⭐ ⭐ ⭐ 确认插件参数
plugin = plugin_arr_dict[plugin_sel.plugin_selection]
if not have_any_recent_upload_files(chatbot):
appendix_info = ""
else:
appendix_info = get_recent_file_prompt_support(chatbot)

plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection]
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
class PluginExplicit(BaseModel):
plugin_selection: str = plugin_sel.plugin_selection

@@ -50,7 +98,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
gpt_json_io.format_instructions += "The information about this plugin is:" + plugin["Info"]
inputs = f"A plugin named {plugin_sel.plugin_selection} is selected, " + \
"you should extract plugin_arg from the user requirement, the user requirement is: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
">> " + (txt + appendix_info).rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
gpt_json_io.format_instructions
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])

@@ -60,7 +108,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
# ⭐ ⭐ ⭐ 执行插件
fn = plugin['Function']
fn_name = fn.__name__
msg = f'正在调用插件: {fn_name}\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}'
msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。'
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1)
return
crazy_functions/vt_fns/vt_state.py (new file, 28 lines)
@@ -0,0 +1,28 @@
import pickle

class VoidTerminalState():
def __init__(self):
self.reset_state()

def reset_state(self):
self.has_provided_explaination = False

def lock_plugin(self, chatbot):
chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'
chatbot._cookies['plugin_state'] = pickle.dumps(self)

def unlock_plugin(self, chatbot):
self.reset_state()
chatbot._cookies['lock_plugin'] = None
chatbot._cookies['plugin_state'] = pickle.dumps(self)

def set_state(self, chatbot, key, value):
setattr(self, key, value)
chatbot._cookies['plugin_state'] = pickle.dumps(self)

def get_state(chatbot):
state = chatbot._cookies.get('plugin_state', None)
if state is not None: state = pickle.loads(state)
else: state = VoidTerminalState()
state.chatbot = chatbot
return state
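A sketch (illustration only, assuming the repository root is on the import path) of the round trip this state object makes through the chatbot cookies, matching how the 虚空终端 plugin below uses it: the state is pickled into chatbot._cookies so it survives between user submissions, and lock_plugin routes the next submission straight back to the plugin.

```python
# Sketch of the lock/unlock round trip via chatbot cookies.
from toolbox import ChatBotWithCookies
from crazy_functions.vt_fns.vt_state import VoidTerminalState

chatbot = ChatBotWithCookies({})                      # empty cookie dict for illustration
state = VoidTerminalState.get_state(chatbot)          # fresh state on first call
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.lock_plugin(chatbot=chatbot)
print(chatbot._cookies['lock_plugin'])                # crazy_functions.虚空终端->虚空终端
state.unlock_plugin(chatbot=chatbot)                  # clears the lock and resets the flag
```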
@@ -1,24 +1,68 @@
"""
Explanation of the Void Terminal Plugin:

Please describe in natural language what you want to do.

1. You can open the plugin's dropdown menu to explore various capabilities of this project, and then describe your needs in natural language, for example:
- "Please call the plugin to translate a PDF paper for me. I just uploaded the paper to the upload area."
- "Please use the plugin to translate a PDF paper, with the address being https://www.nature.com/articles/s41586-019-1724-z.pdf."
- "Generate an image with blooming flowers and lush green grass using the plugin."
- "Translate the README using the plugin. The GitHub URL is https://github.com/facebookresearch/co-tracker."
- "Translate an Arxiv paper for me. The Arxiv ID is 1812.10695. Remember to use the plugin and don't do it manually!"
- "I don't like the current interface color. Modify the configuration and change the theme to THEME="High-Contrast"."
- "Could you please explain the structure of the Transformer network?"

2. If you use keywords like "call the plugin xxx", "modify the configuration xxx", "please", etc., your intention can be recognized more accurately.

3. Your intention can be recognized more accurately when using powerful models like GPT4. This plugin is relatively new, so please feel free to provide feedback on GitHub.

4. Now, if you need to process a file, please upload the file (drag the file to the file upload area) or describe the path to the file.

5. If you don't need to upload a file, you can simply repeat your command again.
"""
explain_msg = """
## 虚空终端插件说明:

请用**自然语言**描述您需要做什么。

1. 您可以打开插件下拉菜单以了解本项目的各种能力,然后用自然语言描述您的需要,例如:
- 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了。」
- 「请调用插件翻译PDF论文,地址为https://www.nature.com/articles/s41586-019-1724-z.pdf」
- 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现。」
- 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」
- 「给爷翻译Arxiv论文,arxiv论文的ID是1812.10695,记得用插件,不要自己瞎搞!」
- 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"。」
- 「请问Transformer网络的结构是怎样的?」

2. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。

3. 建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。

4. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。

5. 如果不需要上传文件,现在您只需要再次重复一次您的指令即可。
"""

from pydantic import BaseModel, Field
from typing import List
from toolbox import CatchException, update_ui, gen_time_str
from toolbox import update_ui_lastest_msg
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from request_llm.bridge_all import predict_no_ui_long_connection
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.json_fns.pydantic_io import GptJsonIO
from crazy_functions.vt_fns.vt_state import VoidTerminalState
from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot
from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot
from crazy_functions.vt_fns.vt_call_plugin import execute_plugin
from enum import Enum
import copy, json, pickle, os, sys
class UserIntention(BaseModel):
user_prompt: str = Field(description="the content of user input", default="")
intention_type: str = Field(description="the type of user intention, choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="Chat")
intention_type: str = Field(description="the type of user intention, choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="ExecutePlugin")
user_provide_file: bool = Field(description="whether the user provides a path to a file", default=False)
user_provide_url: bool = Field(description="whether the user provides a url", default=False)

def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=txt, inputs_show_user=txt,

@@ -30,12 +74,24 @@ def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_i
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
pass

def analyze_with_rule(txt):

explain_intention_to_user = {
'Chat': "聊天对话",
'ExecutePlugin': "调用插件",
'ModifyConfiguration': "修改配置",
}

def analyze_intention_with_simple_rules(txt):
user_intention = UserIntention()
user_intention.user_prompt = txt
is_certain = False

if '调用插件' in txt:
if '请问' in txt:
is_certain = True
user_intention.intention_type = 'Chat'

if '用插件' in txt:
is_certain = True
user_intention.intention_type = 'ExecutePlugin'

@@ -45,43 +101,67 @@ def analyze_with_rule(txt):

return is_certain, user_intention
@CatchException
def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
plugin_kwargs 插件模型的参数, 如温度和top_p等, 一般原样传递下去就行
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("自动终端状态: ", f"正在执行任务: {txt}"))
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
disable_auto_promotion(chatbot=chatbot)
# 获取当前虚空终端状态
state = VoidTerminalState.get_state(chatbot)
appendix_msg = ""

# 用简单的关键词检测用户意图
is_certain, _ = analyze_intention_with_simple_rules(txt)
if txt.startswith('private_upload/') and len(txt) == 34:
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"

if is_certain or (state.has_provided_explaination):
# 如果意图明确,跳过提示环节
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.unlock_plugin(chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history)
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
return
else:
# 如果意图模糊,提示
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.lock_plugin(chatbot=chatbot)
chatbot.append(("虚空终端状态:", explain_msg+appendix_msg))
yield from update_ui(chatbot=chatbot, history=history)
return

def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
history = []
chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

# 初始化插件状态
state = chatbot._cookies.get('plugin_state', None)
if state is not None: state = pickle.loads(state)
else: state = {}

def update_vt_state():
# 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->自动终端'
chatbot._cookies['vt_state'] = pickle.dumps(state)

# ⭐ ⭐ ⭐ 分析用户意图
is_certain, user_intention = analyze_with_rule(txt)
is_certain, user_intention = analyze_intention_with_simple_rules(txt)
if not is_certain:
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(UserIntention)
inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions
rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']"
inputs = "Analyze the intention of the user according to following user input: \n\n" + \
">> " + (txt+rf_req).rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
try:
user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
except:
yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0)
return
else:
pass

yield from update_ui_lastest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
chatbot=chatbot, history=history, delay=0)

# 用户意图: 修改本项目的配置
if user_intention.intention_type == 'ModifyConfiguration':
yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)

@@ -96,23 +176,3 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
return

# # if state == 'wait_user_keyword':
# # chatbot._cookies['lock_plugin'] = None # 解除插件锁定,避免遗忘导致死锁
# # chatbot._cookies['plugin_state_0001'] = None # 解除插件状态,避免遗忘导致死锁

# # # 解除插件锁定
# # chatbot.append((f"获取关键词:{txt}", ""))
# # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# # inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
# # gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
# # inputs=inputs, inputs_show_user=inputs_show_user,
# # llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
# # sys_prompt="When you want to show an image, use markdown format. e.g. . If there are no image url provided, answer 'no image url provided'"
# # )
# # chatbot[-1] = [chatbot[-1][0], gpt_say]
# yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# return
main.py (74 changed lines)
@@ -6,18 +6,18 @@ def main():
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')

# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
if not AUTHENTICATION: AUTHENTICATION = None

from check_proxy import get_current_version
from themes.theme import adjust_theme, advanced_css, theme_declaration
initial_prompt = "Serve me as a writing and programming assistant."
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
description = "代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),"
description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)"

# 问询记录, python 版本建议3.9+(越新越好)
import logging, uuid

@@ -34,7 +34,10 @@ def main():
# 高级函数插件
from crazy_functional import get_crazy_functions
crazy_fns = get_crazy_functions()
DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS')
plugins = get_crazy_functions()
all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])

# 处理markdown文本格式的转变
gr.Chatbot.postprocess = format_io
@@ -83,25 +86,33 @@ def main():
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant)
functional[k]["Button"].style(size="sm")
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
with gr.Row(elem_id="input-plugin-group"):
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
with gr.Row():
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
crazy_fns[k]["Button"].style(size="sm")
for k, plugin in plugins.items():
if not plugin.get("AsButton", True): continue
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible).style(size="sm")
with gr.Row():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
dropdown_fn_list = []
for k, plugin in plugins.items():
if not match_group(plugin['Group'], DEFAULT_FN_GROUPS): continue
if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")

@@ -112,7 +123,6 @@ def main():
max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)

gr.Markdown(description)
with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
with gr.Row():

@@ -123,6 +133,7 @@ def main():
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")

# 功能区显示开关与功能区的互动
def fn_area_visibility(a):
ret = {}

@@ -160,19 +171,19 @@ def main():
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle)
# 文件上传区,接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
# 函数插件-固定按钮区
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
for k in plugins:
if not plugins[k].get("AsButton", True): continue
click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 函数插件-下拉菜单与随变按钮的互动
def on_dropdown_changed(k):
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary"
ret = {switchy_bt: gr.update(value=k, variant=variant)}
if crazy_fns[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + crazy_fns[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
else:
ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
return ret

@@ -183,13 +194,26 @@ def main():
# 随变按钮的回调函数注册
def route(request: gr.Request, k, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(request, *args, **kwargs)
yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 终止按钮的回调函数注册
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
plugins_as_btn = {name:plugin for name, plugin in plugins.items() if plugin.get('Button', None)}
def on_group_change(group_list):
btn_list = []
fns_list = []
if not group_list: # 处理特殊情况:没有选择任何插件组
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
for k, plugin in plugins.items():
if plugin.get("AsButton", True):
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
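A pure-Python illustration (not part of the commit, no Gradio objects) of the visibility rule on_group_change applies when the user changes the category multiselect just added above: plugins registered as buttons stay visible only if their Group matches a selected category, everything else is offered through the "更多函数插件" dropdown. Plugin names and groups are taken from crazy_functional.py in this diff.

```python
# Sketch of on_group_change's selection logic, stripped of the Gradio update objects.
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])

plugins = {
    "虚空终端":       {"Group": "对话|编程|学术", "AsButton": True},
    "批量总结PDF文档": {"Group": "学术",          "AsButton": False},
}

def simulate_group_change(group_list):
    visible_buttons, dropdown_choices = [], []
    for k, plugin in plugins.items():
        if plugin.get("AsButton", True):
            if match_group(plugin['Group'], group_list): visible_buttons.append(k)
        elif match_group(plugin['Group'], group_list):
            dropdown_choices.append(k)
    return visible_buttons, dropdown_choices

print(simulate_group_change(['学术']))   # (['虚空终端'], ['批量总结PDF文档'])
print(simulate_group_change([]))         # ([], []) — nothing selected hides everything
```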
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()

@@ -221,8 +245,10 @@ def main():
auto_opentab_delay()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
server_name="0.0.0.0", server_port=PORT,
favicon_path="docs/logo.png", auth=AUTHENTICATION,
server_name="0.0.0.0",
server_port=PORT,
favicon_path="docs/logo.png",
auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])

# 如果需要在二级路径下运行
@@ -63,9 +63,9 @@ class GetGLMFTHandle(Process):
# if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息')
# with open(conf, 'r', encoding='utf8') as f:
# model_args = json.loads(f.read())
ChatGLM_PTUNING_CHECKPOINT, = get_conf('ChatGLM_PTUNING_CHECKPOINT')
assert os.path.exists(ChatGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点"
conf = os.path.join(ChatGLM_PTUNING_CHECKPOINT, "config.json")
CHATGLM_PTUNING_CHECKPOINT, = get_conf('CHATGLM_PTUNING_CHECKPOINT')
assert os.path.exists(CHATGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点"
conf = os.path.join(CHATGLM_PTUNING_CHECKPOINT, "config.json")
with open(conf, 'r', encoding='utf8') as f:
model_args = json.loads(f.read())
if 'model_name_or_path' not in model_args:

@@ -78,9 +78,9 @@ class GetGLMFTHandle(Process):
config.pre_seq_len = model_args['pre_seq_len']
config.prefix_projection = model_args['prefix_projection']

print(f"Loading prefix_encoder weight from {ChatGLM_PTUNING_CHECKPOINT}")
print(f"Loading prefix_encoder weight from {CHATGLM_PTUNING_CHECKPOINT}")
model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(ChatGLM_PTUNING_CHECKPOINT, "pytorch_model.bin"))
prefix_state_dict = torch.load(os.path.join(CHATGLM_PTUNING_CHECKPOINT, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
@@ -49,16 +49,17 @@ def get_access_token():

def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
conversation_cnt = len(history) // 2
if system_prompt == "": system_prompt = "Hello"
messages = [{"role": "user", "content": system_prompt}]
messages.append({"role": "assistant", "content": 'Certainly!'})
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_i_have_asked["content"] = history[index] if history[index]!="" else "Hello"
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
what_gpt_answer["content"] = history[index+1] if history[index]!="" else "Hello"
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
@@ -9,9 +9,9 @@ validate_path() # 返回项目根路径
from tests.test_utils import plugin_test

if __name__ == "__main__":
# plugin_test(plugin='crazy_functions.虚空终端->自动终端', main_input='修改api-key为sk-jhoejriotherjep')
# plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')

plugin_test(plugin='crazy_functions.虚空终端->自动终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')
plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')

# plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表')
@@ -13,3 +13,9 @@
#input-plugin-group .svelte-1gfkn6j {
visibility: hidden;
}

/* height of the upload box */
.wrap.svelte-xwlu1w {
min-height: var(--size-32);
}
toolbox.py (44 changed lines)
@@ -24,6 +24,19 @@ pj = os.path.join

class ChatBotWithCookies(list):
def __init__(self, cookie):
"""
cookies = {
'top_p': top_p,
'temperature': temperature,
'lock_plugin': bool,
"files_to_promote": ["file1", "file2"],
"most_recent_uploaded": {
"path": "uploaded_path",
"time": time.time(),
"time_str": "timestr",
}
}
"""
self._cookies = cookie

def write_list(self, list):
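A sketch (illustration only) of the shape of the new most_recent_uploaded cookie described in the docstring above: on_file_uploaded further below writes it, and have_any_recent_upload_files in crazy_functions/vt_fns/vt_call_plugin.py treats it as fresh for five minutes. The timestamp tag below is a hypothetical value.

```python
# Sketch of the most_recent_uploaded cookie and the freshness check applied to it.
import time

cookies = {}
time_tag = "2023-09-01-00-00-00"   # hypothetical upload time tag
cookies.update({
    'most_recent_uploaded': {
        'path': f'private_upload/{time_tag}',
        'time': time.time(),
        'time_str': time_tag,
    }})

recent = cookies.get("most_recent_uploaded", None)
is_fresh = recent is not None and (time.time() - recent["time"] < 5 * 60)
print(is_fresh)   # True within five minutes of the upload
```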
@@ -69,7 +82,7 @@ def ArgsGeneralWrapper(f):
# 处理个别特殊插件的锁定状态
module, fn_name = cookies['lock_plugin'].split('->')
f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request)
return decorated

@@ -479,7 +492,8 @@ def find_recent_files(directory):
current_time = time.time()
one_minute_ago = current_time - 60
recent_files = []

if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
for filename in os.listdir(directory):
file_path = os.path.join(directory, filename)
if file_path.endswith('.log'):

@@ -503,15 +517,15 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
if not os.path.exists(new_path): shutil.copyfile(file, new_path)
# 将文件添加到chatbot cookie中,避免多用户干扰
if chatbot:
if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote']
if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
else: current = []
chatbot._cookies.update({'file_to_promote': [new_path] + current})
chatbot._cookies.update({'files_to_promote': [new_path] + current})

def disable_auto_promotion(chatbot):
chatbot._cookies.update({'file_to_promote': []})
chatbot._cookies.update({'files_to_promote': []})
return

def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
"""
当文件被上传时的回调函数
"""

@@ -546,14 +560,20 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数'+err_msg])
return chatbot, txt, txt2
cookies.update({
'most_recent_uploaded': {
'path': f'private_upload/{time_tag}',
'time': time.time(),
'time_str': time_tag
}})
return chatbot, txt, txt2, cookies

def on_report_generated(cookies, files, chatbot):
from toolbox import find_recent_files
if 'file_to_promote' in cookies:
report_files = cookies['file_to_promote']
cookies.pop('file_to_promote')
if 'files_to_promote' in cookies:
report_files = cookies['files_to_promote']
cookies.pop('files_to_promote')
else:
report_files = find_recent_files('gpt_log')
if len(report_files) == 0:

@@ -1001,7 +1021,7 @@ def get_plugin_default_kwargs():
chatbot = ChatBotWithCookies(llm_kwargs)

# txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port
default_plugin_kwargs = {
DEFAULT_FN_GROUPS_kwargs = {
"main_input": "./README.md",
"llm_kwargs": llm_kwargs,
"plugin_kwargs": {},

@@ -1010,7 +1030,7 @@ def get_plugin_default_kwargs():
"system_prompt": "You are a good AI.",
"web_port": WEB_PORT
}
return default_plugin_kwargs
return DEFAULT_FN_GROUPS_kwargs

def get_chat_default_kwargs():
"""
version (4 changed lines)
@@ -1,5 +1,5 @@
{
"version": 3.49,
"version": 3.50,
"show_feature": true,
"new_feature": "支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率 <-> 支持自定义APIKEY格式 <-> 临时修复theme的文件丢失问题 <-> 新增实时语音对话插件(自动断句,脱手对话) <-> 支持加载自定义的ChatGLM2微调模型 <-> 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块"
"new_feature": "支持插件分类! <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
}