From c3140ce344adb7da674a05a5f231c7cb5a3ad807 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 11 Mar 2024 17:26:09 +0800
Subject: [PATCH] merge frontier branch (#1620)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Zhipu sdk update 适配最新的智谱SDK,支持GLM4v (#1502)

* 适配 google gemini 优化为从用户input中提取文件

* 适配最新的智谱SDK、支持glm-4v

* requirements.txt fix

* pending history check

---------

Co-authored-by: binary-husky

* Update "生成多种Mermaid图表" plugin: Separate out the file reading function (#1520)

* Update crazy_functional.py with new functionality deal with PDF

* Update crazy_functional.py and Mermaid.py for plugin_kwargs

* Update crazy_functional.py with new chart type: mind map

* Update SELECT_PROMPT and i_say_show_user messages

* Update ArgsReminder message in get_crazy_functions() function

* Update with read md file and update PROMPTS

* Return the PROMPTS as the test found that the initial version worked best

* Update Mermaid chart generation function

* version 3.71

* 解决issues #1510

* Remove unnecessary text from sys_prompt in 解析历史输入 function

* Remove sys_prompt message in 解析历史输入 function

* Update bridge_all.py: supports gpt-4-turbo-preview (#1517)

* Update bridge_all.py: supports gpt-4-turbo-preview

supports gpt-4-turbo-preview

* Update bridge_all.py

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>

* Update config.py: supports gpt-4-turbo-preview (#1516)

* Update config.py: supports gpt-4-turbo-preview

supports gpt-4-turbo-preview

* Update config.py

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>

* Refactor 解析历史输入 function to handle file input

* Update Mermaid chart generation functionality

* rename files and functions

---------

Co-authored-by: binary-husky
Co-authored-by: hongyi-zhao
Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>

* 接入mathpix ocr功能 (#1468)

* Update Latex输出PDF结果.py

借助mathpix实现了PDF翻译中文并重新编译PDF

* Update config.py

add mathpix appid & appkey

* Add 'PDF翻译中文并重新编译PDF' feature to plugins.

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>

* fix zhipuai

* check picture

* remove glm-4 due to bug

* 修改config

* 检查MATHPIX_APPID

* Remove unnecessary code and update function_plugins dictionary

* capture non-standard token overflow

* bug fix #1524

* change mermaid style

* 支持mermaid 滚动放大缩小重置,鼠标滚动和拖拽 (#1530)

* 支持mermaid 滚动放大缩小重置,鼠标滚动和拖拽

* 微调未果 先stage一下

* update

---------

Co-authored-by: binary-husky
Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>

* ver 3.72

* change live2d

* save the status of ``clear btn` in cookie

* 前端选择保持

* js ui bug fix

* reset btn bug fix

* update live2d tips

* fix missing get_token_num method

* fix live2d toggle switch

* fix persistent custom btn with cookie

* fix zhipuai feedback with core functionality

* Refactor button update and clean up functions

* tailing space removal

* Fix missing MATHPIX_APPID and MATHPIX_APPKEY configuration

* Prompt fix、脑图提示词优化 (#1537)

* 适配 google gemini 优化为从用户input中提取文件

* 脑图提示词优化

* Fix missing MATHPIX_APPID and MATHPIX_APPKEY configuration

---------

Co-authored-by: binary-husky

* 优化“PDF翻译中文并重新编译PDF”插件 (#1602)

* Add gemini_endpoint to API_URL_REDIRECT (#1560)

* Add gemini_endpoint to API_URL_REDIRECT

* Update gemini-pro and gemini-pro-vision model_info endpoints

* Update to support new claude models (#1606)

* Add anthropic library and update claude models

* 更新bridge_claude.py文件,添加了对图片输入的支持。修复了一些bug。

* 添加Claude_3_Models变量以限制图片数量

* Refactor code to improve readability and maintainability

* minor claude bug fix

* more flexible one-api support

* reformat config

* fix one-api new access bug

* dummy

* compat non-standard api

* version 3.73

---------

Co-authored-by: XIao <46100050+Kilig947@users.noreply.github.com>
Co-authored-by: Menghuan1918
Co-authored-by: hongyi-zhao
Co-authored-by: Hao Ma <893017927@qq.com>
Co-authored-by: zeyuan huang <599012428@qq.com>
---
 check_proxy.py | 10 +-
 colorful.py | 2 +-
 config.py | 77 ++++-----
 core_functional.py | 40 ++---
 crazy_functions/Latex全文润色.py | 28 ++--
 crazy_functions/Latex全文翻译.py | 14 +-
 crazy_functions/Latex输出PDF.py | 146 +++++++++++------
 crazy_functions/agent_fns/pipe.py | 14 +-
 crazy_functions/agent_fns/watchdog.py | 2 +-
 crazy_functions/chatglm微调工具.py | 8 +-
 crazy_functions/diagram_fns/file_tree.py | 2 +-
 crazy_functions/game_fns/game_ascii_art.py | 2 +-
 .../game_fns/game_interactive_story.py | 26 +-
 crazy_functions/game_fns/game_utils.py | 10 +-
 crazy_functions/gen_fns/gen_fns_shared.py | 4 +-
 crazy_functions/ipc_fns/mp.py | 2 +-
 crazy_functions/json_fns/pydantic_io.py | 2 +-
 crazy_functions/latex_fns/latex_actions.py | 56 +++----
 crazy_functions/live_audio/aliyunASR.py | 10 +-
 crazy_functions/live_audio/audio_io.py | 6 +-
 .../multi_stage/multi_stage_utils.py | 8 +-
 crazy_functions/pdf_fns/breakdown_txt.py | 2 +-
 crazy_functions/pdf_fns/parse_pdf.py | 10 +-
 crazy_functions/pdf_fns/parse_word.py | 12 +-
 crazy_functions/vector_fns/vector_database.py | 22 +--
 crazy_functions/vt_fns/vt_call_plugin.py | 10 +-
 crazy_functions/vt_fns/vt_modify_config.py | 10 +-
 crazy_functions/vt_fns/vt_state.py | 2 +-
 crazy_functions/下载arxiv论文翻译摘要.py | 10 +-
 crazy_functions/互动小游戏.py | 12 +-
 crazy_functions/交互功能函数模板.py | 2 +-
 crazy_functions/函数动态生成.py | 46 +++---
 crazy_functions/命令行助手.py | 4 +-
 crazy_functions/图片生成.py | 8 +-
 crazy_functions/多智能体.py | 6 +-
 crazy_functions/对话历史存档.py | 10 +-
 crazy_functions/总结word文档.py | 12 +-
 crazy_functions/批量Markdown翻译.py | 16 +-
 crazy_functions/批量总结PDF文档.py | 24 +--
 crazy_functions/批量总结PDF文档pdfminer.py | 16 +-
crazy_functions/批量翻译PDF文档_NOUGAT.py | 4 +- crazy_functions/批量翻译PDF文档_多线程.py | 12 +- crazy_functions/数学动画生成manim.py | 18 +-- crazy_functions/理解PDF文档内容.py | 12 +- crazy_functions/生成函数注释.py | 4 +- crazy_functions/生成多种Mermaid图表.py | 28 ++-- crazy_functions/知识库问答.py | 8 +- crazy_functions/联网的ChatGPT.py | 14 +- crazy_functions/虚空终端.py | 10 +- crazy_functions/解析项目源代码.py | 8 +- crazy_functions/询问多个大语言模型.py | 8 +- crazy_functions/语音助手.py | 6 +- crazy_functions/谷歌检索小助手.py | 16 +- crazy_functions/高级功能函数模板.py | 12 +- multi_language.py | 76 ++++----- request_llms/bridge_all.py | 98 +++++++---- request_llms/bridge_chatglm.py | 8 +- request_llms/bridge_chatglm3.py | 8 +- request_llms/bridge_chatglmft.py | 14 +- request_llms/bridge_chatglmonnx.py | 2 +- request_llms/bridge_chatgpt.py | 5 +- request_llms/bridge_chatgpt_vision.py | 24 +-- request_llms/bridge_chatgpt_website.py | 22 +-- request_llms/bridge_claude.py | 153 ++++++++++-------- request_llms/bridge_deepseekcoder.py | 6 +- request_llms/bridge_google_gemini.py | 6 +- request_llms/bridge_internlm.py | 6 +- request_llms/bridge_jittorllms_llama.py | 10 +- request_llms/bridge_jittorllms_pangualpha.py | 10 +- request_llms/bridge_jittorllms_rwkv.py | 10 +- request_llms/bridge_llama2.py | 8 +- request_llms/bridge_moss.py | 32 ++-- request_llms/bridge_qwen_local.py | 2 +- request_llms/bridge_tgui.py | 14 +- request_llms/chatglmoonx.py | 2 +- request_llms/com_google.py | 6 +- request_llms/com_zhipuglm.py | 2 +- request_llms/key_manager.py | 6 +- request_llms/local_llm_class.py | 6 +- requirements.txt | 2 +- shared_utils/key_pattern_manager.py | 2 +- shared_utils/map_names.py | 34 ++++ shared_utils/text_mask.py | 2 +- toolbox.py | 65 ++++++++ version | 4 +- 85 files changed, 866 insertions(+), 642 deletions(-) create mode 100644 shared_utils/map_names.py diff --git a/check_proxy.py b/check_proxy.py index 2df8185..99592f7 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -47,7 +47,7 @@ def backup_and_download(current_version, remote_version): shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history']) proxies = get_conf('proxies') try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True) - except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True) + except: r = requests.get('https://public.agent-matrix.com/publish/master.zip', proxies=proxies, stream=True) zip_file_path = backup_dir+'/master.zip' with open(zip_file_path, 'wb+') as f: f.write(r.content) @@ -81,7 +81,7 @@ def patch_and_restart(path): dir_util.copy_tree(path_new_version, './') print亮绿('代码已经更新,即将更新pip包依赖……') for i in reversed(range(5)): time.sleep(1); print(i) - try: + try: import subprocess subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt']) except: @@ -113,7 +113,7 @@ def auto_update(raise_error=False): import json proxies = get_conf('proxies') try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5) - except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5) + except: response = requests.get("https://public.agent-matrix.com/publish/version", proxies=proxies, timeout=5) remote_json_data = json.loads(response.text) remote_version = remote_json_data['version'] if remote_json_data["show_feature"]: @@ -159,7 +159,7 @@ def warm_up_modules(): enc.encode("模块预热", disallowed_special=()) enc = 
model_info["gpt-4"]['tokenizer'] enc.encode("模块预热", disallowed_special=()) - + def warm_up_vectordb(): print('正在执行一些模块的预热 ...') from toolbox import ProxyNetworkActivate @@ -167,7 +167,7 @@ def warm_up_vectordb(): import nltk with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt") - + if __name__ == '__main__': import os os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 diff --git a/colorful.py b/colorful.py index 9749861..f0414e5 100644 --- a/colorful.py +++ b/colorful.py @@ -3,7 +3,7 @@ from sys import stdout if platform.system()=="Linux": pass -else: +else: from colorama import init init() diff --git a/config.py b/config.py index 5c44b53..1bdb299 100644 --- a/config.py +++ b/config.py @@ -30,7 +30,32 @@ if USE_PROXY: else: proxies = None -# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------ +# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) +LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓ +AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", + "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", + "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo", + "gemini-pro", "chatglm3" + ] +# --- --- --- --- +# P.S. 其他可用的模型还包括 +# AVAIL_LLM_MODELS = [ +# "qianfan", "deepseekcoder", +# "spark", "sparkv2", "sparkv3", "sparkv3.5", +# "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local", +# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k", +# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125" +# "claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2", +# "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama", +# ] +# --- --- --- --- +# 此外,为了更灵活地接入one-api多模型管理界面,您还可以在接入one-api时, +# 使用"one-api-*"前缀直接使用非标准方式接入的模型,例如 +# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)"] +# --- --- --- --- + + +# --------------- 以下配置可以优化体验 --------------- # 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!) # 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"} @@ -85,22 +110,6 @@ MAX_RETRY = 2 DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] -# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) -LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", - "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", - "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo", - "gemini-pro", "chatglm3", "claude-2"] -# P.S. 
其他可用的模型还包括 [ -# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k", -# "qwen-turbo", "qwen-plus", "qwen-max", -# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613", "moss", -# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', -# "spark", "sparkv2", "sparkv3", "sparkv3.5", -# "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama" -# ] - - # 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4" MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3" @@ -129,6 +138,7 @@ CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本 + # 设置gradio的并行线程数(不需要修改) CONCURRENT_COUNT = 100 @@ -174,14 +184,8 @@ AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure. AZURE_CFG_ARRAY = {} -# 使用Newbing (不推荐使用,未来将删除) -NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"] -NEWBING_COOKIES = """ -put your new bing cookies here -""" - - -# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md +# 阿里云实时语音识别 配置难度较高 +# 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md ENABLE_AUDIO = False ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK @@ -200,18 +204,14 @@ ZHIPUAI_API_KEY = "" ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写 -# # 火山引擎YUNQUE大模型 -# YUNQUE_SECRET_KEY = "" -# YUNQUE_ACCESS_KEY = "" -# YUNQUE_MODEL = "" - - # Claude API KEY ANTHROPIC_API_KEY = "" + # 月之暗面 API KEY MOONSHOT_API_KEY = "" + # Mathpix 拥有执行PDF的OCR功能,但是需要注册账号 MATHPIX_APPID = "" MATHPIX_APPKEY = "" @@ -270,7 +270,11 @@ PLUGIN_HOT_RELOAD = False # 自定义按钮的最大数量限制 NUM_CUSTOM_BASIC_BTN = 4 + + """ +--------------- 配置关联关系说明 --------------- + 在线大模型配置关联关系示意图 │ ├── "gpt-3.5-turbo" 等openai模型 @@ -294,7 +298,7 @@ NUM_CUSTOM_BASIC_BTN = 4 │ ├── XFYUN_API_SECRET │ └── XFYUN_API_KEY │ -├── "claude-1-100k" 等claude模型 +├── "claude-3-opus-20240229" 等claude模型 │ └── ANTHROPIC_API_KEY │ ├── "stack-claude" @@ -315,9 +319,10 @@ NUM_CUSTOM_BASIC_BTN = 4 ├── "Gemini" │ └── GEMINI_API_KEY │ -└── "newbing" Newbing接口不再稳定,不推荐使用 - ├── NEWBING_STYLE - └── NEWBING_COOKIES +└── "one-api-...(max_token=...)" 用一种更方便的方式接入one-api多模型管理界面 + ├── AVAIL_LLM_MODELS + ├── API_KEY + └── API_URL_REDIRECT 本地大模型示意图 @@ -364,4 +369,4 @@ NUM_CUSTOM_BASIC_BTN = 4 └── MATHPIX_APPKEY -""" +""" \ No newline at end of file diff --git a/core_functional.py b/core_functional.py index 4074cdd..5941135 100644 --- a/core_functional.py +++ b/core_functional.py @@ -34,16 +34,16 @@ def get_core_functions(): # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符) "PreProcess": None, }, - - + + "总结绘制脑图": { # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 - "Prefix": r"", + "Prefix": '''"""\n\n''', # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": # dedent() 函数用于去除多行字符串的缩进 - dedent("\n"+r''' - ============================== + dedent("\n\n"+r''' + """ 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如: @@ -57,15 +57,15 @@ def get_core_functions(): C --> |"箭头名2"| F["节点名6"] ``` - 警告: + 注意: (1)使用中文 (2)节点名字使用引号包裹,如["Laptop"] (3)`|` 和 `"`之间不要存在空格 (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下) '''), }, - - + + "查找语法错误": { "Prefix": r"Help me ensure that the grammar and the spelling is correct. " r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. 
" @@ -85,14 +85,14 @@ def get_core_functions(): "Suffix": r"", "PreProcess": clear_line_break, # 预处理:清除换行符 }, - - + + "中译英": { "Prefix": r"Please translate following sentence to English:" + "\n\n", "Suffix": r"", }, - - + + "学术英中互译": { "Prefix": build_gpt_academic_masked_string_langbased( text_show_chinese= @@ -112,29 +112,29 @@ def get_core_functions(): ) + "\n\n", "Suffix": r"", }, - - + + "英译中": { "Prefix": r"翻译成地道的中文:" + "\n\n", "Suffix": r"", "Visible": False, }, - - + + "找图片": { "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n", "Suffix": r"", "Visible": False, }, - - + + "解释代码": { "Prefix": r"请解释以下代码:" + "\n```\n", "Suffix": "\n```\n", }, - - + + "参考文献转Bib": { "Prefix": r"Here are some bibliography items, please transform them into bibtex style." r"Note that, reference styles maybe more than one kind, you should transform each item correctly." diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py index 3bd0613..8f3074a 100644 --- a/crazy_functions/Latex全文润色.py +++ b/crazy_functions/Latex全文润色.py @@ -46,7 +46,7 @@ class PaperFileGroup(): manifest.append(path + '.polish.tex') f.write(res) return manifest - + def zip_result(self): import os, time folder = os.path.dirname(self.file_paths[0]) @@ -59,7 +59,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - # <-------- 读取Latex文件,删除其中的所有注释 ----------> + # <-------- 读取Latex文件,删除其中的所有注释 ----------> pfg = PaperFileGroup() for index, fp in enumerate(file_manifest): @@ -73,31 +73,31 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch pfg.file_paths.append(fp) pfg.file_contents.append(clean_tex_content) - # <-------- 拆分过长的latex文件 ----------> + # <-------- 拆分过长的latex文件 ----------> pfg.run_file_split(max_token_limit=1024) n_split = len(pfg.sp_file_contents) - # <-------- 多线程润色开始 ----------> + # <-------- 多线程润色开始 ----------> if language == 'en': if mode == 'polish': - inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, " + - "improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" + + inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, " + + "improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] else: - inputs_array = [r"Below is a section from an academic paper, proofread this section." + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + - r"Answer me only with the revised text:" + + inputs_array = [r"Below is a section from an academic paper, proofread this section." + + r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + + r"Answer me only with the revised text:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper writer." 
for _ in range(n_split)] elif language == 'zh': if mode == 'polish': - inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" + + inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] else: - inputs_array = [f"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] + inputs_array = [f"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" + + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag] sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)] @@ -113,7 +113,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch scroller_max_len = 80 ) - # <-------- 文本碎片重组为完整的tex文件,整理结果为压缩包 ----------> + # <-------- 文本碎片重组为完整的tex文件,整理结果为压缩包 ----------> try: pfg.sp_file_result = [] for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]): @@ -124,7 +124,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch except: print(trimmed_format_exc()) - # <-------- 整理结果,退出 ----------> + # <-------- 整理结果,退出 ----------> create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name) promote_file_to_downloadzone(res, chatbot=chatbot) diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py index d6c3b5e..a0802fd 100644 --- a/crazy_functions/Latex全文翻译.py +++ b/crazy_functions/Latex全文翻译.py @@ -39,7 +39,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch import time, os, re from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - # <-------- 读取Latex文件,删除其中的所有注释 ----------> + # <-------- 读取Latex文件,删除其中的所有注释 ----------> pfg = PaperFileGroup() for index, fp in enumerate(file_manifest): @@ -53,11 +53,11 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch pfg.file_paths.append(fp) pfg.file_contents.append(clean_tex_content) - # <-------- 拆分过长的latex文件 ----------> + # <-------- 拆分过长的latex文件 ----------> pfg.run_file_split(max_token_limit=1024) n_split = len(pfg.sp_file_contents) - # <-------- 抽取摘要 ----------> + # <-------- 抽取摘要 ----------> # if language == 'en': # abs_extract_inputs = f"Please write an abstract for this paper" @@ -70,14 +70,14 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch # sys_prompt="Your job is to collect information from materials。", # ) - # <-------- 多线程润色开始 ----------> + # <-------- 多线程润色开始 ----------> if language == 'en->zh': - inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" + + inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper translator." 
for _ in range(n_split)] elif language == 'zh->en': - inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" + + inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] @@ -93,7 +93,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch scroller_max_len = 80 ) - # <-------- 整理结果,退出 ----------> + # <-------- 整理结果,退出 ----------> create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" res = write_history_to_file(gpt_response_collection, create_report_file_name) promote_file_to_downloadzone(res, chatbot=chatbot) diff --git a/crazy_functions/Latex输出PDF.py b/crazy_functions/Latex输出PDF.py index fc878f9..0471749 100644 --- a/crazy_functions/Latex输出PDF.py +++ b/crazy_functions/Latex输出PDF.py @@ -1,4 +1,4 @@ -from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone +from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256 from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial import glob, os, requests, time, json, tarfile @@ -40,7 +40,7 @@ def switch_prompt(pfg, mode, more_requirement): def desend_to_extracted_folder_if_exist(project_folder): - """ + """ Descend into the extracted folder if it exists, otherwise return the original folder. Args: @@ -56,7 +56,7 @@ def desend_to_extracted_folder_if_exist(project_folder): def move_project(project_folder, arxiv_id=None): - """ + """ Create a new work folder and copy the project folder to it. Args: @@ -112,9 +112,9 @@ def arxiv_download(chatbot, history, txt, allow_cache=True): if ('.' 
in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID txt = 'https://arxiv.org/abs/' + txt[:10] - if not txt.startswith('https://arxiv.org'): + if not txt.startswith('https://arxiv.org'): return txt, None # 是本地文件,跳过下载 - + # <-------------- inspect format -------------> chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) yield from update_ui(chatbot=chatbot, history=history) @@ -214,7 +214,7 @@ def pdf2tex_project(pdf_file_path): return None -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= @CatchException @@ -291,7 +291,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo return success -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= @CatchException def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): @@ -326,7 +326,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) except tarfile.ReadError as e: yield from update_ui_lastest_msg( - "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", + "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", chatbot=chatbot, history=history) return @@ -385,7 +385,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, return success -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= @CatchException def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): @@ -438,47 +438,101 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - # <-------------- convert pdf into tex -------------> - project_folder = pdf2tex_project(file_manifest[0]) + hash_tag = map_file_to_sha256(file_manifest[0]) - # Translate English Latex to Chinese Latex, and compile it - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return + # <-------------- check repeated pdf -------------> + chatbot.append([f"检查PDF是否被重复上传", "正在检查..."]) + yield from update_ui(chatbot=chatbot, history=history) + repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag) - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) + except_flag = False - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder) + if repeat: + yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history) - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_translate_zh.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, 
mode='translate_zh', - switch_prompt=_switch_prompt_) + try: + trans_html_file = [f for f in glob.glob(f'{project_folder}/**/*.trans.html', recursive=True)][0] + promote_file_to_downloadzone(trans_html_file, rename_file=None, chatbot=chatbot) - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', - main_file_modified='merge_translate_zh', mode='translate_zh', - work_folder_original=project_folder, work_folder_modified=project_folder, - work_folder=project_folder) + translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0] + promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot) - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", - '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + comparison_pdf = [f for f in glob.glob(f'{project_folder}/**/comparison.pdf', recursive=True)][0] + promote_file_to_downloadzone(comparison_pdf, rename_file=None, chatbot=chatbot) - # <-------------- we are done -------------> - return success + zip_res = zip_result(project_folder) + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + + return True + + except: + report_exception(chatbot, history, b=f"发现重复上传,但是无法找到相关文件") + yield from update_ui(chatbot=chatbot, history=history) + + chatbot.append([f"没有相关文件", '尝试重新翻译PDF...']) + yield from update_ui(chatbot=chatbot, history=history) + + except_flag = True + + + elif not repeat or except_flag: + yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history) + + # <-------------- convert pdf into tex -------------> + chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."]) + yield from update_ui(chatbot=chatbot, history=history) + project_folder = pdf2tex_project(file_manifest[0]) + if project_folder is None: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"PDF转换为tex项目失败") + yield from update_ui(chatbot=chatbot, history=history) + return False + + # <-------------- translate latex file into Chinese -------------> + yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history) + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + if len(file_manifest) == 0: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- if is a zip/tar file -------------> + project_folder = desend_to_extracted_folder_if_exist(project_folder) + + # <-------------- move latex project away from temp folder -------------> + project_folder = move_project(project_folder) + + # <-------------- set a hash tag for repeat-checking -------------> + with open(pj(project_folder, hash_tag + '.tag'), 'w') as f: + f.write(hash_tag) + f.close() + + + # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> + if not os.path.exists(project_folder + '/merge_translate_zh.tex'): + yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, + chatbot, 
history, system_prompt, mode='translate_zh', + switch_prompt=_switch_prompt_) + + # <-------------- compile PDF -------------> + yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history) + success = yield from 编译Latex(chatbot, history, main_file_original='merge', + main_file_modified='merge_translate_zh', mode='translate_zh', + work_folder_original=project_folder, work_folder_modified=project_folder, + work_folder=project_folder) + + # <-------------- zip PDF -------------> + zip_res = zip_result(project_folder) + if success: + chatbot.append((f"成功啦", '请查收结果(压缩包)...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + else: + chatbot.append((f"失败了", + '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + + # <-------------- we are done -------------> + return success diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index a292af8..128507c 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -72,7 +72,7 @@ class PluginMultiprocessManager: if file_type.lower() in ['png', 'jpg']: image_path = os.path.abspath(fp) self.chatbot.append([ - '检测到新生图像:', + '检测到新生图像:', f'本地文件预览:
' ]) yield from update_ui(chatbot=self.chatbot, history=self.history) @@ -114,21 +114,21 @@ class PluginMultiprocessManager: self.cnt = 1 self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐ repeated, cmd_to_autogen = self.send_command(txt) - if txt == 'exit': + if txt == 'exit': self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"]) yield from update_ui(chatbot=self.chatbot, history=self.history) self.terminate() return "terminate" - + # patience = 10 - + while True: time.sleep(0.5) if not self.alive: # the heartbeat watchdog might have it killed self.terminate() return "terminate" - if self.parent_conn.poll(): + if self.parent_conn.poll(): self.feed_heartbeat_watchdog() if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]: self.chatbot.pop(-1) # remove the last line @@ -152,8 +152,8 @@ class PluginMultiprocessManager: yield from update_ui(chatbot=self.chatbot, history=self.history) if msg.cmd == "interact": yield from self.overwatch_workdir_file_change() - self.chatbot.append([f"程序抵达用户反馈节点.", msg.content + - "\n\n等待您的进一步指令." + + self.chatbot.append([f"程序抵达用户反馈节点.", msg.content + + "\n\n等待您的进一步指令." + "\n\n(1) 一般情况下您不需要说什么, 清空输入区, 然后直接点击“提交”以继续. " + "\n\n(2) 如果您需要补充些什么, 输入要反馈的内容, 直接点击“提交”以继续. " + "\n\n(3) 如果您想终止程序, 输入exit, 直接点击“提交”以终止AutoGen并解锁. " diff --git a/crazy_functions/agent_fns/watchdog.py b/crazy_functions/agent_fns/watchdog.py index 2a2bdfa..7cd14d2 100644 --- a/crazy_functions/agent_fns/watchdog.py +++ b/crazy_functions/agent_fns/watchdog.py @@ -8,7 +8,7 @@ class WatchDog(): self.interval = interval self.msg = msg self.kill_dog = False - + def watch(self): while True: if self.kill_dog: break diff --git a/crazy_functions/chatglm微调工具.py b/crazy_functions/chatglm微调工具.py index 1b28228..8405fc5 100644 --- a/crazy_functions/chatglm微调工具.py +++ b/crazy_functions/chatglm微调工具.py @@ -46,7 +46,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") args = plugin_kwargs.get("advanced_arg", None) - if args is None: + if args is None: chatbot.append(("没给定指令", "退出")) yield from update_ui(chatbot=chatbot, history=history); return else: @@ -69,7 +69,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst sys_prompt_array=[arguments.system_prompt for _ in (batch)], max_workers=10 # OpenAI所允许的最大并行过载 ) - + with open(txt+'.generated.json', 'a+', encoding='utf8') as f: for b, r in zip(batch, res[1::2]): f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n') @@ -95,12 +95,12 @@ def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") args = plugin_kwargs.get("advanced_arg", None) - if args is None: + if args is None: chatbot.append(("没给定指令", "退出")) yield from update_ui(chatbot=chatbot, history=history); return else: arguments = string_to_options(arguments=args) - + pre_seq_len = arguments.pre_seq_len # 128 diff --git a/crazy_functions/diagram_fns/file_tree.py b/crazy_functions/diagram_fns/file_tree.py index fa7e2e4..d00ad13 100644 --- a/crazy_functions/diagram_fns/file_tree.py +++ b/crazy_functions/diagram_fns/file_tree.py @@ -10,7 +10,7 @@ class FileNode: self.parenting_ship = [] self.comment = "" self.comment_maxlen_show = 50 - + @staticmethod def add_linebreaks_at_spaces(string, interval=10): return 
'\n'.join(string[i:i+interval] for i in range(0, len(string), interval)) diff --git a/crazy_functions/game_fns/game_ascii_art.py b/crazy_functions/game_fns/game_ascii_art.py index e0b7008..39d88e1 100644 --- a/crazy_functions/game_fns/game_ascii_art.py +++ b/crazy_functions/game_fns/game_ascii_art.py @@ -8,7 +8,7 @@ import random class MiniGame_ASCII_Art(GptAcademicGameBaseState): def step(self, prompt, chatbot, history): - if self.step_cnt == 0: + if self.step_cnt == 0: chatbot.append(["我画你猜(动物)", "请稍等..."]) else: if prompt.strip() == 'exit': diff --git a/crazy_functions/game_fns/game_interactive_story.py b/crazy_functions/game_fns/game_interactive_story.py index 5c25f4a..6c528c3 100644 --- a/crazy_functions/game_fns/game_interactive_story.py +++ b/crazy_functions/game_fns/game_interactive_story.py @@ -88,8 +88,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): self.story = [] chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"]) self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。' - - + + def generate_story_image(self, story_paragraph): try: from crazy_functions.图片生成 import gen_image @@ -98,13 +98,13 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): return f'
' except: return '' - + def step(self, prompt, chatbot, history): - + """ 首先,处理游戏初始化等特殊情况 """ - if self.step_cnt == 0: + if self.step_cnt == 0: self.begin_game_step_0(prompt, chatbot, history) self.lock_plugin(chatbot) self.cur_task = 'head_start' @@ -132,7 +132,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): inputs_ = prompts_hs.format(headstart=self.headstart) history_ = [] story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, '故事开头', self.llm_kwargs, + inputs_, '故事开头', self.llm_kwargs, chatbot, history_, self.sys_prompt_ ) self.story.append(story_paragraph) @@ -147,7 +147,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): inputs_ = prompts_interact.format(previously_on_story=previously_on_story) history_ = [] self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs, + inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs, chatbot, history_, self.sys_prompt_ @@ -166,7 +166,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt) history_ = [] story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs, + inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs, chatbot, history_, self.sys_prompt_ ) self.story.append(story_paragraph) @@ -181,10 +181,10 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): inputs_ = prompts_interact.format(previously_on_story=previously_on_story) history_ = [] self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, - '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs, - chatbot, - history_, + inputs_, + '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs, + chatbot, + history_, self.sys_prompt_ ) self.cur_task = 'user_choice' @@ -200,7 +200,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState): inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt) history_ = [] story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs, + inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs, chatbot, history_, self.sys_prompt_ ) # # 配图 diff --git a/crazy_functions/game_fns/game_utils.py b/crazy_functions/game_fns/game_utils.py index 09b6f7a..c8f20eb 100644 --- a/crazy_functions/game_fns/game_utils.py +++ b/crazy_functions/game_fns/game_utils.py @@ -5,7 +5,7 @@ def get_code_block(reply): import re pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) == 1: + if len(matches) == 1: return "```" + matches[0] + "```" # code block raise RuntimeError("GPT is not generating proper code.") @@ -13,10 +13,10 @@ def is_same_thing(a, b, llm_kwargs): from pydantic import BaseModel, Field class IsSameThing(BaseModel): is_same_thing: bool = Field(description="determine whether two objects are same thing.", default=False) - - def run_gpt_fn(inputs, sys_prompt, history=[]): + + def run_gpt_fn(inputs, sys_prompt, history=[]): return predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, + inputs=inputs, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt, observe_window=[] ) @@ -24,7 +24,7 @@ def 
is_same_thing(a, b, llm_kwargs): inputs_01 = "Identity whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b) inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing." analyze_res_cot_01 = run_gpt_fn(inputs_01, "", []) - + inputs_02 = inputs_01 + gpt_json_io.format_instructions analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01]) diff --git a/crazy_functions/gen_fns/gen_fns_shared.py b/crazy_functions/gen_fns/gen_fns_shared.py index 8e73794..f451c2c 100644 --- a/crazy_functions/gen_fns/gen_fns_shared.py +++ b/crazy_functions/gen_fns/gen_fns_shared.py @@ -41,11 +41,11 @@ def is_function_successfully_generated(fn_path, class_name, return_dict): # Now you can create an instance of the class instance = some_class() return_dict['success'] = True - return + return except: return_dict['traceback'] = trimmed_format_exc() return - + def subprocess_worker(code, file_path, return_dict): return_dict['result'] = None return_dict['success'] = False diff --git a/crazy_functions/ipc_fns/mp.py b/crazy_functions/ipc_fns/mp.py index 575d47c..7c5e995 100644 --- a/crazy_functions/ipc_fns/mp.py +++ b/crazy_functions/ipc_fns/mp.py @@ -1,4 +1,4 @@ -import platform +import platform import pickle import multiprocessing diff --git a/crazy_functions/json_fns/pydantic_io.py b/crazy_functions/json_fns/pydantic_io.py index 4e300d6..66316d4 100644 --- a/crazy_functions/json_fns/pydantic_io.py +++ b/crazy_functions/json_fns/pydantic_io.py @@ -89,7 +89,7 @@ class GptJsonIO(): error + "\n\n" + \ "Now, fix this json string. \n\n" return prompt - + def generate_output_auto_repair(self, response, gpt_gen_fn): """ response: string containing canidate json diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index 8772f5e..ac8a6b4 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -90,16 +90,16 @@ class LatexPaperSplit(): "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者) - self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" + self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" self.title = "unknown" self.abstract = "unknown" def read_title_and_abstract(self, txt): try: title, abstract = find_title_and_abs(txt) - if title is not None: + if title is not None: self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') - if abstract is not None: + if abstract is not None: self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') except: pass @@ -111,7 +111,7 @@ class LatexPaperSplit(): result_string = "" node_cnt = 0 line_cnt = 0 - + for node in self.nodes: if node.preserve: line_cnt += node.string.count('\n') @@ -144,7 +144,7 @@ class LatexPaperSplit(): return result_string - def split(self, txt, project_folder, opts): + def split(self, txt, project_folder, opts): """ break down latex file to a linked list, each node use a preserve flag to indicate whether it should @@ -155,7 +155,7 @@ class LatexPaperSplit(): manager = multiprocessing.Manager() return_dict = manager.dict() p = multiprocessing.Process( - target=split_subprocess, + target=split_subprocess, args=(txt, project_folder, return_dict, opts)) p.start() p.join() @@ -217,13 +217,13 @@ def 
Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin from ..crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency from .latex_actions import LatexPaperFileGroup, LatexPaperSplit - # <-------- 寻找主tex文件 ----------> + # <-------- 寻找主tex文件 ----------> maintex = find_main_tex_file(file_manifest, mode) chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。')) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 time.sleep(3) - # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> + # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> main_tex_basename = os.path.basename(maintex) assert main_tex_basename.endswith('.tex') main_tex_basename_bare = main_tex_basename[:-4] @@ -240,13 +240,13 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f: f.write(merged_content) - # <-------- 精细切分latex文件 ----------> + # <-------- 精细切分latex文件 ----------> chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。')) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 lps = LatexPaperSplit() lps.read_title_and_abstract(merged_content) res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数 - # <-------- 拆分过长的latex片段 ----------> + # <-------- 拆分过长的latex片段 ----------> pfg = LatexPaperFileGroup() for index, r in enumerate(res): pfg.file_paths.append('segment-' + str(index)) @@ -255,17 +255,17 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin pfg.run_file_split(max_token_limit=1024) n_split = len(pfg.sp_file_contents) - # <-------- 根据需要切换prompt ----------> + # <-------- 根据需要切换prompt ----------> inputs_array, sys_prompt_array = switch_prompt(pfg, mode) inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag] if os.path.exists(pj(project_folder,'temp.pkl')): - # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> + # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> pfg = objload(file=pj(project_folder,'temp.pkl')) else: - # <-------- gpt 多线程请求 ----------> + # <-------- gpt 多线程请求 ----------> history_array = [[""] for _ in range(n_split)] # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL') # if LATEX_EXPERIMENTAL: @@ -284,32 +284,32 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin scroller_max_len = 40 ) - # <-------- 文本碎片重组为完整的tex片段 ----------> + # <-------- 文本碎片重组为完整的tex片段 ----------> pfg.sp_file_result = [] for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents): pfg.sp_file_result.append(gpt_say) pfg.merge_result() - # <-------- 临时存储用于调试 ----------> + # <-------- 临时存储用于调试 ----------> pfg.get_token_num = None objdump(pfg, file=pj(project_folder,'temp.pkl')) write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder) - # <-------- 写出文件 ----------> + # <-------- 写出文件 ----------> msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。" final_tex = lps.merge_result(pfg.file_result, mode, msg) objdump((lps, pfg.file_result, mode, msg), file=pj(project_folder,'merge_result.pkl')) with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f: if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex) - - # <-------- 整理结果, 退出 ----------> + + # <-------- 整理结果, 退出 ----------> chatbot.append((f"完成了吗?", 'GPT结果已输出, 即将编译PDF')) 
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # <-------- 返回 ----------> + # <-------- 返回 ----------> return project_folder + f'/merge_{mode}.tex' @@ -362,7 +362,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面 ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - + if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')): # 只有第二步成功,才能继续下面的步骤 yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面 @@ -393,9 +393,9 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf')) modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')) diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf')) - results_ += f"原始PDF编译是否成功: {original_pdf_success};" - results_ += f"转化PDF编译是否成功: {modified_pdf_success};" - results_ += f"对比PDF编译是否成功: {diff_pdf_success};" + results_ += f"原始PDF编译是否成功: {original_pdf_success};" + results_ += f"转化PDF编译是否成功: {modified_pdf_success};" + results_ += f"对比PDF编译是否成功: {diff_pdf_success};" yield from update_ui_lastest_msg(f'第{n_fix}编译结束:
{results_}...', chatbot, history) # 刷新Gradio前端界面 if diff_pdf_success: @@ -409,7 +409,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf')) promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI # 将两个PDF拼接 - if original_pdf_success: + if original_pdf_success: try: from .latex_toolbox import merge_pdfs concat_pdf = pj(work_folder_modified, f'comparison.pdf') @@ -425,7 +425,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f if n_fix>=max_try: break n_fix += 1 can_retry, main_file_modified, buggy_lines = remove_buggy_lines( - file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), + file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), log_path=pj(work_folder_modified, f'{main_file_modified}.log'), tex_name=f'{main_file_modified}.tex', tex_name_pure=f'{main_file_modified}', @@ -445,14 +445,14 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder): import shutil from crazy_functions.pdf_fns.report_gen_html import construct_html from toolbox import gen_time_str - ch = construct_html() + ch = construct_html() orig = "" trans = "" final = [] - for c,r in zip(sp_file_contents, sp_file_result): + for c,r in zip(sp_file_contents, sp_file_result): final.append(c) final.append(r) - for i, k in enumerate(final): + for i, k in enumerate(final): if i%2==0: orig = k if i%2==1: diff --git a/crazy_functions/live_audio/aliyunASR.py b/crazy_functions/live_audio/aliyunASR.py index cba4c01..3a52328 100644 --- a/crazy_functions/live_audio/aliyunASR.py +++ b/crazy_functions/live_audio/aliyunASR.py @@ -85,8 +85,8 @@ def write_numpy_to_wave(filename, rate, data, add_header=False): def is_speaker_speaking(vad, data, sample_rate): # Function to detect if the speaker is speaking - # The WebRTC VAD only accepts 16-bit mono PCM audio, - # sampled at 8000, 16000, 32000 or 48000 Hz. + # The WebRTC VAD only accepts 16-bit mono PCM audio, + # sampled at 8000, 16000, 32000 or 48000 Hz. # A frame must be either 10, 20, or 30 ms in duration: frame_duration = 30 n_bit_each = int(sample_rate * frame_duration / 1000)*2 # x2 because audio is 16 bit (2 bytes) @@ -94,7 +94,7 @@ def is_speaker_speaking(vad, data, sample_rate): for t in range(len(data)): if t!=0 and t % n_bit_each == 0: res_list.append(vad.is_speech(data[t-n_bit_each:t], sample_rate)) - + info = ''.join(['^' if r else '.' 
for r in res_list]) info = info[:10] if any(res_list): @@ -186,10 +186,10 @@ class AliyunASR(): keep_alive_last_send_time = time.time() while not self.stop: # time.sleep(self.capture_interval) - audio = rad.read(uuid.hex) + audio = rad.read(uuid.hex) if audio is not None: # convert to pcm file - temp_file = f'{temp_folder}/{uuid.hex}.pcm' # + temp_file = f'{temp_folder}/{uuid.hex}.pcm' # dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000 write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata) # read pcm binary diff --git a/crazy_functions/live_audio/audio_io.py b/crazy_functions/live_audio/audio_io.py index 00fd3f2..9fd886c 100644 --- a/crazy_functions/live_audio/audio_io.py +++ b/crazy_functions/live_audio/audio_io.py @@ -3,12 +3,12 @@ from scipy import interpolate def Singleton(cls): _instance = {} - + def _singleton(*args, **kargs): if cls not in _instance: _instance[cls] = cls(*args, **kargs) return _instance[cls] - + return _singleton @@ -39,7 +39,7 @@ class RealtimeAudioDistribution(): else: res = None return res - + def change_sample_rate(audio, old_sr, new_sr): duration = audio.shape[0] / old_sr diff --git a/crazy_functions/multi_stage/multi_stage_utils.py b/crazy_functions/multi_stage/multi_stage_utils.py index 1395e79..952c484 100644 --- a/crazy_functions/multi_stage/multi_stage_utils.py +++ b/crazy_functions/multi_stage/multi_stage_utils.py @@ -40,7 +40,7 @@ class GptAcademicState(): class GptAcademicGameBaseState(): """ - 1. first init: __init__ -> + 1. first init: __init__ -> """ def init_game(self, chatbot, lock_plugin): self.plugin_name = None @@ -53,7 +53,7 @@ class GptAcademicGameBaseState(): raise ValueError("callback_fn is None") chatbot._cookies['lock_plugin'] = self.callback_fn self.dump_state(chatbot) - + def get_plugin_name(self): if self.plugin_name is None: raise ValueError("plugin_name is None") @@ -71,7 +71,7 @@ class GptAcademicGameBaseState(): state = chatbot._cookies.get(f'plugin_state/{plugin_name}', None) if state is not None: state = pickle.loads(state) - else: + else: state = cls() state.init_game(chatbot, lock_plugin) state.plugin_name = plugin_name @@ -79,7 +79,7 @@ class GptAcademicGameBaseState(): state.chatbot = chatbot state.callback_fn = callback_fn return state - + def continue_game(self, prompt, chatbot, history): # 游戏主体 yield from self.step(prompt, chatbot, history) diff --git a/crazy_functions/pdf_fns/breakdown_txt.py b/crazy_functions/pdf_fns/breakdown_txt.py index e7c7673..784d796 100644 --- a/crazy_functions/pdf_fns/breakdown_txt.py +++ b/crazy_functions/pdf_fns/breakdown_txt.py @@ -35,7 +35,7 @@ def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=F remain_txt_to_cut_storage = "" # 为了加速计算,我们采样一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage) - + while True: if get_token_fn(remain_txt_to_cut) <= limit: # 如果剩余文本的token数小于限制,那么就不用切了 diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index fa27de5..a1b66d0 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -64,8 +64,8 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat # 再做一个小修改:重新修改当前part的标题,默认用英文的 cur_value += value translated_res_array.append(cur_value) - res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + translated_res_array, - file_basename = 
f"{gen_time_str()}-translated_only.md", + res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + translated_res_array, + file_basename = f"{gen_time_str()}-translated_only.md", file_fullname = None, auto_caption = False) promote_file_to_downloadzone(res_path, rename_file=os.path.basename(res_path)+'.md', chatbot=chatbot) @@ -144,11 +144,11 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files) # -=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-= - ch = construct_html() + ch = construct_html() orig = "" trans = "" gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): + for i,k in enumerate(gpt_response_collection_html): if i%2==0: gpt_response_collection_html[i] = inputs_show_user_array[i//2] else: @@ -159,7 +159,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] final.extend(gpt_response_collection_html) - for i, k in enumerate(final): + for i, k in enumerate(final): if i%2==0: orig = k if i%2==1: diff --git a/crazy_functions/pdf_fns/parse_word.py b/crazy_functions/pdf_fns/parse_word.py index 64d07dc..3664a9c 100644 --- a/crazy_functions/pdf_fns/parse_word.py +++ b/crazy_functions/pdf_fns/parse_word.py @@ -22,10 +22,10 @@ def extract_text_from_files(txt, chatbot, history): file_manifest = [] excption = "" - if txt == "": + if txt == "": final_result.append(txt) return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 - + #查找输入区内容中的文件 file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md') @@ -35,12 +35,12 @@ def extract_text_from_files(txt, chatbot, history): if file_doc: excption = "word" return False, final_result, page_one, file_manifest, excption - + file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest) if file_num == 0: final_result.append(txt) return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 - + if file_pdf: try: # 尝试导入依赖,如果缺少依赖,则给出安装建议 import fitz @@ -61,7 +61,7 @@ def extract_text_from_files(txt, chatbot, history): file_content = f.read() file_content = file_content.encode('utf-8', 'ignore').decode() headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE) #接下来提取md中的一级/二级标题作为摘要 - if len(headers) > 0: + if len(headers) > 0: page_one.append("\n".join(headers)) #合并所有的标题,以换行符分割 else: page_one.append("") @@ -81,5 +81,5 @@ def extract_text_from_files(txt, chatbot, history): page_one.append(file_content[:200]) final_result.append(file_content) file_manifest.append(os.path.relpath(fp, folder_word)) - + return True, final_result, page_one, file_manifest, excption \ No newline at end of file diff --git a/crazy_functions/vector_fns/vector_database.py b/crazy_functions/vector_fns/vector_database.py index cffa22c..46fc72d 100644 --- a/crazy_functions/vector_fns/vector_database.py +++ b/crazy_functions/vector_fns/vector_database.py @@ -28,7 +28,7 @@ EMBEDDING_DEVICE = "cpu" # 基于上下文的prompt模版,请务必保留"{question}"和"{context}" PROMPT_TEMPLATE = """已知信息: -{context} +{context} 根据上述已知信息,简洁和专业的来回答用户的问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”,不允许在答案中添加编造成分,答案请使用中文。 问题是:{question}""" @@ -58,7 +58,7 @@ OPEN_CROSS_DOMAIN = False def similarity_search_with_score_by_vector( self, embedding: List[float], 
k: int = 4 ) -> List[Tuple[Document, float]]: - + def seperate_list(ls: List[int]) -> List[List[int]]: lists = [] ls1 = [ls[0]] @@ -200,7 +200,7 @@ class LocalDocQA: return vs_path, loaded_files else: raise RuntimeError("文件加载失败,请检查文件格式是否正确") - + def get_loaded_file(self, vs_path): ds = self.vector_store.docstore return set([ds._dict[k].metadata['source'].split(vs_path)[-1] for k in ds._dict]) @@ -290,10 +290,10 @@ class knowledge_archive_interface(): self.threadLock.acquire() # import uuid self.current_id = id - self.qa_handle, self.kai_path = construct_vector_store( - vs_id=self.current_id, + self.qa_handle, self.kai_path = construct_vector_store( + vs_id=self.current_id, vs_path=vs_path, - files=file_manifest, + files=file_manifest, sentence_size=100, history=[], one_conent="", @@ -304,7 +304,7 @@ class knowledge_archive_interface(): def get_current_archive_id(self): return self.current_id - + def get_loaded_file(self, vs_path): return self.qa_handle.get_loaded_file(vs_path) @@ -312,10 +312,10 @@ class knowledge_archive_interface(): self.threadLock.acquire() if not self.current_id == id: self.current_id = id - self.qa_handle, self.kai_path = construct_vector_store( - vs_id=self.current_id, + self.qa_handle, self.kai_path = construct_vector_store( + vs_id=self.current_id, vs_path=vs_path, - files=[], + files=[], sentence_size=100, history=[], one_conent="", @@ -329,7 +329,7 @@ class knowledge_archive_interface(): query = txt, vs_path = self.kai_path, score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD, - vector_search_top_k=VECTOR_SEARCH_TOP_K, + vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_conent=True, chunk_size=CHUNK_SIZE, text2vec = self.get_chinese_text2vec(), diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index f33644d..5824d06 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -35,9 +35,9 @@ def get_recent_file_prompt_support(chatbot): most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) path = most_recent_uploaded['path'] prompt = "\nAdditional Information:\n" - prompt = "In case that this plugin requires a path or a file as argument," - prompt += f"it is important for you to know that the user has recently uploaded a file, located at: `{path}`" - prompt += f"Only use it when necessary, otherwise, you can ignore this file." + prompt = "In case that this plugin requires a path or a file as argument," + prompt += f"it is important for you to know that the user has recently uploaded a file, located at: `{path}`" + prompt += f"Only use it when necessary, otherwise, you can ignore this file." 
return prompt def get_inputs_show_user(inputs, plugin_arr_enum_prompt): @@ -82,7 +82,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom msg += "\n但您可以尝试再试一次\n" yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) return - + # ⭐ ⭐ ⭐ 确认插件参数 if not have_any_recent_upload_files(chatbot): appendix_info = "" @@ -99,7 +99,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom inputs = f"A plugin named {plugin_sel.plugin_selection} is selected, " + \ "you should extract plugin_arg from the user requirement, the user requirement is: \n\n" + \ ">> " + (txt + appendix_info).rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ - gpt_json_io.format_instructions + gpt_json_io.format_instructions run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) diff --git a/crazy_functions/vt_fns/vt_modify_config.py b/crazy_functions/vt_fns/vt_modify_config.py index 58a8531..11fa8b1 100644 --- a/crazy_functions/vt_fns/vt_modify_config.py +++ b/crazy_functions/vt_fns/vt_modify_config.py @@ -10,7 +10,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') if not ALLOW_RESET_CONFIG: yield from update_ui_lastest_msg( - lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", + lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", chatbot=chatbot, history=history, delay=2 ) return @@ -35,7 +35,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s inputs = "Analyze how to change configuration according to following user input, answer me with json: \n\n" + \ ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ gpt_json_io.format_instructions - + run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) @@ -45,11 +45,11 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s ok = (explicit_conf in txt) if ok: yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}", + lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}", chatbot=chatbot, history=history, delay=1 ) yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}\n\n正在修改配置中", + lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}\n\n正在修改配置中", chatbot=chatbot, history=history, delay=2 ) @@ -69,7 +69,7 @@ def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') if not ALLOW_RESET_CONFIG: yield from update_ui_lastest_msg( - lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", + lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", chatbot=chatbot, history=history, delay=2 ) return diff --git a/crazy_functions/vt_fns/vt_state.py b/crazy_functions/vt_fns/vt_state.py index 1818728..9d5ff4c 100644 --- a/crazy_functions/vt_fns/vt_state.py +++ b/crazy_functions/vt_fns/vt_state.py @@ -6,7 +6,7 @@ class 
VoidTerminalState(): def reset_state(self): self.has_provided_explaination = False - + def lock_plugin(self, chatbot): chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端' chatbot._cookies['plugin_state'] = pickle.dumps(self) diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py index c368b7d..4360df7 100644 --- a/crazy_functions/下载arxiv论文翻译摘要.py +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -144,8 +144,8 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: import bs4 except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -157,12 +157,12 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: pdf_path, info = download_arxiv_(txt) except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"下载pdf文件未成功") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + # 翻译摘要等 i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' diff --git a/crazy_functions/互动小游戏.py b/crazy_functions/互动小游戏.py index 131e9c9..cf1af22 100644 --- a/crazy_functions/互动小游戏.py +++ b/crazy_functions/互动小游戏.py @@ -12,9 +12,9 @@ def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_ # 选择游戏 cls = MiniGame_ResumeStory # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化 - state = cls.sync_state(chatbot, - llm_kwargs, - cls, + state = cls.sync_state(chatbot, + llm_kwargs, + cls, plugin_name='MiniGame_ResumeStory', callback_fn='crazy_functions.互动小游戏->随机小游戏', lock_plugin=True @@ -30,9 +30,9 @@ def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system # 选择游戏 cls = MiniGame_ASCII_Art # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化 - state = cls.sync_state(chatbot, - llm_kwargs, - cls, + state = cls.sync_state(chatbot, + llm_kwargs, + cls, plugin_name='MiniGame_ASCII_Art', callback_fn='crazy_functions.互动小游戏->随机小游戏1', lock_plugin=True diff --git a/crazy_functions/交互功能函数模板.py b/crazy_functions/交互功能函数模板.py index 811267a..4a8ae6f 100644 --- a/crazy_functions/交互功能函数模板.py +++ b/crazy_functions/交互功能函数模板.py @@ -38,7 +38,7 @@ def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}" gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=inputs, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'" ) chatbot[-1] = [chatbot[-1][0], gpt_say] diff --git a/crazy_functions/函数动态生成.py b/crazy_functions/函数动态生成.py index d20d0cf..2ca2355 100644 --- a/crazy_functions/函数动态生成.py +++ b/crazy_functions/函数动态生成.py @@ -6,10 +6,10 @@ - 将图像转为灰度图像 - 将csv文件转excel表格 -Testing: - - Crop the image, keeping the bottom half. - - Swap the blue channel and red channel of the image. - - Convert the image to grayscale. +Testing: + - Crop the image, keeping the bottom half. + - Swap the blue channel and red channel of the image. + - Convert the image to grayscale. - Convert the CSV file to an Excel spreadsheet. 
""" @@ -29,12 +29,12 @@ import multiprocessing templete = """ ```python -import ... # Put dependencies here, e.g. import numpy as np. +import ... # Put dependencies here, e.g. import numpy as np. class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction` def run(self, path): # The name of the function must be `run`, it takes only a positional argument. - # rewrite the function you have just written here + # rewrite the function you have just written here ... return generated_file_path ``` @@ -48,7 +48,7 @@ def get_code_block(reply): import re pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) == 1: + if len(matches) == 1: return matches[0].strip('python') # code block for match in matches: if 'class TerminalFunction' in match: @@ -68,8 +68,8 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): # 第一步 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, sys_prompt= r"You are a world-class programmer." ) history.extend([i_say, gpt_say]) @@ -82,33 +82,33 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): ] i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. " gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + inputs=i_say, inputs_show_user=inputs_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, sys_prompt= r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!" ) code_to_return = gpt_say history.extend([i_say, gpt_say]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - + # # 第三步 # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them." # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`' # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=inputs_show_user, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # inputs=i_say, inputs_show_user=inputs_show_user, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, # sys_prompt= r"You are a programmer." # ) - # # # 第三步 + # # # 第三步 # i_say = "Show me how to use `pip` to install packages to run the code above. " # i_say += 'For instance. `pip install -r opencv-python scipy numpy`' # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=i_say, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + # inputs=i_say, inputs_show_user=i_say, + # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, # sys_prompt= r"You are a programmer." 
# ) installation_advance = "" - + return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history @@ -117,7 +117,7 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): def for_immediate_show_off_when_possible(file_type, fp, chatbot): if file_type in ['png', 'jpg']: image_path = os.path.abspath(fp) - chatbot.append(['这是一张图片, 展示如下:', + chatbot.append(['这是一张图片, 展示如下:', f'本地文件地址:
<br/>`{image_path}`<br/>'+ f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' ]) @@ -177,7 +177,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1) return # 2. 如果没有文件 - + # 读取文件 file_type = file_list[0].split('.')[-1] @@ -185,7 +185,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ if is_the_upload_folder(txt): yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1) return - + # 开始干正事 MAX_TRY = 3 for j in range(MAX_TRY): # 最多重试5次 @@ -238,7 +238,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + # 顺利完成,收尾 res = str(res) if os.path.exists(res): @@ -248,5 +248,5 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 else: chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 diff --git a/crazy_functions/命令行助手.py b/crazy_functions/命令行助手.py index 2869524..43c6d8f 100644 --- a/crazy_functions/命令行助手.py +++ b/crazy_functions/命令行助手.py @@ -21,8 +21,8 @@ def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro i_say = "请写bash命令实现以下功能:" + txt # 开始 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + inputs=i_say, inputs_show_user=txt, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="你是一个Linux大师级用户。注意,当我要求你写bash命令时,尽可能地仅用一行命令解决我的要求。" ) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index 62f3662..24d3563 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -7,7 +7,7 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", qual from request_llms.bridge_all import model_info proxies = get_conf('proxies') - # Set up OpenAI API key and model + # Set up OpenAI API key and model api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] # 'https://api.openai.com/v1/chat/completions' @@ -113,7 +113,7 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) - chatbot.append([prompt, + chatbot.append([prompt, f'图像中转网址:
<br/>`{image_url}`<br/>'+ f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' f'本地文件地址: <br/>`{image_path}`<br/>
'+ @@ -144,7 +144,7 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys elif part in ['vivid', 'natural']: style = part image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality, style=style) - chatbot.append([prompt, + chatbot.append([prompt, f'图像中转网址:
f'图像中转网址: <br/>`{image_url}`<br/>'+ f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' f'本地文件地址: <br/>`{image_path}`<br/>
'+ @@ -164,7 +164,7 @@ class ImageEditState(GptAcademicState): confirm = (len(file_manifest) >= 1 and file_manifest[0].endswith('.png') and os.path.exists(file_manifest[0])) file = None if not confirm else file_manifest[0] return confirm, file - + def lock_plugin(self, chatbot): chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2' self.dump_state(chatbot) diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py index 4b16b88..00e4539 100644 --- a/crazy_functions/多智能体.py +++ b/crazy_functions/多智能体.py @@ -57,11 +57,11 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ if get_conf("AUTOGEN_USE_DOCKER"): import docker except: - chatbot.append([ f"处理任务: {txt}", + chatbot.append([ f"处理任务: {txt}", f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + # 尝试导入依赖,如果缺少依赖,则给出安装建议 try: import autogen @@ -72,7 +72,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + # 解锁插件 chatbot.get_cookies()['lock_plugin'] = None persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses() diff --git a/crazy_functions/对话历史存档.py b/crazy_functions/对话历史存档.py index 6ffc072..0132bc0 100644 --- a/crazy_functions/对话历史存档.py +++ b/crazy_functions/对话历史存档.py @@ -66,7 +66,7 @@ def read_file_to_chat(chatbot, history, file_name): i_say, gpt_say = h.split('
') chatbot.append([i_say, gpt_say]) chatbot.append([f"存档文件详情?", f"[Local Message] 载入对话{len(html)}条,上下文{len(history)}条。"]) - return chatbot, history + return chatbot, history @CatchException def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): @@ -80,7 +80,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ user_request 当前用户的请求信息(IP地址等) """ - chatbot.append(("保存当前对话", + chatbot.append(("保存当前对话", f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 @@ -108,9 +108,9 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s if txt == "": txt = '空空如也的输入栏' import glob local_history = "
".join([ - "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" + "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob( - f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', + f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True )]) chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:
{local_history}"]) @@ -139,7 +139,7 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot import glob, os local_history = "
".join([ - "`"+hide_cwd(f)+"`" + "`"+hide_cwd(f)+"`" for f in glob.glob( f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True )]) diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index 8793ea4..c27c952 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -40,10 +40,10 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```' i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。' gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, + inputs=i_say, + inputs_show_user=i_say_show_user, llm_kwargs=llm_kwargs, - chatbot=chatbot, + chatbot=chatbot, history=[], sys_prompt="总结文章。" ) @@ -56,10 +56,10 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot if len(paper_fragments) > 1: i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。" gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, + inputs=i_say, + inputs_show_user=i_say, llm_kwargs=llm_kwargs, - chatbot=chatbot, + chatbot=chatbot, history=this_paper_history, sys_prompt="总结文章。" ) diff --git a/crazy_functions/批量Markdown翻译.py b/crazy_functions/批量Markdown翻译.py index 1d876d0..7b87589 100644 --- a/crazy_functions/批量Markdown翻译.py +++ b/crazy_functions/批量Markdown翻译.py @@ -53,7 +53,7 @@ class PaperFileGroup(): def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - # <-------- 读取Markdown文件,删除其中的所有注释 ----------> + # <-------- 读取Markdown文件,删除其中的所有注释 ----------> pfg = PaperFileGroup() for index, fp in enumerate(file_manifest): @@ -63,23 +63,23 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch pfg.file_paths.append(fp) pfg.file_contents.append(file_content) - # <-------- 拆分过长的Markdown文件 ----------> + # <-------- 拆分过长的Markdown文件 ----------> pfg.run_file_split(max_token_limit=1500) n_split = len(pfg.sp_file_contents) - # <-------- 多线程翻译开始 ----------> + # <-------- 多线程翻译开始 ----------> if language == 'en->zh': - inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" + + inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] elif language == 'zh->en': - inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" + + inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper translator." 
for _ in range(n_split)] else: - inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" + + inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" + f"\n\n{frag}" for frag in pfg.sp_file_contents] inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] @@ -103,7 +103,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch except: logging.error(trimmed_format_exc()) - # <-------- 整理结果,退出 ----------> + # <-------- 整理结果,退出 ----------> create_report_file_name = gen_time_str() + f"-chatgpt.md" res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name) promote_file_to_downloadzone(res, chatbot=chatbot) @@ -255,7 +255,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") language = plugin_kwargs.get("advanced_arg", 'Chinese') yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language) \ No newline at end of file diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index 54270ab..4bd772f 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py @@ -17,7 +17,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, file_content, page_one = read_and_clean_pdf_text(file_name) # (尝试)按照章节切割PDF file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - + TOKEN_LIMIT_PER_FRAGMENT = 2500 from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit @@ -25,7 +25,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model']) # 为了更好的效果,我们剥离Introduction之后的部分(如果有) paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - + ############################## <第 1 步,从摘要中提取高价值信息,放到history中> ################################## final_results = [] final_results.append(paper_meta) @@ -44,10 +44,10 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i]}" i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i][:200]}" gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, + llm_kwargs, chatbot, history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 sys_prompt="Extract the main idea of this section with Chinese." 
# 提示 - ) + ) iteration_results.append(gpt_say) last_iteration_result = gpt_say @@ -67,15 +67,15 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, - (2):What are the past methods? What are the problems with them? Is the approach well motivated? - (3):What is the research methodology proposed in this paper? - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals? -Follow the format of the output that follows: +Follow the format of the output that follows: 1. Title: xxx\n\n 2. Authors: xxx\n\n 3. Affiliation: xxx\n\n 4. Keywords: xxx\n\n 5. Urls: xxx or xxx , xxx \n\n 6. Summary: \n\n - - (1):xxx;\n - - (2):xxx;\n + - (1):xxx;\n + - (2):xxx;\n - (3):xxx;\n - (4):xxx.\n\n Be sure to use Chinese answers (proper nouns need to be marked in English), statements as concise and academic as possible, @@ -85,8 +85,8 @@ do not have too much repetitive information, numerical values using the original file_write_buffer.extend(final_results) i_say, final_results = input_clipping(i_say, final_results, max_token_limit=2000) gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user='开始最终总结', - llm_kwargs=llm_kwargs, chatbot=chatbot, history=final_results, + inputs=i_say, inputs_show_user='开始最终总结', + llm_kwargs=llm_kwargs, chatbot=chatbot, history=final_results, sys_prompt= f"Extract the main idea of this paper with less than {NUM_OF_WORD} Chinese characters" ) final_results.append(gpt_say) @@ -114,8 +114,8 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -134,7 +134,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 搜索需要处理的文件清单 file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] - + # 如果没找到任何文件 if len(file_manifest) == 0: report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py index 181d51c..4532f3d 100644 --- a/crazy_functions/批量总结PDF文档pdfminer.py +++ b/crazy_functions/批量总结PDF文档pdfminer.py @@ -85,10 +85,10 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo msg = '正常' # ** gpt request ** gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, + inputs=i_say, + inputs_show_user=i_say_show_user, llm_kwargs=llm_kwargs, - chatbot=chatbot, + chatbot=chatbot, history=[], sys_prompt="总结文章。" ) # 带超时倒计时 @@ -106,10 +106,10 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo msg = '正常' # ** gpt request ** gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, + inputs=i_say, + inputs_show_user=i_say, llm_kwargs=llm_kwargs, - chatbot=chatbot, + chatbot=chatbot, history=history, sys_prompt="总结文章。" ) # 带超时倒计时 @@ -138,8 +138,8 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo try: import pdfminer, bs4 except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 7a18277..d5e33c2 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -76,8 +76,8 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd') success = success or success_mmd file_manifest += file_manifest_mmd - chatbot.append(["文件列表:", ", ".join([e.split('/')[-1] for e in file_manifest])]); - yield from update_ui( chatbot=chatbot, history=history) + chatbot.append(["文件列表:", ", ".join([e.split('/')[-1] for e in file_manifest])]); + yield from update_ui( chatbot=chatbot, history=history) # 检测输入参数,如没有给定输入参数,直接退出 if not success: if txt == "": txt = '空空如也的输入栏' diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index 3d11162..7d6ad4f 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -68,7 +68,7 @@ def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa with open(grobid_json_res, 'w+', encoding='utf8') as f: f.write(json.dumps(article_dict, indent=4, ensure_ascii=False)) promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot) - + if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG) chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) @@ -97,7 +97,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, # 为了更好的效果,我们剥离Introduction之后的部分(如果有) paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - + # 单线,获取文章meta信息 paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}", @@ -121,7 +121,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, ) gpt_response_collection_md = copy.deepcopy(gpt_response_collection) # 整理报告的格式 - for i,k in enumerate(gpt_response_collection_md): + for i,k in enumerate(gpt_response_collection_md): if i%2==0: gpt_response_collection_md[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection_md)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection_md)//2}]:\n " else: @@ -139,18 +139,18 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, # write html try: - ch = construct_html() + ch = construct_html() orig = "" trans = "" gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): + for i,k in enumerate(gpt_response_collection_html): if i%2==0: gpt_response_collection_html[i] = paper_fragments[i//2].replace('#', '') else: gpt_response_collection_html[i] = gpt_response_collection_html[i] final = ["论文概况", paper_meta_info.replace('# ', '### '), "二、论文翻译", ""] final.extend(gpt_response_collection_html) - for i, k in enumerate(final): + for i, k in enumerate(final): if i%2==0: orig = k if i%2==1: diff --git a/crazy_functions/数学动画生成manim.py b/crazy_functions/数学动画生成manim.py index 9465ccc..551a808 100644 --- a/crazy_functions/数学动画生成manim.py +++ b/crazy_functions/数学动画生成manim.py @@ -27,7 +27,7 @@ def eval_manim(code): class_name = get_class_name(code) - try: 
+ try: time_str = gen_time_str() subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"]) shutil.move(f'media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{time_str}.mp4') @@ -36,7 +36,7 @@ def eval_manim(code): output = e.output.decode() print(f"Command returned non-zero exit status {e.returncode}: {output}.") return f"Evaluating python script failed: {e.output}." - except: + except: print('generating mp4 failed') return "Generating mp4 failed." @@ -45,7 +45,7 @@ def get_code_block(reply): import re pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) != 1: + if len(matches) != 1: raise RuntimeError("GPT is not generating proper code.") return matches[0].strip('python') # code block @@ -61,7 +61,7 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt user_request 当前用户的请求信息(IP地址等) """ # 清空历史,以免输入溢出 - history = [] + history = [] # 基本信息:功能、贡献者 chatbot.append([ @@ -73,24 +73,24 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt # 尝试导入依赖, 如果缺少依赖, 则给出安装建议 dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面 if not dep_ok: return - + # 输入 i_say = f'Generate a animation to show: ' + txt demo = ["Here is some examples of manim", examples_of_manim()] _, demo = input_clipping(inputs="", history=demo, max_token_limit=2560) # 开始 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, sys_prompt= r"Write a animation script with 3blue1brown's manim. "+ - r"Please begin with `from manim import *`. " + + r"Please begin with `from manim import *`. " + r"Answer me with a code block wrapped by ```." 
) chatbot.append(["开始生成动画", "..."]) history.extend([i_say, gpt_say]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - + # 将代码转为动画 code = get_code_block(gpt_say) res = eval_manim(code) diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py index 732c82c..fd935ab 100644 --- a/crazy_functions/理解PDF文档内容.py +++ b/crazy_functions/理解PDF文档内容.py @@ -15,7 +15,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro file_content, page_one = read_and_clean_pdf_text(file_name) # (尝试)按照章节切割PDF file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - + TOKEN_LIMIT_PER_FRAGMENT = 2500 from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit @@ -23,7 +23,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model']) # 为了更好的效果,我们剥离Introduction之后的部分(如果有) paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - + ############################## <第 1 步,从摘要中提取高价值信息,放到history中> ################################## final_results = [] final_results.append(paper_meta) @@ -42,10 +42,10 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}" i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]} ...." gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, + llm_kwargs, chatbot, history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 sys_prompt="Extract the main idea of this section, answer me with Chinese." 
# 提示 - ) + ) iteration_results.append(gpt_say) last_iteration_result = gpt_say @@ -76,8 +76,8 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat try: import fitz except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/生成函数注释.py b/crazy_functions/生成函数注释.py index 78aa453..20cb6d2 100644 --- a/crazy_functions/生成函数注释.py +++ b/crazy_functions/生成函数注释.py @@ -16,7 +16,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( @@ -27,7 +27,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 if not fast_debug: time.sleep(2) - if not fast_debug: + if not fast_debug: res = write_history_to_file(history) promote_file_to_downloadzone(res, chatbot=chatbot) chatbot.append(("完成了吗?", res)) diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py index dc01e94..a53fad5 100644 --- a/crazy_functions/生成多种Mermaid图表.py +++ b/crazy_functions/生成多种Mermaid图表.py @@ -179,15 +179,15 @@ def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs): i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}" i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...." gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, + llm_kwargs, chatbot, history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果 sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese." 
# 提示 - ) + ) results.append(gpt_say) last_iteration_result = gpt_say ############################## <第 2 步,根据整理的摘要选择图表类型> ################################## if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数 + gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数 results_txt = '\n'.join(results) #合并摘要 if gpt_say not in ['1','2','3','4','5','6','7','8','9']: #如插件参数不正确则使用对话模型判断 i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 @@ -198,7 +198,7 @@ def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs): gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=i_say, inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="" ) if gpt_say in ['1','2','3','4','5','6','7','8','9']: #判断返回是否正确 @@ -228,12 +228,12 @@ def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs): gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=i_say, inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="" ) history.append(gpt_say) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - + @CatchException def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ @@ -249,11 +249,11 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, # 基本信息:功能、贡献者 chatbot.append([ - "函数插件功能?", + "函数插件功能?", "根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - + if os.path.exists(txt): #如输入区无内容则直接解析历史记录 from crazy_functions.pdf_fns.parse_word import extract_text_from_files file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history) @@ -264,15 +264,15 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, if excption != "": if excption == "word": - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。") - + elif excption == "pdf": - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - + elif excption == "word_pip": report_exception(chatbot, history, a=f"解析项目: {txt}", diff --git a/crazy_functions/知识库问答.py b/crazy_functions/知识库问答.py index f3c7c9e..943eeef 100644 --- a/crazy_functions/知识库问答.py +++ b/crazy_functions/知识库问答.py @@ -9,7 +9,7 @@ install_msg =""" 3. python -m pip install unstructured[all-docs] --upgrade -4. python -c 'import nltk; nltk.download("punkt")' +4. python -c 'import nltk; nltk.download("punkt")' """ @CatchException @@ -56,7 +56,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - + # < -------------------预热文本向量化模组--------------- > chatbot.append(['
'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -109,8 +109,8 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt)) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + inputs=prompt, inputs_show_user=txt, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt=system_prompt ) history.extend((prompt, gpt_say)) diff --git a/crazy_functions/联网的ChatGPT.py b/crazy_functions/联网的ChatGPT.py index 346492d..c121e54 100644 --- a/crazy_functions/联网的ChatGPT.py +++ b/crazy_functions/联网的ChatGPT.py @@ -40,10 +40,10 @@ def scrape_text(url, proxies) -> str: 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', 'Content-Type': 'text/plain', } - try: + try: response = requests.get(url, headers=headers, proxies=proxies, timeout=8) if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: + except: return "无法连接到该网页" soup = BeautifulSoup(response.text, "html.parser") for script in soup(["script", "style"]): @@ -66,7 +66,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s user_request 当前用户的请求信息(IP地址等) """ history = [] # 清空历史,以免输入溢出 - chatbot.append((f"请结合互联网信息回答以下问题:{txt}", + chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 @@ -91,13 +91,13 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s # ------------- < 第3步:ChatGPT综合 > ------------- i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, + inputs=i_say, + history=history, max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 ) gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" ) chatbot[-1] = (i_say, gpt_say) diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index 27f4499..e5fa2b6 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -33,7 +33,7 @@ explain_msg = """ - 「请调用插件,解析python源代码项目,代码我刚刚打包拖到上传区了」 - 「请问Transformer网络的结构是怎样的?」 -2. 您可以打开插件下拉菜单以了解本项目的各种能力。 +2. 您可以打开插件下拉菜单以了解本项目的各种能力。 3. 
如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 @@ -67,7 +67,7 @@ class UserIntention(BaseModel): def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt=system_prompt ) chatbot[-1] = [txt, gpt_say] @@ -115,7 +115,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt if is_the_upload_folder(txt): state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False) appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。" - + if is_certain or (state.has_provided_explaination): # 如果意图明确,跳过提示环节 state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) @@ -152,7 +152,7 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst analyze_res = run_gpt_fn(inputs, "") try: user_intention = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn) - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", except JsonStringError as e: yield from update_ui_lastest_msg( lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0) @@ -161,7 +161,7 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst pass yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", + lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", chatbot=chatbot, history=history, delay=0) # 用户意图: 修改本项目的配置 diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index dfd0de0..3df13d4 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -82,13 +82,13 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, inputs=inputs, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot, history=this_iteration_history_feed, # 迭代之前的分析 sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) - + diagram_code = make_diagram(this_iteration_files, result, this_iteration_history_feed) summary = "请用一句话概括这些文件的整体功能。\n\n" + diagram_code summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=summary, - inputs_show_user=summary, - llm_kwargs=llm_kwargs, + inputs=summary, + inputs_show_user=summary, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[i_say, result], # 迭代之前的分析 sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py index 069d440..a608b7b 100644 --- a/crazy_functions/询问多个大语言模型.py +++ b/crazy_functions/询问多个大语言模型.py @@ -20,8 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + inputs=txt, inputs_show_user=txt, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, sys_prompt=system_prompt, retry_times_at_unknown_error=0 ) @@ -52,8 +52,8 @@ def 
同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, + inputs=txt, inputs_show_user=txt, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, sys_prompt=system_prompt, retry_times_at_unknown_error=0 ) diff --git a/crazy_functions/语音助手.py b/crazy_functions/语音助手.py index 8af0fd9..1e85b36 100644 --- a/crazy_functions/语音助手.py +++ b/crazy_functions/语音助手.py @@ -39,7 +39,7 @@ class AsyncGptTask(): try: MAX_TOKEN_ALLO = 2560 i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO) - gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt, + gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt, observe_window=observe_window[index], console_slience=True) except ConnectionAbortedError as token_exceed_err: print('至少一个线程任务Token溢出而失败', e) @@ -120,7 +120,7 @@ class InterviewAssistant(AliyunASR): yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 self.plugin_wd.feed() - if self.event_on_result_chg.is_set(): + if self.event_on_result_chg.is_set(): # called when some words have finished self.event_on_result_chg.clear() chatbot[-1] = list(chatbot[-1]) @@ -151,7 +151,7 @@ class InterviewAssistant(AliyunASR): # add gpt task 创建子线程请求gpt,避免线程阻塞 history = chatbot2history(chatbot) self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt) - + self.buffered_sentence = "" chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/谷歌检索小助手.py b/crazy_functions/谷歌检索小助手.py index 8b7ea3f..2787351 100644 --- a/crazy_functions/谷歌检索小助手.py +++ b/crazy_functions/谷歌检索小助手.py @@ -20,10 +20,10 @@ def get_meta_information(url, chatbot, history): proxies = get_conf('proxies') headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', - 'Accept-Encoding': 'gzip, deflate, br', + 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7', 'Cache-Control':'max-age=0', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 'Connection': 'keep-alive' } try: @@ -95,7 +95,7 @@ def get_meta_information(url, chatbot, history): ) try: paper = next(search.results()) except: paper = None - + is_match = paper is not None and string_similar(title, paper.title) > 0.90 # 如果在Arxiv上匹配失败,检索文章的历史版本的题目 @@ -146,8 +146,8 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import math from bs4 import BeautifulSoup except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", + report_exception(chatbot, history, + a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -163,7 +163,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst if len(meta_paper_info_list[:batchsize]) > 0: i_say = 
"下面是一些学术文献的数据,提取出以下内容:" + \ "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \ - f"以下是信息源:{str(meta_paper_info_list[:batchsize])}" + f"以下是信息源:{str(meta_paper_info_list[:batchsize])}" inputs_show_user = f"请分析此页面中出现的所有文章:{txt},这是第{batch+1}批" gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( @@ -175,11 +175,11 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst history.extend([ f"第{batch+1}批", gpt_say ]) meta_paper_info_list = meta_paper_info_list[batchsize:] - chatbot.append(["状态?", + chatbot.append(["状态?", "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."]) msg = '正常' yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 path = write_history_to_file(history) promote_file_to_downloadzone(path, chatbot=chatbot) - chatbot.append(("完成了吗?", path)); + chatbot.append(("完成了吗?", path)); yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 diff --git a/crazy_functions/高级功能函数模板.py b/crazy_functions/高级功能函数模板.py index d22a674..f75f0e8 100644 --- a/crazy_functions/高级功能函数模板.py +++ b/crazy_functions/高级功能函数模板.py @@ -40,7 +40,7 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s """ history = [] # 清空历史,以免输入溢出 chatbot.append(( - "您正在调用插件:历史上的今天", + "您正在调用插件:历史上的今天", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!" + 高阶功能模板函数示意图)) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 for i in range(5): @@ -48,8 +48,8 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。' gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + inputs=i_say, inputs_show_user=i_say, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。" ) chatbot[-1] = (i_say, gpt_say) @@ -84,15 +84,15 @@ def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ history = [] # 清空历史,以免输入溢出 chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一些关键词,然后使用mermaid+llm绘制图表。")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - + if txt == "": txt = "空白的输入栏" # 调皮一下 - + i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。' i_say = PROMPT.format(subject=txt) gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=i_say, inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="" ) history.append(i_say); history.append(gpt_say) diff --git a/multi_language.py b/multi_language.py index c65872a..93c3178 100644 --- a/multi_language.py +++ b/multi_language.py @@ -1,7 +1,7 @@ """ Translate this project to other languages (experimental, please open an issue if there is any bug) - - + + Usage: 1. modify config.py, set your LLM_MODEL and API_KEY(s) to provide access to OPENAI (or any other LLM model provider) @@ -11,20 +11,20 @@ 3. 
modify TransPrompt (below ↓) TransPrompt = f"Replace each json value `#` with translated results in English, e.g., \"原始文本\":\"TranslatedText\". Keep Json format. Do not answer #." - 4. Run `python multi_language.py`. + 4. Run `python multi_language.py`. Note: You need to run it multiple times to increase translation coverage because GPT makes mistakes sometimes. (You can also run `CACHE_ONLY=True python multi_language.py` to use cached translation mapping) 5. Find the translated program in `multi-language\English\*` - + P.S. - + - The translation mapping will be stored in `docs/translation_xxxx.json`, you can revised mistaken translation there. - + - If you would like to share your `docs/translation_xxxx.json`, (so that everyone can use the cached & revised translation mapping), please open a Pull Request - If there is any translation error in `docs/translation_xxxx.json`, please open a Pull Request - + - Welcome any Pull Request, regardless of language """ @@ -58,7 +58,7 @@ if not os.path.exists(CACHE_FOLDER): def lru_file_cache(maxsize=128, ttl=None, filename=None): """ - Decorator that caches a function's return value after being called with given arguments. + Decorator that caches a function's return value after being called with given arguments. It uses a Least Recently Used (LRU) cache strategy to limit the size of the cache. maxsize: Maximum size of the cache. Defaults to 128. ttl: Time-to-Live of the cache. If a value hasn't been accessed for `ttl` seconds, it will be evicted from the cache. @@ -151,7 +151,7 @@ def map_to_json(map, language): def read_map_from_json(language): if os.path.exists(f'docs/translate_{language.lower()}.json'): - with open(f'docs/translate_{language.lower()}.json', 'r', encoding='utf8') as f: + with open(f'docs/translate_{language.lower()}.json', 'r', encoding='utf8') as f: res = json.load(f) res = {k:v for k, v in res.items() if v is not None and contains_chinese(k)} return res @@ -168,7 +168,7 @@ def advanced_split(splitted_string, spliter, include_spliter=False): splitted[i] += spliter splitted[i] = splitted[i].strip() for i in reversed(range(len(splitted))): - if not contains_chinese(splitted[i]): + if not contains_chinese(splitted[i]): splitted.pop(i) splitted_string_tmp.extend(splitted) else: @@ -183,12 +183,12 @@ def trans(word_to_translate, language, special=False): if len(word_to_translate) == 0: return {} from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies - + cookies = load_chat_cookies() llm_kwargs = { 'api_key': cookies['api_key'], 'llm_model': cookies['llm_model'], - 'top_p':1.0, + 'top_p':1.0, 'max_length': None, 'temperature':0.4, } @@ -204,12 +204,12 @@ def trans(word_to_translate, language, special=False): sys_prompt_array = [f"Translate following sentences to {LANG}. E.g., You should translate sentences to the following format ['translation of sentence 1', 'translation of sentence 2']. Do NOT answer with Chinese!" 
for _ in inputs_array] chatbot = ChatBotWithCookies(llm_kwargs) gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array, - inputs_show_user_array, - llm_kwargs, - chatbot, - history_array, - sys_prompt_array, + inputs_array, + inputs_show_user_array, + llm_kwargs, + chatbot, + history_array, + sys_prompt_array, ) while True: try: @@ -224,7 +224,7 @@ def trans(word_to_translate, language, special=False): try: res_before_trans = eval(result[i-1]) res_after_trans = eval(result[i]) - if len(res_before_trans) != len(res_after_trans): + if len(res_before_trans) != len(res_after_trans): raise RuntimeError for a,b in zip(res_before_trans, res_after_trans): translated_result[a] = b @@ -246,12 +246,12 @@ def trans_json(word_to_translate, language, special=False): if len(word_to_translate) == 0: return {} from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies - + cookies = load_chat_cookies() llm_kwargs = { 'api_key': cookies['api_key'], 'llm_model': cookies['llm_model'], - 'top_p':1.0, + 'top_p':1.0, 'max_length': None, 'temperature':0.4, } @@ -261,18 +261,18 @@ def trans_json(word_to_translate, language, special=False): word_to_translate_split = split_list(word_to_translate, N_EACH_REQ) inputs_array = [{k:"#" for k in s} for s in word_to_translate_split] inputs_array = [ json.dumps(i, ensure_ascii=False) for i in inputs_array] - + inputs_show_user_array = inputs_array history_array = [[] for _ in inputs_array] sys_prompt_array = [TransPrompt for _ in inputs_array] chatbot = ChatBotWithCookies(llm_kwargs) gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array, - inputs_show_user_array, - llm_kwargs, - chatbot, - history_array, - sys_prompt_array, + inputs_array, + inputs_show_user_array, + llm_kwargs, + chatbot, + history_array, + sys_prompt_array, ) while True: try: @@ -336,7 +336,7 @@ def step_1_core_key_translate(): cached_translation = read_map_from_json(language=LANG_STD) cached_translation_keys = list(cached_translation.keys()) for d in chinese_core_keys_norepeat: - if d not in cached_translation_keys: + if d not in cached_translation_keys: need_translate.append(d) if CACHE_ONLY: @@ -379,7 +379,7 @@ def step_1_core_key_translate(): # read again with open(file_path, 'r', encoding='utf-8') as f: content = f.read() - + for k, v in chinese_core_keys_norepeat_mapping.items(): content = content.replace(k, v) @@ -390,7 +390,7 @@ def step_1_core_key_translate(): def step_2_core_key_translate(): # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # step2 + # step2 # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= def load_string(strings, string_input): @@ -423,7 +423,7 @@ def step_2_core_key_translate(): splitted_string = advanced_split(splitted_string, spliter=" ", include_spliter=False) splitted_string = advanced_split(splitted_string, spliter="- ", include_spliter=False) splitted_string = advanced_split(splitted_string, spliter="---", include_spliter=False) - + # -------------------------------------- for j, s in enumerate(splitted_string): # .com if '.com' in s: continue @@ -457,7 +457,7 @@ def step_2_core_key_translate(): comments_arr = [] for code_sp in content.splitlines(): comments = re.findall(r'#.*$', code_sp) - for comment in comments: + for comment in comments: 
load_string(strings=comments_arr, string_input=comment) string_literals.extend(comments_arr) @@ -479,7 +479,7 @@ def step_2_core_key_translate(): cached_translation = read_map_from_json(language=LANG) cached_translation_keys = list(cached_translation.keys()) for d in chinese_literal_names_norepeat: - if d not in cached_translation_keys: + if d not in cached_translation_keys: need_translate.append(d) if CACHE_ONLY: @@ -504,18 +504,18 @@ def step_2_core_key_translate(): # read again with open(file_path, 'r', encoding='utf-8') as f: content = f.read() - + for k, v in cached_translation.items(): if v is None: continue - if '"' in v: + if '"' in v: v = v.replace('"', "`") - if '\'' in v: + if '\'' in v: v = v.replace('\'', "`") content = content.replace(k, v) with open(file_path, 'w', encoding='utf-8') as f: f.write(content) - + if file.strip('.py') in cached_translation: file_new = cached_translation[file.strip('.py')] + '.py' file_path_new = os.path.join(root, file_new) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index d7f2ad9..9470747 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -8,10 +8,10 @@ 具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁 2. predict_no_ui_long_connection(...) """ -import tiktoken, copy +import tiktoken, copy, re from functools import lru_cache from concurrent.futures import ThreadPoolExecutor -from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask +from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui from .bridge_chatgpt import predict as chatgpt_ui @@ -61,6 +61,9 @@ API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "A openai_endpoint = "https://api.openai.com/v1/chat/completions" api2d_endpoint = "https://openai.api2d.net/v1/chat/completions" newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub" +gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models" +claude_endpoint = "https://api.anthropic.com" + if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/' azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15' # 兼容旧版的配置 @@ -75,7 +78,8 @@ except: if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint] if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint] if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint] - +if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemini_endpoint] +if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint] # 获取tokenizer tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo") @@ -291,7 +295,7 @@ model_info = { "gemini-pro": { "fn_with_ui": genai_ui, "fn_without_ui": genai_noui, - "endpoint": None, + "endpoint": gemini_endpoint, "max_token": 1024 * 32, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, @@ -299,7 +303,7 @@ model_info = { "gemini-pro-vision": { "fn_with_ui": genai_ui, "fn_without_ui": genai_noui, - "endpoint": None, + "endpoint": gemini_endpoint, "max_token": 1024 * 32, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, @@ -349,25 +353,57 @@ for model in AVAIL_LLM_MODELS: model_info.update({model: mi}) # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=- -if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS: +# claude家族 
+claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-sonnet-20240229","claude-3-opus-20240229"] +if any(item in claude_models for item in AVAIL_LLM_MODELS): from .bridge_claude import predict_no_ui_long_connection as claude_noui from .bridge_claude import predict as claude_ui model_info.update({ - "claude-1-100k": { + "claude-instant-1.2": { "fn_with_ui": claude_ui, "fn_without_ui": claude_noui, - "endpoint": None, - "max_token": 8196, + "endpoint": claude_endpoint, + "max_token": 100000, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, }) model_info.update({ - "claude-2": { + "claude-2.0": { "fn_with_ui": claude_ui, "fn_without_ui": claude_noui, - "endpoint": None, - "max_token": 8196, + "endpoint": claude_endpoint, + "max_token": 100000, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + }) + model_info.update({ + "claude-2.1": { + "fn_with_ui": claude_ui, + "fn_without_ui": claude_noui, + "endpoint": claude_endpoint, + "max_token": 200000, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + }) + model_info.update({ + "claude-3-sonnet-20240229": { + "fn_with_ui": claude_ui, + "fn_without_ui": claude_noui, + "endpoint": claude_endpoint, + "max_token": 200000, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + }) + model_info.update({ + "claude-3-opus-20240229": { + "fn_with_ui": claude_ui, + "fn_without_ui": claude_noui, + "endpoint": claude_endpoint, + "max_token": 200000, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -675,22 +711,28 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder }) except: print(trimmed_format_exc()) -# if "skylark" in AVAIL_LLM_MODELS: -# try: -# from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui -# from .bridge_skylark2 import predict as skylark_ui -# model_info.update({ -# "skylark": { -# "fn_with_ui": skylark_ui, -# "fn_without_ui": skylark_noui, -# "endpoint": None, -# "max_token": 4096, -# "tokenizer": tokenizer_gpt35, -# "token_cnt": get_token_num_gpt35, -# } -# }) -# except: -# print(trimmed_format_exc()) +# -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=- +for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]: + # 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"] + # 其中 + # "one-api-" 是前缀(必要) + # "mixtral-8x7b" 是模型名(必要) + # "(max_token=6666)" 是配置(非必要) + try: + _, max_token_tmp = read_one_api_model_name(model) + except: + print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。") + continue + model_info.update({ + model: { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": max_token_tmp, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + }) # <-- 用于定义和切换多个azure模型 --> diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index c58495d..990556a 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -56,15 +56,15 @@ class GetGLM2Handle(LocalLLMHandle): query, max_length, top_p, temperature, history = adaptor(kwargs) - for response, history in self._model.stream_chat(self._tokenizer, - query, - history, + for response, history in self._model.stream_chat(self._tokenizer, + query, + history, max_length=max_length, top_p=top_p, temperature=temperature, ): yield response - + def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 🏃‍♂️🏃‍♂️🏃‍♂️ 
主进程执行 diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 3caa476..aecfc69 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -55,15 +55,15 @@ class GetGLM3Handle(LocalLLMHandle): query, max_length, top_p, temperature, history = adaptor(kwargs) - for response, history in self._model.stream_chat(self._tokenizer, - query, - history, + for response, history in self._model.stream_chat(self._tokenizer, + query, + history, max_length=max_length, top_p=top_p, temperature=temperature, ): yield response - + def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 diff --git a/request_llms/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py index d812bae..84f1426 100644 --- a/request_llms/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -37,7 +37,7 @@ class GetGLMFTHandle(Process): self.check_dependency() self.start() self.threadLock = threading.Lock() - + def check_dependency(self): try: import sentencepiece @@ -101,7 +101,7 @@ class GetGLMFTHandle(Process): break except Exception as e: retry += 1 - if retry > 3: + if retry > 3: self.child.send('[Local Message] Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数。') raise RuntimeError("不能正常加载ChatGLMFT的参数!") @@ -113,7 +113,7 @@ class GetGLMFTHandle(Process): for response, history in self.chatglmft_model.stream_chat(self.chatglmft_tokenizer, **kwargs): self.child.send(response) # # 中途接收可能的终止指令(如果有的话) - # if self.child.poll(): + # if self.child.poll(): # command = self.child.recv() # if command == '[Terminate]': break except: @@ -133,7 +133,7 @@ class GetGLMFTHandle(Process): else: break self.threadLock.release() - + global glmft_handle glmft_handle = None ################################################################################# @@ -146,7 +146,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if glmft_handle is None: glmft_handle = GetGLMFTHandle() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glmft_handle.info - if not glmft_handle.success: + if not glmft_handle.success: error = glmft_handle.info glmft_handle = None raise RuntimeError(error) @@ -161,7 +161,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", response = "" for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") return response @@ -180,7 +180,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp glmft_handle = GetGLMFTHandle() chatbot[-1] = (inputs, load_message + "\n\n" + glmft_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if not glmft_handle.success: + if not glmft_handle.success: glmft_handle = None return diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py index 4b90571..4cf7095 100644 --- a/request_llms/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -59,7 +59,7 @@ class GetONNXGLMHandle(LocalLLMHandle): temperature=temperature, ): yield answer - + def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 
🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index e8327d4..692d85a 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -21,7 +21,7 @@ import random # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder +from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, read_one_api_model_name proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \ get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY') @@ -358,6 +358,9 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): model = llm_kwargs['llm_model'] if llm_kwargs['llm_model'].startswith('api2d-'): model = llm_kwargs['llm_model'][len('api2d-'):] + if llm_kwargs['llm_model'].startswith('one-api-'): + model = llm_kwargs['llm_model'][len('one-api-'):] + model, _ = read_one_api_model_name(model) if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制 model = random.choice([ diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py index ebcf968..45b71bd 100644 --- a/request_llms/bridge_chatgpt_vision.py +++ b/request_llms/bridge_chatgpt_vision.py @@ -27,7 +27,7 @@ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check def report_invalid_key(key): - if get_conf("BLOCK_INVALID_APIKEY"): + if get_conf("BLOCK_INVALID_APIKEY"): # 实验性功能,自动检测并屏蔽失效的KEY,请勿使用 from request_llms.key_manager import ApiKeyManager api_key = ApiKeyManager().add_key_to_blacklist(key) @@ -51,13 +51,13 @@ def decode_chunk(chunk): choice_valid = False has_content = False has_role = False - try: + try: chunkjson = json.loads(chunk_decoded[6:]) has_choices = 'choices' in chunkjson if has_choices: choice_valid = (len(chunkjson['choices']) > 0) if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"] if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"] - except: + except: pass return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role @@ -103,7 +103,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp raw_input = inputs logging.info(f'[raw_input] {raw_input}') - def make_media_input(inputs, image_paths): + def make_media_input(inputs, image_paths): for image_path in image_paths: inputs = inputs + f'

' return inputs @@ -122,7 +122,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 return - + # 检查endpoint是否合法 try: from .bridge_all import model_info @@ -150,7 +150,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if retry > MAX_RETRY: raise TimeoutError gpt_replying_buffer = "" - + is_head_of_the_stream = True if stream: stream_response = response.iter_lines() @@ -162,21 +162,21 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chunk_decoded = chunk.decode() error_msg = chunk_decoded # 首先排除一个one-api没有done数据包的第三方Bug情形 - if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0: + if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0: yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。") break # 其他情况,直接返回报错 chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key) yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面 return - + # 提前读取一些信息 (用于判断异常) chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk) if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded): # 数据流的第一帧不携带content is_head_of_the_stream = False; continue - + if chunk: try: if has_choices and not choice_valid: @@ -220,7 +220,7 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup' if "reduce the length" in error_msg: if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 - history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], + history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. 
(若再次失败则更可能是因为输入过长.)") elif "does not exist" in error_msg: @@ -260,7 +260,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths): "Authorization": f"Bearer {api_key}" } if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG}) - if llm_kwargs['llm_model'].startswith('azure-'): + if llm_kwargs['llm_model'].startswith('azure-'): headers.update({"api-key": api_key}) if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys(): azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"] @@ -294,7 +294,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths): payload = { "model": model, - "messages": messages, + "messages": messages, "temperature": llm_kwargs['temperature'], # 1.0, "top_p": llm_kwargs['top_p'], # 1.0, "n": 1, diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py index f2f0709..94e1ebb 100644 --- a/request_llms/bridge_chatgpt_website.py +++ b/request_llms/bridge_chatgpt_website.py @@ -73,12 +73,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", result = '' while True: try: chunk = next(stream_response).decode() - except StopIteration: + except StopIteration: break except requests.exceptions.ConnectionError: chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。 if len(chunk)==0: continue - if not chunk.startswith('data:'): + if not chunk.startswith('data:'): error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode() if "reduce the length" in error_msg: raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) @@ -89,14 +89,14 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", delta = json_data["delta"] if len(delta) == 0: break if "role" in delta: continue - if "content" in delta: + if "content" in delta: result += delta["content"] if not console_slience: print(delta["content"], end='') - if observe_window is not None: + if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: observe_window[0] += delta["content"] # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("用户取消了程序。") else: raise RuntimeError("意外Json结构:"+delta) @@ -132,7 +132,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 return - + history.append(inputs); history.append("") retry = 0 @@ -151,7 +151,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if retry > MAX_RETRY: raise TimeoutError gpt_replying_buffer = "" - + is_head_of_the_stream = True if stream: stream_response = response.iter_lines() @@ -165,12 +165,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) yield from update_ui(chatbot=chatbot, history=history, msg="非Openai官方接口返回了错误:" + chunk.decode()) # 刷新界面 return - + # print(chunk.decode()[6:]) if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): # 数据流的第一帧不携带content is_head_of_the_stream = False; continue - + if chunk: try: chunk_decoded = chunk.decode() @@ -203,7 +203,7 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) openai_website 
= ' 请登录OpenAI查看详情 https://platform.openai.com/signup' if "reduce the length" in error_msg: if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 - history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], + history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)") # history = [] # 清除历史 @@ -264,7 +264,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): payload = { "model": llm_kwargs['llm_model'].strip('api2d-'), - "messages": messages, + "messages": messages, "temperature": llm_kwargs['temperature'], # 1.0, "top_p": llm_kwargs['top_p'], # 1.0, "n": 1, diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 42b7505..50c0329 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -11,13 +11,12 @@ """ import os -import json import time -import gradio as gr -import logging import traceback -import requests -import importlib +from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path + +picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。" +Claude_3_Models = ["claude-3-sonnet-20240229", "claude-3-opus-20240229"] # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 @@ -56,7 +55,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", """ from anthropic import Anthropic watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) + if inputs == "": inputs = "空空如也的输入栏" + message = generate_payload(inputs, llm_kwargs, history, stream=True, image_paths=None) retry = 0 if len(ANTHROPIC_API_KEY) == 0: raise RuntimeError("没有设置ANTHROPIC_API_KEY选项") @@ -65,15 +65,16 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", try: # make a POST request to the API endpoint, stream=False from .bridge_all import model_info - anthropic = Anthropic(api_key=ANTHROPIC_API_KEY) + anthropic = Anthropic(api_key=ANTHROPIC_API_KEY, base_url=model_info[llm_kwargs['llm_model']]['endpoint']) # endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] # with ProxyNetworkActivate() - stream = anthropic.completions.create( - prompt=prompt, - max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping. + stream = anthropic.messages.create( + messages=message, + max_tokens=4096, # The maximum number of tokens to generate before stopping. 
model=llm_kwargs['llm_model'], stream=True, - temperature = llm_kwargs['temperature'] + temperature = llm_kwargs['temperature'], + system=sys_prompt ) break except Exception as e: @@ -82,15 +83,19 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if retry > MAX_RETRY: raise TimeoutError if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') result = '' - try: + try: for completion in stream: - result += completion.completion - if not console_slience: print(completion.completion, end='') - if observe_window is not None: + if completion.type == "message_start" or completion.type == "content_block_start": + continue + elif completion.type == "message_stop" or completion.type == "content_block_stop" or completion.type == "message_delta": + break + result += completion.delta.text + if not console_slience: print(completion.delta.text, end='') + if observe_window is not None: # 观测窗,把已经获取的数据显示出去 - if len(observe_window) >= 1: observe_window[0] += completion.completion + if len(observe_window) >= 1: observe_window[0] += completion.delta.text # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("用户取消了程序。") except Exception as e: @@ -98,6 +103,10 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", return result +def make_media_input(history,inputs,image_paths): + for image_path in image_paths: + inputs = inputs + f'

' + return inputs def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ @@ -109,23 +118,34 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 additional_fn代表点击的哪个按钮,按钮见functional.py """ + if inputs == "": inputs = "空空如也的输入栏" from anthropic import Anthropic if len(ANTHROPIC_API_KEY) == 0: chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY")) yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 return - + if additional_fn is not None: from core_functional import handle_core_functionality inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) - raw_input = inputs - logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 + have_recent_file, image_paths = every_image_file_in_path(chatbot) + if len(image_paths) > 20: + chatbot.append((inputs, "图片数量超过api上限(20张)")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") + return + + if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and have_recent_file: + if inputs == "" or inputs == "空空如也的输入栏": inputs = "请描述给出的图片" + system_prompt += picture_system_prompt # 由于没有单独的参数保存包含图片的历史,所以只能通过提示词对第几张图片进行定位 + chatbot.append((make_media_input(history,inputs, image_paths), "")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 + else: + chatbot.append((inputs, "")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 try: - prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream) + message = generate_payload(inputs, llm_kwargs, history, stream, image_paths) except RuntimeError as e: chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 @@ -138,17 +158,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp try: # make a POST request to the API endpoint, stream=True from .bridge_all import model_info - anthropic = Anthropic(api_key=ANTHROPIC_API_KEY) + anthropic = Anthropic(api_key=ANTHROPIC_API_KEY, base_url=model_info[llm_kwargs['llm_model']]['endpoint']) # endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] # with ProxyNetworkActivate() - stream = anthropic.completions.create( - prompt=prompt, - max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping. + stream = anthropic.messages.create( + messages=message, + max_tokens=4096, # The maximum number of tokens to generate before stopping. 
model=llm_kwargs['llm_model'], stream=True, - temperature = llm_kwargs['temperature'] + temperature = llm_kwargs['temperature'], + system=system_prompt ) - break except: retry += 1 @@ -158,10 +178,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if retry > MAX_RETRY: raise TimeoutError gpt_replying_buffer = "" - + for completion in stream: + if completion.type == "message_start" or completion.type == "content_block_start": + continue + elif completion.type == "message_stop" or completion.type == "content_block_stop" or completion.type == "message_delta": + break try: - gpt_replying_buffer = gpt_replying_buffer + completion.completion + gpt_replying_buffer = gpt_replying_buffer + completion.delta.text history[-1] = gpt_replying_buffer chatbot[-1] = (history[-2], history[-1]) yield from update_ui(chatbot=chatbot, history=history, msg='正常') # 刷新界面 @@ -172,57 +196,52 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str}") yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + tb_str) # 刷新界面 return - - - -# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py -def convert_messages_to_prompt(messages): - prompt = "" - role_map = { - "system": "Human", - "user": "Human", - "assistant": "Assistant", - } - for message in messages: - role = message["role"] - content = message["content"] - transformed_role = role_map[role] - prompt += f"\n\n{transformed_role.capitalize()}: {content}" - prompt += "\n\nAssistant: " - return prompt - -def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): +def generate_payload(inputs, llm_kwargs, history, stream, image_paths): """ 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 """ - from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT conversation_cnt = len(history) // 2 - messages = [{"role": "system", "content": system_prompt}] + messages = [] + if conversation_cnt: for index in range(0, 2*conversation_cnt, 2): what_i_have_asked = {} what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] + what_i_have_asked["content"] = [{"type": "text", "text": history[index]}] what_gpt_answer = {} what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue + what_gpt_answer["content"] = [{"type": "text", "text": history[index+1]}] + if what_i_have_asked["content"][0]["text"] != "": + if what_i_have_asked["content"][0]["text"] == "": continue + if what_i_have_asked["content"][0]["text"] == timeout_bot_msg: continue messages.append(what_i_have_asked) messages.append(what_gpt_answer) else: - messages[-1]['content'] = what_gpt_answer['content'] + messages[-1]['content'][0]['text'] = what_gpt_answer['content'][0]['text'] - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs + if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and image_paths: + base64_images = [] + for image_path in image_paths: + base64_images.append(encode_image(image_path)) + what_i_ask_now = {} + what_i_ask_now["role"] = "user" + what_i_ask_now["content"] = [] + for base64_image in base64_images: + what_i_ask_now["content"].append({ + "type": "image", + "source": { + "type": "base64", + "media_type": "image/jpeg", + "data": base64_image, + } + }) + 
what_i_ask_now["content"].append({"type": "text", "text": inputs}) + else: + what_i_ask_now = {} + what_i_ask_now["role"] = "user" + what_i_ask_now["content"] = [{"type": "text", "text": inputs}] messages.append(what_i_ask_now) - prompt = convert_messages_to_prompt(messages) - - return prompt - - + return messages \ No newline at end of file diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py index 89964ab..f8e62e6 100644 --- a/request_llms/bridge_deepseekcoder.py +++ b/request_llms/bridge_deepseekcoder.py @@ -88,7 +88,7 @@ class GetCoderLMHandle(LocalLLMHandle): temperature = kwargs['temperature'] history = kwargs['history'] return query, max_length, top_p, temperature, history - + query, max_length, top_p, temperature, history = adaptor(kwargs) history.append({ 'role': 'user', 'content': query}) messages = history @@ -97,14 +97,14 @@ class GetCoderLMHandle(LocalLLMHandle): inputs = inputs[:, -max_length:] inputs = inputs.to(self._model.device) generation_kwargs = dict( - inputs=inputs, + inputs=inputs, max_new_tokens=max_length, do_sample=False, top_p=top_p, streamer = self._streamer, top_k=50, temperature=temperature, - num_return_sequences=1, + num_return_sequences=1, eos_token_id=32021, ) thread = Thread(target=self._model.generate, kwargs=generation_kwargs, daemon=True) diff --git a/request_llms/bridge_google_gemini.py b/request_llms/bridge_google_gemini.py index cb85ecb..5cf3be9 100644 --- a/request_llms/bridge_google_gemini.py +++ b/request_llms/bridge_google_gemini.py @@ -20,7 +20,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if get_conf("GEMINI_API_KEY") == "": raise ValueError(f"请配置 GEMINI_API_KEY。") - genai = GoogleChatInit() + genai = GoogleChatInit(llm_kwargs) watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 gpt_replying_buffer = '' stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt) @@ -61,7 +61,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写")) yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面 return - def make_media_input(inputs, image_paths): + def make_media_input(inputs, image_paths): for image_path in image_paths: inputs = inputs + f'

' return inputs @@ -70,7 +70,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history) - genai = GoogleChatInit() + genai = GoogleChatInit(llm_kwargs) retry = 0 while True: try: diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index b2be36a..fb4437a 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -82,7 +82,7 @@ class GetInternlmHandle(LocalLLMHandle): history = kwargs['history'] real_prompt = combine_history(prompt, history) return model, tokenizer, real_prompt, max_length, top_p, temperature - + model, tokenizer, prompt, max_length, top_p, temperature = adaptor() prefix_allowed_tokens_fn = None logits_processor = None @@ -183,7 +183,7 @@ class GetInternlmHandle(LocalLLMHandle): outputs, model_kwargs, is_encoder_decoder=False ) unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long()) - + output_token_ids = input_ids[0].cpu().tolist() output_token_ids = output_token_ids[input_length:] for each_eos_token_id in eos_token_id: @@ -196,7 +196,7 @@ class GetInternlmHandle(LocalLLMHandle): if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): return - + # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ diff --git a/request_llms/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py index 2d3005e..25dbb42 100644 --- a/request_llms/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -20,7 +20,7 @@ class GetGLMHandle(Process): self.check_dependency() self.start() self.threadLock = threading.Lock() - + def check_dependency(self): try: import pandas @@ -102,7 +102,7 @@ class GetGLMHandle(Process): else: break self.threadLock.release() - + global llama_glm_handle llama_glm_handle = None ################################################################################# @@ -115,7 +115,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if llama_glm_handle is None: llama_glm_handle = GetGLMHandle() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info - if not llama_glm_handle.success: + if not llama_glm_handle.success: error = llama_glm_handle.info llama_glm_handle = None raise RuntimeError(error) @@ -130,7 +130,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): print(response) if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") return response @@ -149,7 +149,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp llama_glm_handle = GetGLMHandle() chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if not llama_glm_handle.success: + if not llama_glm_handle.success: llama_glm_handle = None return diff --git 
a/request_llms/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py index 2640176..2681157 100644 --- a/request_llms/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -20,7 +20,7 @@ class GetGLMHandle(Process): self.check_dependency() self.start() self.threadLock = threading.Lock() - + def check_dependency(self): try: import pandas @@ -102,7 +102,7 @@ class GetGLMHandle(Process): else: break self.threadLock.release() - + global pangu_glm_handle pangu_glm_handle = None ################################################################################# @@ -115,7 +115,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if pangu_glm_handle is None: pangu_glm_handle = GetGLMHandle() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + pangu_glm_handle.info - if not pangu_glm_handle.success: + if not pangu_glm_handle.success: error = pangu_glm_handle.info pangu_glm_handle = None raise RuntimeError(error) @@ -130,7 +130,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): print(response) if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") return response @@ -149,7 +149,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp pangu_glm_handle = GetGLMHandle() chatbot[-1] = (inputs, load_message + "\n\n" + pangu_glm_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if not pangu_glm_handle.success: + if not pangu_glm_handle.success: pangu_glm_handle = None return diff --git a/request_llms/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py index 0021a50..28893d4 100644 --- a/request_llms/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -20,7 +20,7 @@ class GetGLMHandle(Process): self.check_dependency() self.start() self.threadLock = threading.Lock() - + def check_dependency(self): try: import pandas @@ -102,7 +102,7 @@ class GetGLMHandle(Process): else: break self.threadLock.release() - + global rwkv_glm_handle rwkv_glm_handle = None ################################################################################# @@ -115,7 +115,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if rwkv_glm_handle is None: rwkv_glm_handle = GetGLMHandle() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + rwkv_glm_handle.info - if not rwkv_glm_handle.success: + if not rwkv_glm_handle.success: error = rwkv_glm_handle.info rwkv_glm_handle = None raise RuntimeError(error) @@ -130,7 +130,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): print(response) if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") return response @@ -149,7 +149,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, 
chatbot, history=[], system_promp rwkv_glm_handle = GetGLMHandle() chatbot[-1] = (inputs, load_message + "\n\n" + rwkv_glm_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if not rwkv_glm_handle.success: + if not rwkv_glm_handle.success: rwkv_glm_handle = None return diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index bfa3c14..ba92b21 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -48,7 +48,7 @@ class GetLlamaHandle(LocalLLMHandle): history = kwargs['history'] console_slience = kwargs.get('console_slience', True) return query, max_length, top_p, temperature, history, console_slience - + def convert_messages_to_prompt(query, history): prompt = "" for a, b in history: @@ -56,7 +56,7 @@ class GetLlamaHandle(LocalLLMHandle): prompt += "\n{b}" + b prompt += f"\n[INST]{query}[/INST]" return prompt - + query, max_length, top_p, temperature, history, console_slience = adaptor(kwargs) prompt = convert_messages_to_prompt(query, history) # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=- @@ -70,13 +70,13 @@ class GetLlamaHandle(LocalLLMHandle): thread = Thread(target=self._model.generate, kwargs=generation_kwargs) thread.start() generated_text = "" - for new_text in streamer: + for new_text in streamer: generated_text += new_text if not console_slience: print(new_text, end='') yield generated_text.lstrip(prompt_tk_back).rstrip("") if not console_slience: print() # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=- - + def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 diff --git a/request_llms/bridge_moss.py b/request_llms/bridge_moss.py index ee8907c..967f723 100644 --- a/request_llms/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -18,7 +18,7 @@ class GetGLMHandle(Process): if self.check_dependency(): self.start() self.threadLock = threading.Lock() - + def check_dependency(self): # 主进程执行 try: import datasets, os @@ -54,9 +54,9 @@ class GetGLMHandle(Process): from models.tokenization_moss import MossTokenizer parser = argparse.ArgumentParser() - parser.add_argument("--model_name", default="fnlp/moss-moon-003-sft-int4", - choices=["fnlp/moss-moon-003-sft", - "fnlp/moss-moon-003-sft-int8", + parser.add_argument("--model_name", default="fnlp/moss-moon-003-sft-int4", + choices=["fnlp/moss-moon-003-sft", + "fnlp/moss-moon-003-sft-int8", "fnlp/moss-moon-003-sft-int4"], type=str) parser.add_argument("--gpu", default="0", type=str) args = parser.parse_args() @@ -76,7 +76,7 @@ class GetGLMHandle(Process): config = MossConfig.from_pretrained(model_path) self.tokenizer = MossTokenizer.from_pretrained(model_path) - if num_gpus > 1: + if num_gpus > 1: print("Waiting for all devices to be ready, it may take a few minutes...") with init_empty_weights(): raw_model = MossForCausalLM._from_config(config, torch_dtype=torch.float16) @@ -135,15 +135,15 @@ class GetGLMHandle(Process): inputs = self.tokenizer(self.prompt, return_tensors="pt") with torch.no_grad(): outputs = self.model.generate( - inputs.input_ids.cuda(), - attention_mask=inputs.attention_mask.cuda(), - max_length=2048, - do_sample=True, - top_k=40, - top_p=0.8, + inputs.input_ids.cuda(), + attention_mask=inputs.attention_mask.cuda(), + max_length=2048, + do_sample=True, + top_k=40, + top_p=0.8, temperature=0.7, repetition_penalty=1.02, - num_return_sequences=1, + num_return_sequences=1, eos_token_id=106068, 
pad_token_id=self.tokenizer.pad_token_id) response = self.tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True) @@ -167,7 +167,7 @@ class GetGLMHandle(Process): else: break self.threadLock.release() - + global moss_handle moss_handle = None ################################################################################# @@ -180,7 +180,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if moss_handle is None: moss_handle = GetGLMHandle() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + moss_handle.info - if not moss_handle.success: + if not moss_handle.success: error = moss_handle.info moss_handle = None raise RuntimeError(error) @@ -194,7 +194,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", response = "" for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: + if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") return response @@ -213,7 +213,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp moss_handle = GetGLMHandle() chatbot[-1] = (inputs, load_message + "\n\n" + moss_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if not moss_handle.success: + if not moss_handle.success: moss_handle = None return else: diff --git a/request_llms/bridge_qwen_local.py b/request_llms/bridge_qwen_local.py index e6c2dd5..f68493c 100644 --- a/request_llms/bridge_qwen_local.py +++ b/request_llms/bridge_qwen_local.py @@ -45,7 +45,7 @@ class GetQwenLMHandle(LocalLLMHandle): for response in self._model.chat_stream(self._tokenizer, query, history=history): yield response - + def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 diff --git a/request_llms/bridge_tgui.py b/request_llms/bridge_tgui.py index 3e03f7b..8a16f1b 100644 --- a/request_llms/bridge_tgui.py +++ b/request_llms/bridge_tgui.py @@ -76,7 +76,7 @@ async def run(context, max_token, temperature, top_p, addr, port): pass elif content["msg"] in ["process_generating", "process_completed"]: yield content["output"]["data"][0] - # You can search for your desired end indicator and + # You can search for your desired end indicator and # stop generation by closing the websocket here if (content["msg"] == "process_completed"): break @@ -117,12 +117,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp async def get_result(mutable): # "tgui:galactica-1.3b@localhost:7860" - async for response in run(context=prompt, max_token=llm_kwargs['max_length'], - temperature=llm_kwargs['temperature'], + async for response in run(context=prompt, max_token=llm_kwargs['max_length'], + temperature=llm_kwargs['temperature'], top_p=llm_kwargs['top_p'], addr=addr, port=port): print(response[len(mutable[0]):]) mutable[0] = response - if (time.time() - mutable[1]) > 3: + if (time.time() - mutable[1]) > 3: print('exit when no listener') break asyncio.run(get_result(mutable)) @@ -154,12 +154,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser def run_coorotine(observe_window): async def get_result(observe_window): - async for response in 
run(context=prompt, max_token=llm_kwargs['max_length'], - temperature=llm_kwargs['temperature'], + async for response in run(context=prompt, max_token=llm_kwargs['max_length'], + temperature=llm_kwargs['temperature'], top_p=llm_kwargs['top_p'], addr=addr, port=port): print(response[len(observe_window[0]):]) observe_window[0] = response - if (time.time() - observe_window[1]) > 5: + if (time.time() - observe_window[1]) > 5: print('exit when no listener') break asyncio.run(get_result(observe_window)) diff --git a/request_llms/chatglmoonx.py b/request_llms/chatglmoonx.py index 444181e..dbb83c9 100644 --- a/request_llms/chatglmoonx.py +++ b/request_llms/chatglmoonx.py @@ -119,7 +119,7 @@ class ChatGLMModel(): past_key_values = { k: v for k, v in zip(past_names, past_key_values) } next_token = self.sample_next_token(logits[0, -1], top_k=top_k, top_p=top_p, temperature=temperature) - + output_tokens += [next_token] if next_token == self.eop_token_id or len(output_tokens) > max_generated_tokens: diff --git a/request_llms/com_google.py b/request_llms/com_google.py index e66d659..75f6b53 100644 --- a/request_llms/com_google.py +++ b/request_llms/com_google.py @@ -114,8 +114,10 @@ def html_local_img(__file, layout="left", max_width=None, max_height=None, md=Tr class GoogleChatInit: - def __init__(self): - self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k" + def __init__(self, llm_kwargs): + from .bridge_all import model_info + endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + self.url_gemini = endpoint + "/%m:streamGenerateContent?key=%k" def generate_chat(self, inputs, llm_kwargs, history, system_prompt): headers, payload = self.generate_message_payload( diff --git a/request_llms/com_zhipuglm.py b/request_llms/com_zhipuglm.py index 2e96d3f..1127431 100644 --- a/request_llms/com_zhipuglm.py +++ b/request_llms/com_zhipuglm.py @@ -8,7 +8,7 @@ from toolbox import get_conf, encode_image, get_pictures_list import logging, os -def input_encode_handler(inputs, llm_kwargs): +def input_encode_handler(inputs, llm_kwargs): if llm_kwargs["most_recent_uploaded"].get("path"): image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"]) md_encode = [] diff --git a/request_llms/key_manager.py b/request_llms/key_manager.py index 8563d2e..d2c33f6 100644 --- a/request_llms/key_manager.py +++ b/request_llms/key_manager.py @@ -2,12 +2,12 @@ import random def Singleton(cls): _instance = {} - + def _singleton(*args, **kargs): if cls not in _instance: _instance[cls] = cls(*args, **kargs) return _instance[cls] - + return _singleton @@ -16,7 +16,7 @@ class OpenAI_ApiKeyManager(): def __init__(self, mode='blacklist') -> None: # self.key_avail_list = [] self.key_black_list = [] - + def add_key_to_blacklist(self, key): self.key_black_list.append(key) diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index ec7cfd2..47af9e3 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -90,7 +90,7 @@ class LocalLLMHandle(Process): return self.state def set_state(self, new_state): - # ⭐run in main process or 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process + # ⭐run in main process or 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process if self.is_main_process: self.state = new_state else: @@ -178,8 +178,8 @@ class LocalLLMHandle(Process): r = self.parent.recv() continue break - return - + return + def stream_chat(self, **kwargs): # ⭐run in main process if self.get_state() == "`准备就绪`": diff --git a/requirements.txt b/requirements.txt 
index 007c5a7..d2eced8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ pydantic==2.5.2 protobuf==3.18 transformers>=4.27.1 scipdf_parser>=0.52 +anthropic>=0.18.1 python-markdown-math pymdown-extensions websocket-client @@ -16,7 +17,6 @@ prompt_toolkit latex2mathml python-docx mdtex2html -anthropic pyautogen colorama Markdown diff --git a/shared_utils/key_pattern_manager.py b/shared_utils/key_pattern_manager.py index a617b7f..6f919f8 100644 --- a/shared_utils/key_pattern_manager.py +++ b/shared_utils/key_pattern_manager.py @@ -62,7 +62,7 @@ def select_api_key(keys, llm_model): avail_key_list = [] key_list = keys.split(',') - if llm_model.startswith('gpt-'): + if llm_model.startswith('gpt-') or llm_model.startswith('one-api-'): for k in key_list: if is_openai_api_key(k): avail_key_list.append(k) diff --git a/shared_utils/map_names.py b/shared_utils/map_names.py new file mode 100644 index 0000000..23ce66b --- /dev/null +++ b/shared_utils/map_names.py @@ -0,0 +1,34 @@ +import re +mapping_dic = { + # "qianfan": "qianfan(文心一言大模型)", + # "zhipuai": "zhipuai(智谱GLM4超级模型🔥)", + # "gpt-4-1106-preview": "gpt-4-1106-preview(新调优版本GPT-4🔥)", + # "gpt-4-vision-preview": "gpt-4-vision-preview(识图模型GPT-4V)", +} + +rev_mapping_dic = {} +for k, v in mapping_dic.items(): + rev_mapping_dic[v] = k + +def map_model_to_friendly_names(m): + if m in mapping_dic: + return mapping_dic[m] + return m + +def map_friendly_names_to_model(m): + if m in rev_mapping_dic: + return rev_mapping_dic[m] + return m + +def read_one_api_model_name(model: str): + """return real model name and max_token. + """ + max_token_pattern = r"\(max_token=(\d+)\)" + match = re.search(max_token_pattern, model) + if match: + max_token_tmp = match.group(1) # 获取 max_token 的值 + max_token_tmp = int(max_token_tmp) + model = re.sub(max_token_pattern, "", model) # 从原字符串中删除 "(max_token=...)" + else: + max_token_tmp = 4096 + return model, max_token_tmp \ No newline at end of file diff --git a/shared_utils/text_mask.py b/shared_utils/text_mask.py index d57fb1c..4ecb130 100644 --- a/shared_utils/text_mask.py +++ b/shared_utils/text_mask.py @@ -59,7 +59,7 @@ def apply_gpt_academic_string_mask_langbased(string, lang_reference): lang_reference = "hello world" 输出1 "注意,lang_reference这段文字是:英语" - + 输入2 string = "注意,lang_reference这段文字是中文" # 注意这里没有掩码tag,所以不会被处理 lang_reference = "hello world" diff --git a/toolbox.py b/toolbox.py index 77ceaec..b45fe7e 100644 --- a/toolbox.py +++ b/toolbox.py @@ -25,6 +25,9 @@ from shared_utils.text_mask import apply_gpt_academic_string_mask from shared_utils.text_mask import build_gpt_academic_masked_string from shared_utils.text_mask import apply_gpt_academic_string_mask_langbased from shared_utils.text_mask import build_gpt_academic_masked_string_langbased +from shared_utils.map_names import map_friendly_names_to_model +from shared_utils.map_names import map_model_to_friendly_names +from shared_utils.map_names import read_one_api_model_name from shared_utils.handle_upload import html_local_file from shared_utils.handle_upload import html_local_img from shared_utils.handle_upload import file_manifest_filter_type @@ -919,6 +922,18 @@ def have_any_recent_upload_image_files(chatbot): else: return False, None # most_recent_uploaded is too old +# Claude3 model supports graphic context dialogue, reads all images +def every_image_file_in_path(chatbot): + if chatbot is None: + return False, [] # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: + return 
False, [] # most_recent_uploaded is None + path = most_recent_uploaded["path"] + file_manifest = get_pictures_list(path) + if len(file_manifest) == 0: + return False, [] + return True, file_manifest # Function to encode the image def encode_image(image_path): @@ -939,3 +954,53 @@ def check_packages(packages=[]): spam_spec = importlib.util.find_spec(p) if spam_spec is None: raise ModuleNotFoundError + + +def map_file_to_sha256(file_path): + import hashlib + + with open(file_path, 'rb') as file: + content = file.read() + + # Calculate the SHA-256 hash of the file contents + sha_hash = hashlib.sha256(content).hexdigest() + + return sha_hash + + +def check_repeat_upload(new_pdf_path, pdf_hash): + ''' + 检查历史上传的文件是否与新上传的文件相同,如果相同则返回(True, 重复文件路径),否则返回(False,None) + ''' + from toolbox import get_conf + import PyPDF2 + + user_upload_dir = os.path.dirname(os.path.dirname(new_pdf_path)) + file_name = os.path.basename(new_pdf_path) + + file_manifest = [f for f in glob.glob(f'{user_upload_dir}/**/{file_name}', recursive=True)] + + for saved_file in file_manifest: + with open(new_pdf_path, 'rb') as file1, open(saved_file, 'rb') as file2: + reader1 = PyPDF2.PdfFileReader(file1) + reader2 = PyPDF2.PdfFileReader(file2) + + # 比较页数是否相同 + if reader1.getNumPages() != reader2.getNumPages(): + continue + + # 比较每一页的内容是否相同 + for page_num in range(reader1.getNumPages()): + page1 = reader1.getPage(page_num).extractText() + page2 = reader2.getPage(page_num).extractText() + if page1 != page2: + continue + + maybe_project_dir = glob.glob('{}/**/{}'.format(get_log_folder(), pdf_hash + ".tag"), recursive=True) + + + if len(maybe_project_dir) > 0: + return True, os.path.dirname(maybe_project_dir[0]) + + # 如果所有页的内容都相同,返回 True + return False, None \ No newline at end of file diff --git a/version b/version index 0cb5958..ed934e2 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.72, + "version": 3.73, "show_feature": true, - "new_feature": "支持切换多个智谱ai模型 <-> 用绘图功能增强部分插件 <-> 基础功能区支持自动切换中英提示词 <-> 支持Mermaid绘图库(让大模型绘制脑图) <-> 支持Gemini-pro <-> 支持直接拖拽文件到上传区 <-> 支持将图片粘贴到输入区" + "new_feature": "优化oneapi接入方法 <-> 接入月之暗面模型 <-> 支持切换多个智谱ai模型 <-> 用绘图功能增强部分插件 <-> 基础功能区支持自动切换中英提示词 <-> 支持Mermaid绘图库(让大模型绘制脑图)" }
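For reference, a minimal usage sketch of the one-api model-name parsing introduced in shared_utils/map_names.py. The example name "one-api-mixtral-8x7b(max_token=6666)" is the one quoted in the bridge_all.py comment above; everything else here is illustrative:

    from shared_utils.map_names import read_one_api_model_name   # also re-exported via toolbox.py

    # "(max_token=...)" suffix present: it is stripped and its value becomes the context limit
    name, max_token = read_one_api_model_name("one-api-mixtral-8x7b(max_token=6666)")
    # name == "one-api-mixtral-8x7b", max_token == 6666

    # no suffix: the name is returned unchanged and max_token falls back to the 4096 default
    name, max_token = read_one_api_model_name("one-api-mixtral-8x7b")
    # name == "one-api-mixtral-8x7b", max_token == 4096

In bridge_all.py the parsed max_token populates model_info[model]["max_token"] for each "one-api-" entry, while bridge_chatgpt.py strips the "one-api-" prefix (and the suffix) before building the request payload.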