From cf085565a7c6ad11687eddb0e1541674e2534fac Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Sat, 28 Oct 2023 17:44:17 +0800
Subject: [PATCH] rename folder

---
 .gitignore | 6 +-
 README.md | 6 +-
 check_proxy.py | 2 +-
 config.py | 2 +-
 crazy_functions/Latex全文润色.py | 2 +-
 crazy_functions/Latex全文翻译.py | 2 +-
 crazy_functions/crazy_utils.py | 6 +-
 crazy_functions/latex_fns/latex_actions.py | 2 +-
 crazy_functions/pdf_fns/parse_pdf.py | 2 +-
 crazy_functions/vt_fns/vt_call_plugin.py | 2 +-
 crazy_functions/vt_fns/vt_modify_config.py | 2 +-
 crazy_functions/图片生成.py | 2 +-
 crazy_functions/总结word文档.py | 2 +-
 crazy_functions/总结音视频.py | 2 +-
 crazy_functions/批量Markdown翻译.py | 2 +-
 crazy_functions/批量总结PDF文档.py | 2 +-
 crazy_functions/批量翻译PDF文档_多线程.py | 2 +-
 crazy_functions/理解PDF文档内容.py | 2 +-
 crazy_functions/联网的ChatGPT.py | 2 +-
 crazy_functions/联网的ChatGPT_bing版.py | 2 +-
 crazy_functions/虚空终端.py | 2 +-
 crazy_functions/解析JupyterNotebook.py | 2 +-
 crazy_functions/语音助手.py | 2 +-
 docker-compose.yml | 2 +-
 docs/GithubAction+AllCapacity | 10 +-
 docs/GithubAction+ChatGLM+Moss | 10 +-
 docs/GithubAction+JittorLLMs | 8 +-
 docs/README.md.German.md | 8 +-
 docs/README.md.Italian.md | 8 +-
 docs/README.md.Korean.md | 6 +-
 docs/README.md.Portuguese.md | 8 +-
 docs/README_EN.md | 8 +-
 docs/README_FR.md | 8 +-
 docs/README_JP.md | 8 +-
 docs/README_RS.md | 8 +-
 docs/self_analysis.md | 96 +++++++++----------
 docs/translate_english.json | 4 +-
 docs/translate_japanese.json | 4 +-
 docs/translate_traditionalchinese.json | 4 +-
 main.py | 2 +-
 {request_llm => request_llms}/README.md | 2 +-
 {request_llm => request_llms}/bridge_all.py | 0
 .../bridge_chatglm.py | 6 +-
 .../bridge_chatglmft.py | 8 +-
 .../bridge_chatglmonnx.py | 10 +-
 .../bridge_chatgpt.py | 0
 .../bridge_chatgpt_website.py | 0
 .../bridge_claude.py | 0
 .../bridge_internlm.py | 2 +-
 .../bridge_jittorllms_llama.py | 12 +--
 .../bridge_jittorllms_pangualpha.py | 12 +--
 .../bridge_jittorllms_rwkv.py | 12 +--
 .../bridge_llama2.py | 2 +-
 {request_llm => request_llms}/bridge_moss.py | 12 +--
 .../bridge_newbingfree.py | 8 +-
 .../bridge_qianfan.py | 4 +-
 {request_llm => request_llms}/bridge_qwen.py | 2 +-
 {request_llm => request_llms}/bridge_spark.py | 4 +-
 .../bridge_stackclaude.py | 6 +-
 {request_llm => request_llms}/bridge_tgui.py | 0
 {request_llm => request_llms}/chatglmoonx.py | 0
 {request_llm => request_llms}/com_sparkapi.py | 0
 .../edge_gpt_free.py | 0
 .../local_llm_class.py | 4 +-
 .../requirements_chatglm.txt | 0
 .../requirements_chatglm_onnx.txt | 0
 .../requirements_jittorllms.txt | 0
 .../requirements_moss.txt | 0
 .../requirements_newbing.txt | 0
 .../requirements_qwen.txt | 0
 .../requirements_slackclaude.txt | 0
 tests/test_llms.py | 16 ++--
 toolbox.py | 4 +-
 73 files changed, 193 insertions(+), 193 deletions(-)
 rename {request_llm => request_llms}/README.md (96%)
 rename {request_llm => request_llms}/bridge_all.py (100%)
 rename {request_llm => request_llms}/bridge_chatglm.py (97%)
 rename {request_llm => request_llms}/bridge_chatglmft.py (97%)
 rename {request_llm => request_llms}/bridge_chatglmonnx.py (83%)
 rename {request_llm => request_llms}/bridge_chatgpt.py (100%)
 rename {request_llm => request_llms}/bridge_chatgpt_website.py (100%)
 rename {request_llm => request_llms}/bridge_claude.py (100%)
 rename {request_llm => request_llms}/bridge_internlm.py (99%)
 rename {request_llm => request_llms}/bridge_jittorllms_llama.py (93%)
 rename {request_llm => request_llms}/bridge_jittorllms_pangualpha.py (93%)
 rename {request_llm => request_llms}/bridge_jittorllms_rwkv.py (93%) rename
{request_llm => request_llms}/bridge_llama2.py (98%) rename {request_llm => request_llms}/bridge_moss.py (96%) rename {request_llm => request_llms}/bridge_newbingfree.py (97%) rename {request_llm => request_llms}/bridge_qianfan.py (98%) rename {request_llm => request_llms}/bridge_qwen.py (97%) rename {request_llm => request_llms}/bridge_spark.py (95%) rename {request_llm => request_llms}/bridge_stackclaude.py (98%) rename {request_llm => request_llms}/bridge_tgui.py (100%) rename {request_llm => request_llms}/chatglmoonx.py (100%) rename {request_llm => request_llms}/com_sparkapi.py (100%) rename {request_llm => request_llms}/edge_gpt_free.py (100%) rename {request_llm => request_llms}/local_llm_class.py (98%) rename {request_llm => request_llms}/requirements_chatglm.txt (100%) rename {request_llm => request_llms}/requirements_chatglm_onnx.txt (100%) rename {request_llm => request_llms}/requirements_jittorllms.txt (100%) rename {request_llm => request_llms}/requirements_moss.txt (100%) rename {request_llm => request_llms}/requirements_newbing.txt (100%) rename {request_llm => request_llms}/requirements_qwen.txt (100%) rename {request_llm => request_llms}/requirements_slackclaude.txt (100%) diff --git a/.gitignore b/.gitignore index c4df287..286a67d 100644 --- a/.gitignore +++ b/.gitignore @@ -146,9 +146,9 @@ debug* private* crazy_functions/test_project/pdf_and_word crazy_functions/test_samples -request_llm/jittorllms +request_llms/jittorllms multi-language -request_llm/moss +request_llms/moss media flagged -request_llm/ChatGLM-6b-onnx-u8s8 +request_llms/ChatGLM-6b-onnx-u8s8 diff --git a/README.md b/README.md index 77ff15e..667636c 100644 --- a/README.md +++ b/README.md @@ -126,11 +126,11 @@ python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步 【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): ```sh # 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +python -m pip install -r request_llms/requirements_chatglm.txt # 【可选步骤II】支持复旦MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径 +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 注意执行此行代码时,必须处于项目根路径 # 【可选步骤III】支持RWKV Runner 参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner diff --git a/check_proxy.py b/check_proxy.py index 740eed2..75de7ab 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -156,7 +156,7 @@ def auto_update(raise_error=False): def warm_up_modules(): print('正在执行一些模块的预热...') from toolbox import ProxyNetworkActivate - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info with ProxyNetworkActivate("Warmup_Modules"): enc = model_info["gpt-3.5-turbo"]['tokenizer'] enc.encode("模块预热", disallowed_special=()) diff --git a/config.py b/config.py index 56c8ea3..a18bc4a 100644 --- a/config.py +++ b/config.py @@ -136,7 +136,7 @@ SSL_CERTFILE = "" API_ORG = "" -# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md +# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md 
SLACK_CLAUDE_BOT_ID = '' SLACK_CLAUDE_USER_TOKEN = '' diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py index 462f965..268a344 100644 --- a/crazy_functions/Latex全文润色.py +++ b/crazy_functions/Latex全文润色.py @@ -11,7 +11,7 @@ class PaperFileGroup(): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py index b5aad71..697f5ac 100644 --- a/crazy_functions/Latex全文翻译.py +++ b/crazy_functions/Latex全文翻译.py @@ -11,7 +11,7 @@ class PaperFileGroup(): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 8533d08..04a4e67 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -5,7 +5,7 @@ import logging def input_clipping(inputs, history, max_token_limit): import numpy as np - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) @@ -63,7 +63,7 @@ def request_gpt_model_in_new_thread_with_ui_alive( """ import time from concurrent.futures import ThreadPoolExecutor - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection # 用户反馈 chatbot.append([inputs_show_user, ""]) yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 @@ -177,7 +177,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( """ import time, random from concurrent.futures import ThreadPoolExecutor - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection assert len(inputs_array) == len(history_array) assert len(inputs_array) == len(sys_prompt_array) if max_workers == -1: # 读取配置文件 diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index 7e561df..ead3bc4 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -165,7 +165,7 @@ class LatexPaperFileGroup(): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 9853fd5..396b608 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -103,7 +103,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi inputs_show_user_array = [] # get_token_num - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info[llm_kwargs['llm_model']]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) diff --git 
a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 455ac88..f33644d 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -1,7 +1,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError import copy, json, pickle, os, sys, time diff --git a/crazy_functions/vt_fns/vt_modify_config.py b/crazy_functions/vt_fns/vt_modify_config.py index e7fd745..0e2b314 100644 --- a/crazy_functions/vt_fns/vt_modify_config.py +++ b/crazy_functions/vt_fns/vt_modify_config.py @@ -1,7 +1,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import update_ui_lastest_msg, get_conf -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO import copy, json, pickle, os, sys diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index 51a1baf..09bd9be 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -5,7 +5,7 @@ import datetime def gen_image(llm_kwargs, prompt, resolution="256x256"): import requests, json, time, os - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info proxies, = get_conf('proxies') # Set up OpenAI API key and model diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index 4ea753c..7c822e9 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -32,7 +32,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot print(file_content) # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info max_token = model_info[llm_kwargs['llm_model']]['max_token'] TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4 paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index 7c113f4..b946d37 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -41,7 +41,7 @@ def split_audio_file(filename, split_duration=1000): def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history): import os, requests from moviepy.editor import AudioFileClip - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info # 设置OpenAI密钥和模型 api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) diff --git a/crazy_functions/批量Markdown翻译.py b/crazy_functions/批量Markdown翻译.py index 9485b1e..e245b72 100644 --- a/crazy_functions/批量Markdown翻译.py +++ b/crazy_functions/批量Markdown翻译.py @@ -13,7 +13,7 @@ class PaperFileGroup(): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index b87d482..57a6cdf 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py 
@@ -21,7 +21,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, TOKEN_LIMIT_PER_FRAGMENT = 2500 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index 79c4a26..f2e5cf9 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -95,7 +95,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, # 递归地切割PDF文件 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py index f1a89a7..afc1223 100644 --- a/crazy_functions/理解PDF文档内容.py +++ b/crazy_functions/理解PDF文档内容.py @@ -19,7 +19,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro TOKEN_LIMIT_PER_FRAGMENT = 2500 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git a/crazy_functions/联网的ChatGPT.py b/crazy_functions/联网的ChatGPT.py index 4ed9aeb..be286bc 100644 --- a/crazy_functions/联网的ChatGPT.py +++ b/crazy_functions/联网的ChatGPT.py @@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping import requests from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info +from request_llms.bridge_all import model_info def google(query, proxies): query = query # 在此处替换您要搜索的关键词 diff --git a/crazy_functions/联网的ChatGPT_bing版.py b/crazy_functions/联网的ChatGPT_bing版.py index db5adb7..666fcb8 100644 --- a/crazy_functions/联网的ChatGPT_bing版.py +++ b/crazy_functions/联网的ChatGPT_bing版.py @@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping import requests from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info +from request_llms.bridge_all import model_info def bing_search(query, proxies=None): diff --git a/crazy_functions/虚空终端.py b/crazy_functions/虚空终端.py index 5f33249..439e71c 100644 --- a/crazy_functions/虚空终端.py +++ b/crazy_functions/虚空终端.py @@ -48,7 +48,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import CatchException, update_ui, is_the_upload_folder from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from crazy_functions.crazy_utils import input_clipping from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError 
diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py index d4a3b49..709b7e1 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -13,7 +13,7 @@ class PaperFileGroup(): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len( enc.encode(txt, disallowed_special=())) diff --git a/crazy_functions/语音助手.py b/crazy_functions/语音助手.py index f48286d..3e93cea 100644 --- a/crazy_functions/语音助手.py +++ b/crazy_functions/语音助手.py @@ -2,7 +2,7 @@ from toolbox import update_ui from toolbox import CatchException, get_conf, markdown_convertion from crazy_functions.crazy_utils import input_clipping from crazy_functions.agent_fns.watchdog import WatchDog -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection import threading, time import numpy as np from .live_audio.aliyunASR import AliyunASR diff --git a/docker-compose.yml b/docker-compose.yml index dd40dd1..9472a0f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -137,7 +137,7 @@ services: # P.S. 通过对 command 进行微调,可以便捷地安装额外的依赖 # command: > - # bash -c "pip install -r request_llm/requirements_qwen.txt && python3 -u main.py" + # bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py" ### =================================================== ### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity index bf9482d..4ba0e31 100644 --- a/docs/GithubAction+AllCapacity +++ b/docs/GithubAction+AllCapacity @@ -19,13 +19,13 @@ RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad sc WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic -RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llm/requirements_moss.txt -RUN python3 -m pip install -r request_llm/requirements_qwen.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m pip install -r request_llm/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_moss.txt +RUN python3 -m pip install -r request_llms/requirements_qwen.txt +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt RUN python3 -m pip install nougat-ocr diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss index 3087d55..3212dc2 100644 --- a/docs/GithubAction+ChatGLM+Moss +++ b/docs/GithubAction+ChatGLM+Moss @@ -14,12 +14,12 @@ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/ WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic -RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llm/requirements_moss.txt -RUN python3 -m pip install -r request_llm/requirements_qwen.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m 
pip install -r request_llm/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_moss.txt +RUN python3 -m pip install -r request_llms/requirements_qwen.txt +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt diff --git a/docs/GithubAction+JittorLLMs b/docs/GithubAction+JittorLLMs index dc883bc..189eb24 100644 --- a/docs/GithubAction+JittorLLMs +++ b/docs/GithubAction+JittorLLMs @@ -16,12 +16,12 @@ WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m pip install -r request_llm/requirements_newbing.txt -RUN python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I # 下载JittorLLMs -RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms +RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms # 禁用缓存,确保更新代码 ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache diff --git a/docs/README.md.German.md b/docs/README.md.German.md index d514de3..b7a53f1 100644 --- a/docs/README.md.German.md +++ b/docs/README.md.German.md @@ -103,12 +103,12 @@ python -m pip install -r requirements.txt # Same step as pip installation [Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (Prerequisites: Familiar with Python + Used Pytorch + Sufficient computer configuration): ```sh -# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llm/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. 
To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llms/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the project root path # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (jittorllms series currently only supports docker solutions): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md index 76efe18..1e24a53 100644 --- a/docs/README.md.Italian.md +++ b/docs/README.md.Italian.md @@ -109,12 +109,12 @@ python -m pip install -r requirements.txt # questo passaggio funziona allo stess 【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente): ```sh -# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. 
Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llms/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Passaggio facoltativo II】 Supporto a MOSS di Fudan -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto # 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md index 61b8e4a..db4b2d8 100644 --- a/docs/README.md.Korean.md +++ b/docs/README.md.Korean.md @@ -104,11 +104,11 @@ python -m pip install -r requirements.txt # 이 단계도 pip install의 단계 # 1 : 기본 설치된 것들은 torch + cpu 버전입니다. cuda를 사용하려면 torch를 제거한 다음 torch + cuda를 다시 설치해야합니다. # 2 : 모델을 로드할 수 없는 기계 구성 때문에, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)를 # AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)로 변경합니다. -python -m pip install -r request_llm/requirements_chatglm.txt +python -m pip install -r request_llms/requirements_chatglm.txt # [선택 사항 II] Fudan MOSS 지원 -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다. # [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오. 
# 현재 지원되는 전체 모델 : diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md index 2347d5a..4a3aba0 100644 --- a/docs/README.md.Portuguese.md +++ b/docs/README.md.Portuguese.md @@ -119,12 +119,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisite: familiar with Python + used Pytorch + computer configuration is strong): ```sh -# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Optional Step II】support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When executing this line of code, you must be in the project root path # 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_EN.md b/docs/README_EN.md index 02b8588..029186c 100644 --- a/docs/README_EN.md +++ b/docs/README_EN.md @@ -106,12 +106,12 @@ python -m pip install -r requirements.txt # this step is the same as pip install [Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + used Pytorch + computer configuration is strong enough): ```sh -# [Optional Step I] Support Tsinghua ChatGLM. 
Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root directory of the project # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_FR.md b/docs/README_FR.md index af3bb42..62d81eb 100644 --- a/docs/README_FR.md +++ b/docs/README_FR.md @@ -111,12 +111,12 @@ python -m pip install -r requirements.txt # Same step as pip instalation 【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur): ```sh -# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Optional Step I】 Support THU ChatGLM. 
Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llms/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Optional Step II】 Support FDU MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When running this line of code, you must be in the project root path. # 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_JP.md b/docs/README_JP.md index 46145e1..8ade71b 100644 --- a/docs/README_JP.md +++ b/docs/README_JP.md @@ -120,12 +120,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (precondition: familiar with Python + used Pytorch + computer configuration). Strong enough): ```sh -# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). -python -m pip install -r request_llm/requirements_chatglm.txt +# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). +python -m pip install -r request_llms/requirements_chatglm.txt # Optional Step II: Support Fudan MOSS. 
-python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, it must be in the project root. # 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_RS.md b/docs/README_RS.md index d4888a0..52d18df 100644 --- a/docs/README_RS.md +++ b/docs/README_RS.md @@ -108,12 +108,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong): ```sh -# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, you must be in the project root path # [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. 
Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/self_analysis.md b/docs/self_analysis.md index ebc2337..c373619 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -38,20 +38,20 @@ | crazy_functions\读文章写摘要.py | 对论文进行解析和全文摘要生成 | | crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | | crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | +| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 | +| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | +| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 | +| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | +| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | +| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | +| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 | +| request_llms\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | +| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | +| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | +| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | +| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | +| request_llms\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | +| request_llms\test_llms.py | 对llm模型进行单元测试。 | ## 接下来请你逐文件分析下面的工程[0/48] 请对下面的程序文件做一个概述: check_proxy.py @@ -129,7 +129,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 1. `input_clipping`: 该函数用于裁剪输入文本长度,使其不超过一定的限制。 2. 
`request_gpt_model_in_new_thread_with_ui_alive`: 该函数用于请求 GPT 模型并保持用户界面的响应,支持多线程和实时更新用户界面。 -这两个函数都依赖于从 `toolbox` 和 `request_llm` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 +这两个函数都依赖于从 `toolbox` 和 `request_llms` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 ## [12/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文润色.py @@ -137,7 +137,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [13/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文翻译.py -这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llm` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 +这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llms` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 ## [14/48] 请对下面的程序文件做一个概述: crazy_functions\__init__.py @@ -227,19 +227,19 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 该程序文件定义了一个名为高阶功能模板函数的函数,该函数接受多个参数,包括输入的文本、gpt模型参数、插件模型参数、聊天显示框的句柄、聊天历史等,并利用送出请求,使用 Unsplash API 发送相关图片。其中,为了避免输入溢出,函数会在开始时清空历史。函数也有一些 UI 更新的语句。该程序文件还依赖于其他两个模块:CatchException 和 update_ui,以及一个名为 request_gpt_model_in_new_thread_with_ui_alive 的来自 crazy_utils 模块(应该是自定义的工具包)的函数。 -## [34/48] 请对下面的程序文件做一个概述: request_llm\bridge_all.py +## [34/48] 请对下面的程序文件做一个概述: request_llms\bridge_all.py 该文件包含两个函数:predict和predict_no_ui_long_connection,用于基于不同的LLM模型进行对话。该文件还包含一个lazyloadTiktoken类和一个LLM_CATCH_EXCEPTION修饰器函数。其中lazyloadTiktoken类用于懒加载模型的tokenizer,LLM_CATCH_EXCEPTION用于错误处理。整个文件还定义了一些全局变量和模型信息字典,用于引用和配置LLM模型。 -## [35/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatglm.py +## [35/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatglm.py 这是一个Python程序文件,名为`bridge_chatglm.py`,其中定义了一个名为`GetGLMHandle`的类和三个方法:`predict_no_ui_long_connection`、 `predict`和 `stream_chat`。该文件依赖于多个Python库,如`transformers`和`sentencepiece`。该文件实现了一个聊天机器人,使用ChatGLM模型来生成回复,支持单线程和多线程方式。程序启动时需要加载ChatGLM的模型和tokenizer,需要一段时间。在配置文件`config.py`中设置参数会影响模型的内存和显存使用,因此程序可能会导致低配计算机卡死。 -## [36/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatgpt.py +## [36/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatgpt.py -该文件为 Python 代码文件,文件名为 request_llm\bridge_chatgpt.py。该代码文件主要提供三个函数:predict、predict_no_ui和 predict_no_ui_long_connection,用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。 +该文件为 Python 代码文件,文件名为 request_llms\bridge_chatgpt.py。该代码文件主要提供三个函数:predict、predict_no_ui和 predict_no_ui_long_connection,用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。 -## [37/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_llama.py +## [37/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_llama.py 该代码文件实现了一个聊天机器人,其中使用了 JittorLLMs 模型。主要包括以下几个部分: 1. 
GetGLMHandle 类:一个进程类,用于加载 JittorLLMs 模型并接收并处理请求。 @@ -248,17 +248,17 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 这个文件中还有一些辅助函数和全局变量,例如 importlib、time、threading 等。 -## [38/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_pangualpha.py +## [38/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_pangualpha.py 这个文件是为了实现使用jittorllms(一种机器学习模型)来进行聊天功能的代码。其中包括了模型加载、模型的参数加载、消息的收发等相关操作。其中使用了多进程和多线程来提高性能和效率。代码中还包括了处理依赖关系的函数和预处理函数等。 -## [39/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_rwkv.py +## [39/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_rwkv.py 这个文件是一个Python程序,文件名为request_llm\bridge_jittorllms_rwkv.py。它依赖transformers、time、threading、importlib、multiprocessing等库。在文件中,通过定义GetGLMHandle类加载jittorllms模型参数和定义stream_chat方法来实现与jittorllms模型的交互。同时,该文件还定义了predict_no_ui_long_connection和predict方法来处理历史信息、调用jittorllms模型、接收回复信息并输出结果。 -## [40/48] 请对下面的程序文件做一个概述: request_llm\bridge_moss.py +## [40/48] 请对下面的程序文件做一个概述: request_llms\bridge_moss.py -该文件为一个Python源代码文件,文件名为 request_llm\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。 +该文件为一个Python源代码文件,文件名为 request_llms\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。 GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个子进程并加载 MOSS 模型参数,通过 Pipe 进行主子进程的通信。该类还定义了 check_dependency、moss_init、run 和 stream_chat 等方法,其中 check_dependency 和 moss_init 是子进程的初始化方法,run 是子进程运行方法,stream_chat 实现了主进程和子进程的交互过程。 @@ -266,7 +266,7 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 函数 predict 是单线程方法,通过调用 update_ui 将交互过程中 MOSS 的回复实时更新到UI(User Interface)中,并执行一个 named function(additional_fn)指定的函数对输入进行预处理。 -## [41/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbing.py +## [41/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbing.py 这是一个名为`bridge_newbing.py`的程序文件,包含三个部分: @@ -276,11 +276,11 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 第三部分定义了一个名为`newbing_handle`的全局变量,并导出了`predict_no_ui_long_connection`和`predict`这两个方法,以供其他程序可以调用。 -## [42/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbingfree.py +## [42/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbingfree.py 这个Python文件包含了三部分内容。第一部分是来自edge_gpt_free.py文件的聊天机器人程序。第二部分是子进程Worker,用于调用主体。第三部分提供了两个函数:predict_no_ui_long_connection和predict用于调用NewBing聊天机器人和返回响应。其中predict函数还提供了一些参数用于控制聊天机器人的回复和更新UI界面。 -## [43/48] 请对下面的程序文件做一个概述: request_llm\bridge_stackclaude.py +## [43/48] 请对下面的程序文件做一个概述: request_llms\bridge_stackclaude.py 这是一个Python源代码文件,文件名为request_llm\bridge_stackclaude.py。代码分为三个主要部分: @@ -290,21 +290,21 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 第三部分定义了predict_no_ui_long_connection和predict两个函数,主要用于通过调用ClaudeHandle对象的stream_chat方法来获取Claude的回复,并更新ui以显示相关信息。其中predict函数采用单线程方法,而predict_no_ui_long_connection函数使用多线程方法。 -## [44/48] 请对下面的程序文件做一个概述: request_llm\bridge_tgui.py +## [44/48] 请对下面的程序文件做一个概述: request_llms\bridge_tgui.py 该文件是一个Python代码文件,名为request_llm\bridge_tgui.py。它包含了一些函数用于与chatbot UI交互,并通过WebSocket协议与远程LLM模型通信完成文本生成任务,其中最重要的函数是predict()和predict_no_ui_long_connection()。这个程序还有其他的辅助函数,如random_hash()。整个代码文件在协作的基础上完成了一次修改。 -## [45/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt.py +## [45/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt.py 该文件是一个用于调用Bing chatbot API的Python程序,它由多个类和辅助函数构成,可以根据给定的对话连接在对话中提出问题,使用websocket与远程服务通信。程序实现了一个聊天机器人,可以为用户提供人工智能聊天。 -## [46/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt_free.py +## [46/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt_free.py 该代码文件为一个会话API,可通过Chathub发送消息以返回响应。其中使用了 aiohttp 和 httpx 库进行网络请求并发送。代码中包含了一些函数和常量,多数用于生成请求数据或是请求头信息等。同时该代码文件还包含了一个 Conversation 类,调用该类可实现对话交互。 -## [47/48] 请对下面的程序文件做一个概述: request_llm\test_llms.py +## [47/48] 请对下面的程序文件做一个概述: request_llms\test_llms.py 
-这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llm.bridge_newbingfree"的模块,然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。 +这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llms.bridge_newbingfree"的模块,然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。 ## 用一张Markdown表格简要描述以下文件的功能: check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py。根据以上分析,用一句话概括程序的整体功能。 @@ -355,24 +355,24 @@ crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生 概括程序的整体功能:提供了一系列处理文本、文件和代码的功能,使用了各类语言模型、多线程、网络请求和数据解析技术来提高效率和精度。 ## 用一张Markdown表格简要描述以下文件的功能: -crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py。根据以上分析,用一句话概括程序的整体功能。 +crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llms\bridge_all.py, request_llms\bridge_chatglm.py, request_llms\bridge_chatgpt.py, request_llms\bridge_jittorllms_llama.py, request_llms\bridge_jittorllms_pangualpha.py, request_llms\bridge_jittorllms_rwkv.py, request_llms\bridge_moss.py, request_llms\bridge_newbing.py, request_llms\bridge_newbingfree.py, request_llms\bridge_stackclaude.py, request_llms\bridge_tgui.py, request_llms\edge_gpt.py, request_llms\edge_gpt_free.py, request_llms\test_llms.py。根据以上分析,用一句话概括程序的整体功能。 | 文件名 | 功能描述 | | --- | --- | | crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | | crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | +| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 | +| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | +| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 | +| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | +| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | +| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | +| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 | +| request_llms\bridge_newbing.py | 
使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | +| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | +| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | +| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | +| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | +| request_llms\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | +| request_llms\test_llms.py | 对llm模型进行单元测试。 | | 程序整体功能 | 实现不同种类的聊天机器人,可以根据输入进行文本生成。 | diff --git a/docs/translate_english.json b/docs/translate_english.json index c13ac81..850cae5 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -1184,7 +1184,7 @@ "Call ChatGLM fail 不能正常加载ChatGLM的参数": "Call ChatGLM fail, unable to load parameters for ChatGLM", "不能正常加载ChatGLM的参数!": "Unable to load parameters for ChatGLM!", "多线程方法": "Multithreading method", - "函数的说明请见 request_llm/bridge_all.py": "For function details, please see request_llm/bridge_all.py", + "函数的说明请见 request_llms/bridge_all.py": "For function details, please see request_llms/bridge_all.py", "程序终止": "Program terminated", "单线程方法": "Single-threaded method", "等待ChatGLM响应中": "Waiting for response from ChatGLM", @@ -1543,7 +1543,7 @@ "str类型": "str type", "所有音频都总结完成了吗": "Are all audio summaries completed?", "SummaryAudioVideo内容": "SummaryAudioVideo content", - "使用教程详情见 request_llm/README.md": "See request_llm/README.md for detailed usage instructions", + "使用教程详情见 request_llms/README.md": "See request_llms/README.md for detailed usage instructions", "删除中间文件夹": "Delete intermediate folder", "Claude组件初始化成功": "Claude component initialized successfully", "$c$ 是光速": "$c$ is the speed of light", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index fa3af4e..ae86dc0 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -782,7 +782,7 @@ "主进程统一调用函数接口": "メインプロセスが関数インターフェースを統一的に呼び出します", "再例如一个包含了待处理文件的路径": "処理待ちのファイルを含むパスの例", "负责把学术论文准确翻译成中文": "学術論文を正確に中国語に翻訳する責任があります", - "函数的说明请见 request_llm/bridge_all.py": "関数の説明については、request_llm/bridge_all.pyを参照してください", + "函数的说明请见 request_llms/bridge_all.py": "関数の説明については、request_llms/bridge_all.pyを参照してください", "然后回车提交": "そしてEnterを押して提出してください", "防止爆token": "トークンの爆発を防止する", "Latex项目全文中译英": "LaTeXプロジェクト全文の中国語から英語への翻訳", @@ -1616,7 +1616,7 @@ "正在重试": "再試行中", "从而更全面地理解项目的整体功能": "プロジェクトの全体的な機能をより理解するために", "正在等您说完问题": "質問が完了するのをお待ちしています", - "使用教程详情见 request_llm/README.md": "使用方法の詳細については、request_llm/README.mdを参照してください", + "使用教程详情见 request_llms/README.md": "使用方法の詳細については、request_llms/README.mdを参照してください", "6.25 加入判定latex模板的代码": "6.25 テンプレートの判定コードを追加", "找不到任何音频或视频文件": "音声またはビデオファイルが見つかりません", "请求GPT模型的": "GPTモデルのリクエスト", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index 53570ae..a677f10 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -123,7 +123,7 @@ "的第": "的第", "减少重复": "減少重複", "如果超过期限没有喂狗": "如果超過期限沒有餵狗", - "函数的说明请见 request_llm/bridge_all.py": "函數的說明請見 request_llm/bridge_all.py", + "函数的说明请见 request_llms/bridge_all.py": "函數的說明請見 request_llms/bridge_all.py", "第7步": "第7步", "说": "說", "中途接收可能的终止指令": "中途接收可能的終止指令", @@ -1887,7 +1887,7 @@ "请继续分析其他源代码": "請繼續分析其他源代碼", "质能方程式": "質能方程式", "功能尚不稳定": "功能尚不穩定", - "使用教程详情见 request_llm/README.md": "使用教程詳情見 request_llm/README.md", + "使用教程详情见 request_llms/README.md": "使用教程詳情見 request_llms/README.md", "从以上搜索结果中抽取信息": "從以上搜索結果中抽取信息", "虽然PDF生成失败了": "雖然PDF生成失敗了", "找图片": "尋找圖片", diff --git a/main.py b/main.py index 9f38995..991dd47 100644 --- a/main.py +++ b/main.py @@ -7,7 
+7,7 @@ def main(): import gradio as gr if gr.__version__ not in ['3.32.6']: raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") - from request_llm.bridge_all import predict + from request_llms.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') diff --git a/request_llm/README.md b/request_llms/README.md similarity index 96% rename from request_llm/README.md rename to request_llms/README.md index 545bc1f..92b856e 100644 --- a/request_llm/README.md +++ b/request_llms/README.md @@ -2,7 +2,7 @@ ## ChatGLM -- 安装依赖 `pip install -r request_llm/requirements_chatglm.txt` +- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt` - 修改配置,在config.py中将LLM_MODEL的值改为"chatglm" ``` sh diff --git a/request_llm/bridge_all.py b/request_llms/bridge_all.py similarity index 100% rename from request_llm/bridge_all.py rename to request_llms/bridge_all.py diff --git a/request_llm/bridge_chatglm.py b/request_llms/bridge_chatglm.py similarity index 97% rename from request_llm/bridge_chatglm.py rename to request_llms/bridge_chatglm.py index 387b3e2..194cd1a 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -27,7 +27,7 @@ class GetGLMHandle(Process): self.info = "依赖检测通过" self.success = True except: - self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" + self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。" self.success = False def ready(self): @@ -100,7 +100,7 @@ glm_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global glm_handle if glm_handle is None: @@ -131,7 +131,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py similarity index 97% rename from request_llm/bridge_chatglmft.py rename to request_llms/bridge_chatglmft.py index 4416382..8755bc1 100644 --- a/request_llm/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -44,7 +44,7 @@ class GetGLMFTHandle(Process): self.info = "依赖检测通过" self.success = True except: - self.info = "缺少ChatGLMFT的依赖,如果要使用ChatGLMFT,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" + self.info = "缺少ChatGLMFT的依赖,如果要使用ChatGLMFT,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。" self.success = False def ready(self): @@ -59,7 +59,7 @@ class GetGLMFTHandle(Process): if self.chatglmft_model is None: from transformers import AutoConfig import torch - # conf = 'request_llm/current_ptune_model.json' + # conf = 'request_llms/current_ptune_model.json' # if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息') # with 
open(conf, 'r', encoding='utf8') as f: # model_args = json.loads(f.read()) @@ -140,7 +140,7 @@ glmft_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global glmft_handle if glmft_handle is None: @@ -171,7 +171,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py similarity index 83% rename from request_llm/bridge_chatglmonnx.py rename to request_llms/bridge_chatglmonnx.py index 594bcca..312c684 100644 --- a/request_llm/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -1,5 +1,5 @@ model_name = "ChatGLM-ONNX" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm_onnx.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm_onnx.txt`" from transformers import AutoModel, AutoTokenizer @@ -28,13 +28,13 @@ class GetONNXGLMHandle(LocalLLMHandle): def load_model_and_tokenizer(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 import os, glob - if not len(glob.glob("./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件 + if not len(glob.glob("./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件 from huggingface_hub import snapshot_download - snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llm/ChatGLM-6b-onnx-u8s8") + snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llms/ChatGLM-6b-onnx-u8s8") def create_model(): return ChatGLMModel( - tokenizer_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model", - onnx_model_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx" + tokenizer_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model", + onnx_model_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx" ) self._model = create_model() return self._model, None diff --git a/request_llm/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py similarity index 100% rename from request_llm/bridge_chatgpt.py rename to request_llms/bridge_chatgpt.py diff --git a/request_llm/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py similarity index 100% rename from request_llm/bridge_chatgpt_website.py rename to request_llms/bridge_chatgpt_website.py diff --git a/request_llm/bridge_claude.py b/request_llms/bridge_claude.py similarity index 100% rename from request_llm/bridge_claude.py rename to request_llms/bridge_claude.py diff --git a/request_llm/bridge_internlm.py b/request_llms/bridge_internlm.py similarity index 99% rename from request_llm/bridge_internlm.py rename to request_llms/bridge_internlm.py index 0ec65b6..3304fe2 100644 --- a/request_llm/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -1,5 +1,5 @@ model_name = "InternLM" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer import time diff --git 
a/request_llm/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py similarity index 93% rename from request_llm/bridge_jittorllms_llama.py rename to request_llms/bridge_jittorllms_llama.py index d485357..6099cd6 100644 --- a/request_llm/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -28,8 +28,8 @@ class GetGLMHandle(Process): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ class GetGLMHandle(Process): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ llama_glm_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global llama_glm_handle if llama_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py similarity index 93% rename from request_llm/bridge_jittorllms_pangualpha.py rename to request_llms/bridge_jittorllms_pangualpha.py index 20a3021..eebefcc 100644 --- a/request_llm/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -28,8 +28,8 @@ class GetGLMHandle(Process): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ class GetGLMHandle(Process): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ pangu_glm_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global pangu_glm_handle if pangu_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py similarity index 93% rename from request_llm/bridge_jittorllms_rwkv.py rename to request_llms/bridge_jittorllms_rwkv.py index ee4f592..32ba3b8 100644 --- a/request_llm/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -28,8 +28,8 @@ class GetGLMHandle(Process): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ class GetGLMHandle(Process): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ rwkv_glm_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global rwkv_glm_handle if rwkv_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_llama2.py b/request_llms/bridge_llama2.py similarity index 98% rename from request_llm/bridge_llama2.py rename to request_llms/bridge_llama2.py index d1be446..bc8ef7e 100644 --- a/request_llm/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -1,5 +1,5 @@ model_name = "LLaMA" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer diff --git a/request_llm/bridge_moss.py b/request_llms/bridge_moss.py similarity index 96% rename from request_llm/bridge_moss.py rename to request_llms/bridge_moss.py index 3c6217d..5061fcf 100644 --- a/request_llm/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -24,12 +24,12 @@ class GetGLMHandle(Process): def check_dependency(self): # 主进程执行 try: import datasets, os - assert os.path.exists('request_llm/moss/models') + assert os.path.exists('request_llms/moss/models') self.info = "依赖检测通过" self.success = True except: self.info = """ - 缺少MOSS的依赖,如果要使用MOSS,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_moss.txt`和`git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss`安装MOSS的依赖。 + 缺少MOSS的依赖,如果要使用MOSS,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_moss.txt`和`git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss`安装MOSS的依赖。 """ self.success = False return self.success @@ -110,8 +110,8 @@ class GetGLMHandle(Process): def validate_path(): import os, sys root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/moss') - sys.path.append(root_dir_assume + '/request_llm/moss') + os.chdir(root_dir_assume + '/request_llms/moss') + sys.path.append(root_dir_assume + '/request_llms/moss') validate_path() # validate path so you can run from base directory try: @@ -176,7 +176,7 @@ moss_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global moss_handle if moss_handle is None: @@ -206,7 +206,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, 
llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_newbingfree.py b/request_llms/bridge_newbingfree.py similarity index 97% rename from request_llm/bridge_newbingfree.py rename to request_llms/bridge_newbingfree.py index c606645..b5bfb30 100644 --- a/request_llm/bridge_newbingfree.py +++ b/request_llms/bridge_newbingfree.py @@ -54,7 +54,7 @@ class NewBingHandle(Process): self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。" + self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_newbing.txt`安装Newbing的依赖。" self.success = False def ready(self): @@ -63,7 +63,7 @@ class NewBingHandle(Process): async def async_run(self): # 读取配置 NEWBING_STYLE, = get_conf('NEWBING_STYLE') - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info endpoint = model_info['newbing']['endpoint'] while True: # 等待 @@ -181,7 +181,7 @@ newbingfree_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global newbingfree_handle if (newbingfree_handle is None) or (not newbingfree_handle.success): @@ -210,7 +210,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ...")) diff --git a/request_llm/bridge_qianfan.py b/request_llms/bridge_qianfan.py similarity index 98% rename from request_llm/bridge_qianfan.py rename to request_llms/bridge_qianfan.py index be73976..bf78a34 100644 --- a/request_llm/bridge_qianfan.py +++ b/request_llms/bridge_qianfan.py @@ -119,7 +119,7 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ watch_dog_patience = 5 response = "" @@ -134,7 +134,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_qwen.py b/request_llms/bridge_qwen.py similarity index 97% rename from request_llm/bridge_qwen.py rename to request_llms/bridge_qwen.py index 07ed243..62682cf 100644 --- a/request_llm/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -1,5 +1,5 @@ model_name = "Qwen" -cmd_to_install = "`pip install -r request_llm/requirements_qwen.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`" from transformers import AutoModel, AutoTokenizer diff --git a/request_llm/bridge_spark.py b/request_llms/bridge_spark.py similarity index 
95% rename from request_llm/bridge_spark.py rename to request_llms/bridge_spark.py index 0fe925f..8c7bf59 100644 --- a/request_llm/bridge_spark.py +++ b/request_llms/bridge_spark.py @@ -16,7 +16,7 @@ def validate_key(): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ watch_dog_patience = 5 response = "" @@ -36,7 +36,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llm/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py similarity index 98% rename from request_llm/bridge_stackclaude.py rename to request_llms/bridge_stackclaude.py index 3f2ee67..48612b3 100644 --- a/request_llm/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -99,7 +99,7 @@ class ClaudeHandle(Process): self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。" + self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。" self.success = False def ready(self): @@ -204,7 +204,7 @@ claude_handle = None def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global claude_handle if (claude_handle is None) or (not claude_handle.success): @@ -234,7 +234,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ...")) diff --git a/request_llm/bridge_tgui.py b/request_llms/bridge_tgui.py similarity index 100% rename from request_llm/bridge_tgui.py rename to request_llms/bridge_tgui.py diff --git a/request_llm/chatglmoonx.py b/request_llms/chatglmoonx.py similarity index 100% rename from request_llm/chatglmoonx.py rename to request_llms/chatglmoonx.py diff --git a/request_llm/com_sparkapi.py b/request_llms/com_sparkapi.py similarity index 100% rename from request_llm/com_sparkapi.py rename to request_llms/com_sparkapi.py diff --git a/request_llm/edge_gpt_free.py b/request_llms/edge_gpt_free.py similarity index 100% rename from request_llm/edge_gpt_free.py rename to request_llms/edge_gpt_free.py diff --git a/request_llm/local_llm_class.py b/request_llms/local_llm_class.py similarity index 98% rename from request_llm/local_llm_class.py rename to request_llms/local_llm_class.py index c9c7253..e742d51 100644 --- a/request_llm/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -120,7 +120,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 
函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ _llm_handle = LLMSingletonClass() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info @@ -146,7 +146,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name): def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/requirements_chatglm.txt b/request_llms/requirements_chatglm.txt similarity index 100% rename from request_llm/requirements_chatglm.txt rename to request_llms/requirements_chatglm.txt diff --git a/request_llm/requirements_chatglm_onnx.txt b/request_llms/requirements_chatglm_onnx.txt similarity index 100% rename from request_llm/requirements_chatglm_onnx.txt rename to request_llms/requirements_chatglm_onnx.txt diff --git a/request_llm/requirements_jittorllms.txt b/request_llms/requirements_jittorllms.txt similarity index 100% rename from request_llm/requirements_jittorllms.txt rename to request_llms/requirements_jittorllms.txt diff --git a/request_llm/requirements_moss.txt b/request_llms/requirements_moss.txt similarity index 100% rename from request_llm/requirements_moss.txt rename to request_llms/requirements_moss.txt diff --git a/request_llm/requirements_newbing.txt b/request_llms/requirements_newbing.txt similarity index 100% rename from request_llm/requirements_newbing.txt rename to request_llms/requirements_newbing.txt diff --git a/request_llm/requirements_qwen.txt b/request_llms/requirements_qwen.txt similarity index 100% rename from request_llm/requirements_qwen.txt rename to request_llms/requirements_qwen.txt diff --git a/request_llm/requirements_slackclaude.txt b/request_llms/requirements_slackclaude.txt similarity index 100% rename from request_llm/requirements_slackclaude.txt rename to request_llms/requirements_slackclaude.txt diff --git a/tests/test_llms.py b/tests/test_llms.py index 75e2303..6b7019d 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -10,14 +10,14 @@ def validate_path(): validate_path() # validate path so you can run from base directory if __name__ == "__main__": - # from request_llm.bridge_newbingfree import predict_no_ui_long_connection - # from request_llm.bridge_moss import predict_no_ui_long_connection - # from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection - # from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection - # from request_llm.bridge_claude import predict_no_ui_long_connection - # from request_llm.bridge_internlm import predict_no_ui_long_connection - # from request_llm.bridge_qwen import predict_no_ui_long_connection - from request_llm.bridge_spark import predict_no_ui_long_connection + # from request_llms.bridge_newbingfree import predict_no_ui_long_connection + # from request_llms.bridge_moss import predict_no_ui_long_connection + # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection + # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection + # from request_llms.bridge_claude import predict_no_ui_long_connection + # from request_llms.bridge_internlm import predict_no_ui_long_connection + # from request_llms.bridge_qwen import predict_no_ui_long_connection + from request_llms.bridge_spark import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/toolbox.py 
b/toolbox.py index 07a9fda..4a783a3 100644 --- a/toolbox.py +++ b/toolbox.py @@ -878,7 +878,7 @@ def clip_history(inputs, history, tokenizer, max_token_limit): 直到历史记录的标记数量降低到阈值以下。 """ import numpy as np - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info def get_token_num(txt): return len(tokenizer.encode(txt, disallowed_special=())) input_token_num = get_token_num(inputs) @@ -1069,7 +1069,7 @@ def get_plugin_handle(plugin_name): def get_chat_handle(): """ """ - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection return predict_no_ui_long_connection def get_plugin_default_kwargs():
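
The hunks above are a mechanical rename of the `request_llm` package to `request_llms`, so the main risk after applying the patch is a stray reference to the old path somewhere in the tree. Below is a minimal verification sketch (not part of the patch); the file-extension filter and project-root assumption are illustrative, and the regex deliberately ignores the new `request_llms` spelling.

```python
# Hypothetical helper, not part of this patch: list files that still mention
# the old "request_llm" package after the rename. Run from the project root.
import pathlib
import re

# Match "request_llm" only when it is NOT followed by "s", so the new
# "request_llms" spelling does not count as a stale reference.
STALE = re.compile(r"request_llm(?!s)")

def find_stale_references(root: str = ".") -> list[str]:
    hits = []
    for path in pathlib.Path(root).rglob("*"):
        if path.suffix not in {".py", ".md", ".txt", ".yml", ".json"}:
            continue
        try:
            text = path.read_text(encoding="utf-8", errors="ignore")
        except OSError:
            continue
        if STALE.search(text):
            hits.append(str(path))
    return hits

if __name__ == "__main__":
    print("files still referencing request_llm:", find_stale_references() or "none")
```

Note that hits inside the translation caches under docs/ are not necessarily errors: as the json hunks above show, only a couple of entries per file were updated, so older entries may legitimately still contain the old path.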
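
Because the rename changes import paths (`from request_llm.bridge_all import ...` becomes `from request_llms.bridge_all import ...`), any out-of-tree plugin or script written against the old name will raise `ModuleNotFoundError` once this patch is applied. A purely illustrative compatibility shim, not included in this patch, would alias the old top-level name to the new package at startup:

```python
# Illustrative compatibility shim (assumption, not part of this patch):
# map the old top-level package name onto the renamed one so that legacy
# "import request_llm.xxx" statements keep resolving.
import importlib
import sys

try:
    import request_llm  # only succeeds if a stale copy of the old folder exists
except ModuleNotFoundError:
    # Register the renamed package under the old name; submodule imports such as
    # "request_llm.bridge_all" then resolve through the aliased package's __path__.
    sys.modules["request_llm"] = importlib.import_module("request_llms")
```

Whether such a shim is desirable is a separate question; the patch itself takes the cleaner route of updating every internal reference, as the hunks above show.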