rename folder

binary-husky 2023-10-28 17:44:17 +08:00
parent 5a530df4f2
commit cf085565a7
73 changed files with 193 additions and 193 deletions
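Every hunk below makes the same mechanical substitution, rewriting the package path request_llm to request_llms (the folder itself is renamed as well, per the commit title). As a hedged sketch, this is the kind of one-off script that could apply the textual side of such a rename; the script, its regex, and its file filtering are illustrative assumptions, not part of the commit:

```python
# Illustrative helper of the sort that could produce this sweep; the regex,
# the file filtering and the traversal are assumptions, not taken from the commit.
import pathlib
import re

# Matches request_llm/  request_llm.  request_llm\  but leaves request_llms alone.
OLD = re.compile(r"request_llm(?=[./\\])")

for path in pathlib.Path(".").rglob("*"):
    if path.is_dir() or ".git" in path.parts:
        continue
    try:
        text = path.read_text(encoding="utf-8")
    except (UnicodeDecodeError, PermissionError):
        continue  # skip binary or unreadable files
    new_text = OLD.sub("request_llms", text)
    if new_text != text:
        path.write_text(new_text, encoding="utf-8")
        print(f"rewrote {path}")
```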

.gitignore (vendored, 6 lines changed)

@ -146,9 +146,9 @@ debug*
private*
crazy_functions/test_project/pdf_and_word
crazy_functions/test_samples
request_llm/jittorllms
request_llms/jittorllms
multi-language
request_llm/moss
request_llms/moss
media
flagged
request_llm/ChatGLM-6b-onnx-u8s8
request_llms/ChatGLM-6b-onnx-u8s8


@ -126,11 +126,11 @@ python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步
【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端需要额外安装更多依赖前提条件熟悉Python + 用过Pytorch + 电脑配置够强):
```sh
# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1以上默认安装的为torch+cpu版使用cuda需要卸载torch重新安装torch+cuda 2如因本机配置不够无法加载模型可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# 【可选步骤II】支持复旦MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径
python -m pip install -r request_llms/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 注意执行此行代码时,必须处于项目根路径
# 【可选步骤III】支持RWKV Runner
参考wikihttps://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
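The note above tells users to reduce the model precision in request_llms/bridge_chatglm.py by switching to the int4 checkpoint. A minimal sketch of that edit, assuming the file uses the stock transformers calls quoted in the note (the surrounding code is not shown in this diff, and mirroring the change on the model line is an assumption):

```python
# Hypothetical excerpt of request_llms/bridge_chatglm.py. Only the
# AutoTokenizer call is quoted in the note above; applying the same change to
# the AutoModel line is an assumption.
from transformers import AutoModel, AutoTokenizer

# Full-precision checkpoint (default):
# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# Lower-memory int4 checkpoint for weaker machines:
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
```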


@ -156,7 +156,7 @@ def auto_update(raise_error=False):
def warm_up_modules():
print('正在执行一些模块的预热...')
from toolbox import ProxyNetworkActivate
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
with ProxyNetworkActivate("Warmup_Modules"):
enc = model_info["gpt-3.5-turbo"]['tokenizer']
enc.encode("模块预热", disallowed_special=())


@ -136,7 +136,7 @@ SSL_CERTFILE = ""
API_ORG = ""
# 如果需要使用Slack Claude使用教程详情见 request_llm/README.md
# 如果需要使用Slack Claude使用教程详情见 request_llms/README.md
SLACK_CLAUDE_BOT_ID = ''
SLACK_CLAUDE_USER_TOKEN = ''


@ -11,7 +11,7 @@ class PaperFileGroup():
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num


@ -11,7 +11,7 @@ class PaperFileGroup():
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num


@ -5,7 +5,7 @@ import logging
def input_clipping(inputs, history, max_token_limit):
import numpy as np
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
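The hunk above only renames the import used by input_clipping. As a toy illustration of the underlying idea, not the project's actual clipping logic, trimming text to a token budget with that tokenizer could look like this:

```python
# Toy sketch of token-limited clipping. Only the model_info lookup and the
# encode(..., disallowed_special=()) call are taken from the hunks above; the
# clipping strategy itself is illustrative.
from request_llms.bridge_all import model_info

enc = model_info["gpt-3.5-turbo"]['tokenizer']

def clip_to_token_limit(text: str, max_token_limit: int) -> str:
    tokens = enc.encode(text, disallowed_special=())
    if len(tokens) <= max_token_limit:
        return text
    # keep the head and tail of the text and drop the middle
    half = max_token_limit // 2
    return enc.decode(tokens[:half]) + "\n...\n" + enc.decode(tokens[-half:])
```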
@ -63,7 +63,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
"""
import time
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
# 用户反馈
chatbot.append([inputs_show_user, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
@ -177,7 +177,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
"""
import time, random
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
assert len(inputs_array) == len(history_array)
assert len(inputs_array) == len(sys_prompt_array)
if max_workers == -1: # 读取配置文件


@ -165,7 +165,7 @@ class LatexPaperFileGroup():
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num


@ -103,7 +103,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
inputs_show_user_array = []
# get_token_num
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info[llm_kwargs['llm_model']]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))


@ -1,7 +1,7 @@
from pydantic import BaseModel, Field
from typing import List
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
import copy, json, pickle, os, sys, time


@ -1,7 +1,7 @@
from pydantic import BaseModel, Field
from typing import List
from toolbox import update_ui_lastest_msg, get_conf
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO
import copy, json, pickle, os, sys


@ -5,7 +5,7 @@ import datetime
def gen_image(llm_kwargs, prompt, resolution="256x256"):
import requests, json, time, os
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
proxies, = get_conf('proxies')
# Set up OpenAI API key and model


@ -32,7 +32,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
print(file_content)
# private_upload里面的文件名在解压zip后容易出现乱码rar和7z格式正常故可以只分析文章内容不输入文件名
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
max_token = model_info[llm_kwargs['llm_model']]['max_token']
TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(


@ -41,7 +41,7 @@ def split_audio_file(filename, split_duration=1000):
def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
import os, requests
from moviepy.editor import AudioFileClip
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
# 设置OpenAI密钥和模型
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])


@ -13,7 +13,7 @@ class PaperFileGroup():
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num


@ -21,7 +21,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
TOKEN_LIMIT_PER_FRAGMENT = 2500
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(


@ -95,7 +95,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
# 递归地切割PDF文件
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(


@ -19,7 +19,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
TOKEN_LIMIT_PER_FRAGMENT = 2500
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(


@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
def google(query, proxies):
query = query # 在此处替换您要搜索的关键词


@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
def bing_search(query, proxies=None):


@ -48,7 +48,7 @@ from pydantic import BaseModel, Field
from typing import List
from toolbox import CatchException, update_ui, is_the_upload_folder
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError


@ -13,7 +13,7 @@ class PaperFileGroup():
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(
enc.encode(txt, disallowed_special=()))


@ -2,7 +2,7 @@ from toolbox import update_ui
from toolbox import CatchException, get_conf, markdown_convertion
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.agent_fns.watchdog import WatchDog
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
import threading, time
import numpy as np
from .live_audio.aliyunASR import AliyunASR


@ -137,7 +137,7 @@ services:
# P.S. 通过对 command 进行微调,可以便捷地安装额外的依赖
# command: >
# bash -c "pip install -r request_llm/requirements_qwen.txt && python3 -u main.py"
# bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"
### ===================================================
### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型


@ -19,13 +19,13 @@ RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad sc
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss
RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llm/requirements_moss.txt
RUN python3 -m pip install -r request_llm/requirements_qwen.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN python3 -m pip install -r request_llm/requirements_newbing.txt
RUN python3 -m pip install -r request_llms/requirements_moss.txt
RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr


@ -14,12 +14,12 @@ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss
RUN git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss
RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llm/requirements_moss.txt
RUN python3 -m pip install -r request_llm/requirements_qwen.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN python3 -m pip install -r request_llm/requirements_newbing.txt
RUN python3 -m pip install -r request_llms/requirements_moss.txt
RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt


@ -16,12 +16,12 @@ WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN python3 -m pip install -r request_llm/requirements_newbing.txt
RUN python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
# 下载JittorLLMs
RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms
RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms
# 禁用缓存,确保更新代码
ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache


@ -103,12 +103,12 @@ python -m pip install -r requirements.txt # Same step as pip installation
[Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (Prerequisites: Familiar with Python + Used Pytorch + Sufficient computer configuration):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llm/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llms/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the project root path
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the project root path
# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (jittorllms series currently only supports docker solutions):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -109,12 +109,12 @@ python -m pip install -r requirements.txt # questo passaggio funziona allo stess
【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente):
```sh
# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llms/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【Passaggio facoltativo II】 Supporto a MOSS di Fudan
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto
# 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -104,11 +104,11 @@ python -m pip install -r requirements.txt # 이 단계도 pip install의 단계
# 1 : 기본 설치된 것들은 torch + cpu 버전입니다. cuda를 사용하려면 torch를 제거한 다음 torch + cuda를 다시 설치해야합니다.
# 2 : 모델을 로드할 수 없는 기계 구성 때문에, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)를
# AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)로 변경합니다.
python -m pip install -r request_llm/requirements_chatglm.txt
python -m pip install -r request_llms/requirements_chatglm.txt
# [선택 사항 II] Fudan MOSS 지원
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다.
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다.
# [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오.
# 현재 지원되는 전체 모델 :


@ -119,12 +119,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
[Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisite: familiar with Python + used Pytorch + computer configuration is strong):
```sh
# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【Optional Step II】support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When executing this line of code, you must be in the project root path
# 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -106,12 +106,12 @@ python -m pip install -r requirements.txt # this step is the same as pip install
[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + used Pytorch + computer configuration is strong enough):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True)
python -m pip install -r request_llm/requirements_chatglm.txt
# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True)
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root directory of the project
# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -111,12 +111,12 @@ python -m pip install -r requirements.txt # Same step as pip instalation
【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur):
```sh
# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llms/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【Optional Step II】 Support FDU MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path.
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When running this line of code, you must be in the project root path.
# 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -120,12 +120,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
[Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (precondition: familiar with Python + used Pytorch + computer configuration). Strong enough):
```sh
# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
python -m pip install -r request_llm/requirements_chatglm.txt
# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
python -m pip install -r request_llms/requirements_chatglm.txt
# Optional Step II: Support Fudan MOSS.
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root.
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, it must be in the project root.
# 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -108,12 +108,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong):
```sh
# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# [Optional step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path
python -m pip install -r request_llms/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, you must be in the project root path
# [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]


@ -38,20 +38,20 @@
| crazy_functions\读文章写摘要.py | 对论文进行解析和全文摘要生成 |
| crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 |
| crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 |
| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 |
| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复支持单线程和多线程方式。 |
| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 |
| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话支持单线程和多线程方式。 |
| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话基于多进程和多线程方式。 |
| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能提供包括历史信息、参数调节等在内的多个功能选项。 |
| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 |
| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话支持单线程和多线程方式。 |
| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 |
| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 |
| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 |
| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 |
| request_llm\edge_gpt_free.py | 实现聊天机器人API采用aiohttp和httpx工具库。 |
| request_llm\test_llms.py | 对llm模型进行单元测试。 |
| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 |
| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复支持单线程和多线程方式。 |
| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 |
| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话支持单线程和多线程方式。 |
| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话基于多进程和多线程方式。 |
| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能提供包括历史信息、参数调节等在内的多个功能选项。 |
| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 |
| request_llms\bridge_newbing.py | 使用Newbing聊天机器人进行对话支持单线程和多线程方式。 |
| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 |
| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 |
| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 |
| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 |
| request_llms\edge_gpt_free.py | 实现聊天机器人API采用aiohttp和httpx工具库。 |
| request_llms\test_llms.py | 对llm模型进行单元测试。 |
## 接下来请你逐文件分析下面的工程[0/48] 请对下面的程序文件做一个概述: check_proxy.py
@ -129,7 +129,7 @@ toolbox.py是一个工具类库其中主要包含了一些函数装饰器和
1. `input_clipping`: 该函数用于裁剪输入文本长度,使其不超过一定的限制。
2. `request_gpt_model_in_new_thread_with_ui_alive`: 该函数用于请求 GPT 模型并保持用户界面的响应,支持多线程和实时更新用户界面。
这两个函数都依赖于从 `toolbox``request_llm` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。
这两个函数都依赖于从 `toolbox``request_llms` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。
## [12/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文润色.py
@ -137,7 +137,7 @@ toolbox.py是一个工具类库其中主要包含了一些函数装饰器和
## [13/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文翻译.py
这个文件包含两个函数 `Latex英译中``Latex中译英`它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llm` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。
这个文件包含两个函数 `Latex英译中``Latex中译英`它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llms` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。
## [14/48] 请对下面的程序文件做一个概述: crazy_functions\__init__.py
@ -227,19 +227,19 @@ toolbox.py是一个工具类库其中主要包含了一些函数装饰器和
该程序文件定义了一个名为高阶功能模板函数的函数该函数接受多个参数包括输入的文本、gpt模型参数、插件模型参数、聊天显示框的句柄、聊天历史等并利用送出请求使用 Unsplash API 发送相关图片。其中,为了避免输入溢出,函数会在开始时清空历史。函数也有一些 UI 更新的语句。该程序文件还依赖于其他两个模块CatchException 和 update_ui以及一个名为 request_gpt_model_in_new_thread_with_ui_alive 的来自 crazy_utils 模块(应该是自定义的工具包)的函数。
## [34/48] 请对下面的程序文件做一个概述: request_llm\bridge_all.py
## [34/48] 请对下面的程序文件做一个概述: request_llms\bridge_all.py
该文件包含两个函数predict和predict_no_ui_long_connection用于基于不同的LLM模型进行对话。该文件还包含一个lazyloadTiktoken类和一个LLM_CATCH_EXCEPTION修饰器函数。其中lazyloadTiktoken类用于懒加载模型的tokenizerLLM_CATCH_EXCEPTION用于错误处理。整个文件还定义了一些全局变量和模型信息字典用于引用和配置LLM模型。
## [35/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatglm.py
## [35/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatglm.py
这是一个Python程序文件名为`bridge_chatglm.py`,其中定义了一个名为`GetGLMHandle`的类和三个方法:`predict_no_ui_long_connection``predict``stream_chat`。该文件依赖于多个Python库`transformers``sentencepiece`。该文件实现了一个聊天机器人使用ChatGLM模型来生成回复支持单线程和多线程方式。程序启动时需要加载ChatGLM的模型和tokenizer需要一段时间。在配置文件`config.py`中设置参数会影响模型的内存和显存使用,因此程序可能会导致低配计算机卡死。
## [36/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatgpt.py
## [36/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatgpt.py
该文件为 Python 代码文件,文件名为 request_llm\bridge_chatgpt.py。该代码文件主要提供三个函数predict、predict_no_ui和 predict_no_ui_long_connection用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。
该文件为 Python 代码文件,文件名为 request_llms\bridge_chatgpt.py。该代码文件主要提供三个函数predict、predict_no_ui和 predict_no_ui_long_connection用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。
## [37/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_llama.py
## [37/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_llama.py
该代码文件实现了一个聊天机器人,其中使用了 JittorLLMs 模型。主要包括以下几个部分:
1. GetGLMHandle 类:一个进程类,用于加载 JittorLLMs 模型并接收并处理请求。
@ -248,17 +248,17 @@ toolbox.py是一个工具类库其中主要包含了一些函数装饰器和
这个文件中还有一些辅助函数和全局变量,例如 importlib、time、threading 等。
## [38/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_pangualpha.py
## [38/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_pangualpha.py
这个文件是为了实现使用jittorllms一种机器学习模型来进行聊天功能的代码。其中包括了模型加载、模型的参数加载、消息的收发等相关操作。其中使用了多进程和多线程来提高性能和效率。代码中还包括了处理依赖关系的函数和预处理函数等。
## [39/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_rwkv.py
## [39/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_rwkv.py
这个文件是一个Python程序文件名为request_llm\bridge_jittorllms_rwkv.py。它依赖transformers、time、threading、importlib、multiprocessing等库。在文件中通过定义GetGLMHandle类加载jittorllms模型参数和定义stream_chat方法来实现与jittorllms模型的交互。同时该文件还定义了predict_no_ui_long_connection和predict方法来处理历史信息、调用jittorllms模型、接收回复信息并输出结果。
## [40/48] 请对下面的程序文件做一个概述: request_llm\bridge_moss.py
## [40/48] 请对下面的程序文件做一个概述: request_llms\bridge_moss.py
该文件为一个Python源代码文件文件名为 request_llm\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。
该文件为一个Python源代码文件文件名为 request_llms\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。
GetGLMHandle 类继承自Process类多进程主要功能是启动一个子进程并加载 MOSS 模型参数,通过 Pipe 进行主子进程的通信。该类还定义了 check_dependency、moss_init、run 和 stream_chat 等方法,其中 check_dependency 和 moss_init 是子进程的初始化方法run 是子进程运行方法stream_chat 实现了主进程和子进程的交互过程。
@ -266,7 +266,7 @@ GetGLMHandle 类继承自Process类多进程主要功能是启动一个
函数 predict 是单线程方法,通过调用 update_ui 将交互过程中 MOSS 的回复实时更新到UIUser Interface并执行一个 named functionadditional_fn指定的函数对输入进行预处理。
## [41/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbing.py
## [41/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbing.py
这是一个名为`bridge_newbing.py`的程序文件,包含三个部分:
@ -276,11 +276,11 @@ GetGLMHandle 类继承自Process类多进程主要功能是启动一个
第三部分定义了一个名为`newbing_handle`的全局变量,并导出了`predict_no_ui_long_connection``predict`这两个方法,以供其他程序可以调用。
## [42/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbingfree.py
## [42/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbingfree.py
这个Python文件包含了三部分内容。第一部分是来自edge_gpt_free.py文件的聊天机器人程序。第二部分是子进程Worker用于调用主体。第三部分提供了两个函数predict_no_ui_long_connection和predict用于调用NewBing聊天机器人和返回响应。其中predict函数还提供了一些参数用于控制聊天机器人的回复和更新UI界面。
## [43/48] 请对下面的程序文件做一个概述: request_llm\bridge_stackclaude.py
## [43/48] 请对下面的程序文件做一个概述: request_llms\bridge_stackclaude.py
这是一个Python源代码文件文件名为request_llm\bridge_stackclaude.py。代码分为三个主要部分
@ -290,21 +290,21 @@ GetGLMHandle 类继承自Process类多进程主要功能是启动一个
第三部分定义了predict_no_ui_long_connection和predict两个函数主要用于通过调用ClaudeHandle对象的stream_chat方法来获取Claude的回复并更新ui以显示相关信息。其中predict函数采用单线程方法而predict_no_ui_long_connection函数使用多线程方法。
## [44/48] 请对下面的程序文件做一个概述: request_llm\bridge_tgui.py
## [44/48] 请对下面的程序文件做一个概述: request_llms\bridge_tgui.py
该文件是一个Python代码文件名为request_llm\bridge_tgui.py。它包含了一些函数用于与chatbot UI交互并通过WebSocket协议与远程LLM模型通信完成文本生成任务其中最重要的函数是predict()和predict_no_ui_long_connection()。这个程序还有其他的辅助函数如random_hash()。整个代码文件在协作的基础上完成了一次修改。
## [45/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt.py
## [45/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt.py
该文件是一个用于调用Bing chatbot API的Python程序它由多个类和辅助函数构成可以根据给定的对话连接在对话中提出问题使用websocket与远程服务通信。程序实现了一个聊天机器人可以为用户提供人工智能聊天。
## [46/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt_free.py
## [46/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt_free.py
该代码文件为一个会话API可通过Chathub发送消息以返回响应。其中使用了 aiohttp 和 httpx 库进行网络请求并发送。代码中包含了一些函数和常量,多数用于生成请求数据或是请求头信息等。同时该代码文件还包含了一个 Conversation 类,调用该类可实现对话交互。
## [47/48] 请对下面的程序文件做一个概述: request_llm\test_llms.py
## [47/48] 请对下面的程序文件做一个概述: request_llms\test_llms.py
这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llm.bridge_newbingfree"的模块然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。
这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llms.bridge_newbingfree"的模块然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。
## 用一张Markdown表格简要描述以下文件的功能
check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py。根据以上分析用一句话概括程序的整体功能。
@ -355,24 +355,24 @@ crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生
概括程序的整体功能:提供了一系列处理文本、文件和代码的功能,使用了各类语言模型、多线程、网络请求和数据解析技术来提高效率和精度。
## 用一张Markdown表格简要描述以下文件的功能
crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py。根据以上分析用一句话概括程序的整体功能。
crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llms\bridge_all.py, request_llms\bridge_chatglm.py, request_llms\bridge_chatgpt.py, request_llms\bridge_jittorllms_llama.py, request_llms\bridge_jittorllms_pangualpha.py, request_llms\bridge_jittorllms_rwkv.py, request_llms\bridge_moss.py, request_llms\bridge_newbing.py, request_llms\bridge_newbingfree.py, request_llms\bridge_stackclaude.py, request_llms\bridge_tgui.py, request_llms\edge_gpt.py, request_llms\edge_gpt_free.py, request_llms\test_llms.py。根据以上分析用一句话概括程序的整体功能。
| 文件名 | 功能描述 |
| --- | --- |
| crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 |
| crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 |
| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 |
| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复支持单线程和多线程方式。 |
| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 |
| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话支持单线程和多线程方式。 |
| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话基于多进程和多线程方式。 |
| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能提供包括历史信息、参数调节等在内的多个功能选项。 |
| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 |
| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话支持单线程和多线程方式。 |
| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 |
| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 |
| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 |
| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 |
| request_llm\edge_gpt_free.py | 实现聊天机器人API采用aiohttp和httpx工具库。 |
| request_llm\test_llms.py | 对llm模型进行单元测试。 |
| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 |
| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复支持单线程和多线程方式。 |
| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 |
| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话支持单线程和多线程方式。 |
| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话基于多进程和多线程方式。 |
| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能提供包括历史信息、参数调节等在内的多个功能选项。 |
| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 |
| request_llms\bridge_newbing.py | 使用Newbing聊天机器人进行对话支持单线程和多线程方式。 |
| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 |
| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 |
| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 |
| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 |
| request_llms\edge_gpt_free.py | 实现聊天机器人API采用aiohttp和httpx工具库。 |
| request_llms\test_llms.py | 对llm模型进行单元测试。 |
| 程序整体功能 | 实现不同种类的聊天机器人,可以根据输入进行文本生成。 |
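The report above describes request_llms/bridge_all.py as the unified entry point exposing predict and predict_no_ui_long_connection for every backend. Combining that with the call signature repeated in the bridge hunks later in this commit, a hedged sketch of a caller after the rename (the llm_kwargs keys and the return type are assumptions):

```python
# Illustrative only: the import path and parameter names come from this
# commit's hunks; the llm_kwargs contents and the returned value are guesses.
from request_llms.bridge_all import predict_no_ui_long_connection

llm_kwargs = {"llm_model": "gpt-3.5-turbo", "api_key": "sk-..."}  # assumed keys

reply = predict_no_ui_long_connection(
    inputs="用一句话概括这个仓库的功能",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="You are a helpful academic assistant.",
)
print(reply)  # assumed to return the model's reply as a string
```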


@ -1184,7 +1184,7 @@
"Call ChatGLM fail 不能正常加载ChatGLM的参数": "Call ChatGLM fail, unable to load parameters for ChatGLM",
"不能正常加载ChatGLM的参数": "Unable to load parameters for ChatGLM!",
"多线程方法": "Multithreading method",
"函数的说明请见 request_llm/bridge_all.py": "For function details, please see request_llm/bridge_all.py",
"函数的说明请见 request_llms/bridge_all.py": "For function details, please see request_llms/bridge_all.py",
"程序终止": "Program terminated",
"单线程方法": "Single-threaded method",
"等待ChatGLM响应中": "Waiting for response from ChatGLM",
@ -1543,7 +1543,7 @@
"str类型": "str type",
"所有音频都总结完成了吗": "Are all audio summaries completed?",
"SummaryAudioVideo内容": "SummaryAudioVideo content",
"使用教程详情见 request_llm/README.md": "See request_llm/README.md for detailed usage instructions",
"使用教程详情见 request_llms/README.md": "See request_llms/README.md for detailed usage instructions",
"删除中间文件夹": "Delete intermediate folder",
"Claude组件初始化成功": "Claude component initialized successfully",
"$c$ 是光速": "$c$ is the speed of light",


@ -782,7 +782,7 @@
"主进程统一调用函数接口": "メインプロセスが関数インターフェースを統一的に呼び出します",
"再例如一个包含了待处理文件的路径": "処理待ちのファイルを含むパスの例",
"负责把学术论文准确翻译成中文": "学術論文を正確に中国語に翻訳する責任があります",
"函数的说明请见 request_llm/bridge_all.py": "関数の説明については、request_llm/bridge_all.pyを参照してください",
"函数的说明请见 request_llms/bridge_all.py": "関数の説明については、request_llms/bridge_all.pyを参照してください",
"然后回车提交": "そしてEnterを押して提出してください",
"防止爆token": "トークンの爆発を防止する",
"Latex项目全文中译英": "LaTeXプロジェクト全文の中国語から英語への翻訳",
@ -1616,7 +1616,7 @@
"正在重试": "再試行中",
"从而更全面地理解项目的整体功能": "プロジェクトの全体的な機能をより理解するために",
"正在等您说完问题": "質問が完了するのをお待ちしています",
"使用教程详情见 request_llm/README.md": "使用方法の詳細については、request_llm/README.mdを参照してください",
"使用教程详情见 request_llms/README.md": "使用方法の詳細については、request_llms/README.mdを参照してください",
"6.25 加入判定latex模板的代码": "6.25 テンプレートの判定コードを追加",
"找不到任何音频或视频文件": "音声またはビデオファイルが見つかりません",
"请求GPT模型的": "GPTモデルのリクエスト",


@ -123,7 +123,7 @@
"的第": "的第",
"减少重复": "減少重複",
"如果超过期限没有喂狗": "如果超過期限沒有餵狗",
"函数的说明请见 request_llm/bridge_all.py": "函數的說明請見 request_llm/bridge_all.py",
"函数的说明请见 request_llms/bridge_all.py": "函數的說明請見 request_llms/bridge_all.py",
"第7步": "第7步",
"说": "說",
"中途接收可能的终止指令": "中途接收可能的終止指令",
@ -1887,7 +1887,7 @@
"请继续分析其他源代码": "請繼續分析其他源代碼",
"质能方程式": "質能方程式",
"功能尚不稳定": "功能尚不穩定",
"使用教程详情见 request_llm/README.md": "使用教程詳情見 request_llm/README.md",
"使用教程详情见 request_llms/README.md": "使用教程詳情見 request_llms/README.md",
"从以上搜索结果中抽取信息": "從以上搜索結果中抽取信息",
"虽然PDF生成失败了": "雖然PDF生成失敗了",
"找图片": "尋找圖片",


@ -7,7 +7,7 @@ def main():
import gradio as gr
if gr.__version__ not in ['3.32.6']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llm.bridge_all import predict
from request_llms.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')


@ -2,7 +2,7 @@
## ChatGLM
- 安装依赖 `pip install -r request_llm/requirements_chatglm.txt`
- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt`
- 修改配置在config.py中将LLM_MODEL的值改为"chatglm"
``` sh


@ -27,7 +27,7 @@ class GetGLMHandle(Process):
self.info = "依赖检测通过"
self.success = True
except:
self.info = "缺少ChatGLM的依赖如果要使用ChatGLM除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.info = "缺少ChatGLM的依赖如果要使用ChatGLM除了基础的pip依赖以外您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.success = False
def ready(self):
@ -100,7 +100,7 @@ glm_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
global glm_handle
if glm_handle is None:
@ -131,7 +131,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, ""))


@ -44,7 +44,7 @@ class GetGLMFTHandle(Process):
self.info = "依赖检测通过"
self.success = True
except:
self.info = "缺少ChatGLMFT的依赖如果要使用ChatGLMFT除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.info = "缺少ChatGLMFT的依赖如果要使用ChatGLMFT除了基础的pip依赖以外您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.success = False
def ready(self):
@ -59,7 +59,7 @@ class GetGLMFTHandle(Process):
if self.chatglmft_model is None:
from transformers import AutoConfig
import torch
# conf = 'request_llm/current_ptune_model.json'
# conf = 'request_llms/current_ptune_model.json'
# if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息')
# with open(conf, 'r', encoding='utf8') as f:
# model_args = json.loads(f.read())
@ -140,7 +140,7 @@ glmft_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
global glmft_handle
if glmft_handle is None:
@ -171,7 +171,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, ""))


@ -1,5 +1,5 @@
model_name = "ChatGLM-ONNX"
cmd_to_install = "`pip install -r request_llm/requirements_chatglm_onnx.txt`"
cmd_to_install = "`pip install -r request_llms/requirements_chatglm_onnx.txt`"
from transformers import AutoModel, AutoTokenizer
@ -28,13 +28,13 @@ class GetONNXGLMHandle(LocalLLMHandle):
def load_model_and_tokenizer(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
import os, glob
if not len(glob.glob("./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件
if not len(glob.glob("./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件
from huggingface_hub import snapshot_download
snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llm/ChatGLM-6b-onnx-u8s8")
snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llms/ChatGLM-6b-onnx-u8s8")
def create_model():
return ChatGLMModel(
tokenizer_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model",
onnx_model_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx"
tokenizer_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model",
onnx_model_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx"
)
self._model = create_model()
return self._model, None


@ -1,5 +1,5 @@
model_name = "InternLM"
cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`"
cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
from transformers import AutoModel, AutoTokenizer
import time


@ -28,8 +28,8 @@ class GetGLMHandle(Process):
self.success = True
except:
from toolbox import trimmed_format_exc
self.info = r"缺少jittorllms的依赖如果要使用jittorllms除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖在项目根目录运行这两个指令" +\
self.info = r"缺少jittorllms的依赖如果要使用jittorllms除了基础的pip依赖以外您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖在项目根目录运行这两个指令" +\
r"警告安装jittorllms依赖后将完全破坏现有的pytorch环境建议使用docker环境" + trimmed_format_exc()
self.success = False
@ -45,8 +45,8 @@ class GetGLMHandle(Process):
env = os.environ.get("PATH", "")
os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
os.chdir(root_dir_assume + '/request_llm/jittorllms')
sys.path.append(root_dir_assume + '/request_llm/jittorllms')
os.chdir(root_dir_assume + '/request_llms/jittorllms')
sys.path.append(root_dir_assume + '/request_llms/jittorllms')
validate_path() # validate path so you can run from base directory
def load_model():
@ -109,7 +109,7 @@ llama_glm_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
global llama_glm_handle
if llama_glm_handle is None:
@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, ""))


@ -28,8 +28,8 @@ class GetGLMHandle(Process):
self.success = True
except:
from toolbox import trimmed_format_exc
self.info = r"缺少jittorllms的依赖如果要使用jittorllms除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖在项目根目录运行这两个指令" +\
self.info = r"缺少jittorllms的依赖如果要使用jittorllms除了基础的pip依赖以外您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖在项目根目录运行这两个指令" +\
r"警告安装jittorllms依赖后将完全破坏现有的pytorch环境建议使用docker环境" + trimmed_format_exc()
self.success = False
@ -45,8 +45,8 @@ class GetGLMHandle(Process):
env = os.environ.get("PATH", "")
os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
os.chdir(root_dir_assume + '/request_llm/jittorllms')
sys.path.append(root_dir_assume + '/request_llm/jittorllms')
os.chdir(root_dir_assume + '/request_llms/jittorllms')
sys.path.append(root_dir_assume + '/request_llms/jittorllms')
validate_path() # validate path so you can run from base directory
def load_model():
@ -109,7 +109,7 @@ pangu_glm_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
global pangu_glm_handle
if pangu_glm_handle is None:
@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, ""))


@ -28,8 +28,8 @@ class GetGLMHandle(Process):
self.success = True
except:
from toolbox import trimmed_format_exc
self.info = r"Missing jittorllms dependencies. To use jittorllms, besides the basic pip dependencies you also need to run `pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r" and `git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms` (run both commands from the project root) to install the jittorllms dependencies." +\
self.info = r"Missing jittorllms dependencies. To use jittorllms, besides the basic pip dependencies you also need to run `pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
r" and `git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms` (run both commands from the project root) to install the jittorllms dependencies." +\
r"Warning: installing the jittorllms dependencies will completely break the existing pytorch environment; the docker environment is recommended." + trimmed_format_exc()
self.success = False
@ -45,8 +45,8 @@ class GetGLMHandle(Process):
env = os.environ.get("PATH", "")
os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
os.chdir(root_dir_assume + '/request_llm/jittorllms')
sys.path.append(root_dir_assume + '/request_llm/jittorllms')
os.chdir(root_dir_assume + '/request_llms/jittorllms')
sys.path.append(root_dir_assume + '/request_llms/jittorllms')
validate_path() # validate path so you can run from base directory
def load_model():
@ -109,7 +109,7 @@ rwkv_glm_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
global rwkv_glm_handle
if rwkv_glm_handle is None:
@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, ""))
View File
@ -1,5 +1,5 @@
model_name = "LLaMA"
cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`"
cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
View File
@ -24,12 +24,12 @@ class GetGLMHandle(Process):
def check_dependency(self): # executed in the main process
try:
import datasets, os
assert os.path.exists('request_llm/moss/models')
assert os.path.exists('request_llms/moss/models')
self.info = "Dependency check passed"
self.success = True
except:
self.info = """
Missing MOSS dependencies. To use MOSS, besides the basic pip dependencies you also need to run `pip install -r request_llm/requirements_moss.txt` and `git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss` to install the MOSS dependencies.
Missing MOSS dependencies. To use MOSS, besides the basic pip dependencies you also need to run `pip install -r request_llms/requirements_moss.txt` and `git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss` to install the MOSS dependencies.
"""
self.success = False
return self.success
@ -110,8 +110,8 @@ class GetGLMHandle(Process):
def validate_path():
import os, sys
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
os.chdir(root_dir_assume + '/request_llm/moss')
sys.path.append(root_dir_assume + '/request_llm/moss')
os.chdir(root_dir_assume + '/request_llms/moss')
sys.path.append(root_dir_assume + '/request_llms/moss')
validate_path() # validate path so you can run from base directory
try:
@ -176,7 +176,7 @@ moss_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
global moss_handle
if moss_handle is None:
@ -206,7 +206,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, ""))
View File
@ -54,7 +54,7 @@ class NewBingHandle(Process):
self.info = "Dependency check passed. Waiting for the NewBing response. Note: multiple users cannot call the NewBing interface at the same time (there is a thread lock), otherwise each user's NewBing conversation history would leak into the others'. The configured proxy is used automatically when calling NewBing."
self.success = True
except:
self.info = "Missing dependencies. To use Newbing, besides the basic pip dependencies you also need to run `pip install -r request_llm/requirements_newbing.txt` to install the Newbing dependencies."
self.info = "Missing dependencies. To use Newbing, besides the basic pip dependencies you also need to run `pip install -r request_llms/requirements_newbing.txt` to install the Newbing dependencies."
self.success = False
def ready(self):
@ -63,7 +63,7 @@ class NewBingHandle(Process):
async def async_run(self):
# read the configuration
NEWBING_STYLE, = get_conf('NEWBING_STYLE')
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
endpoint = model_info['newbing']['endpoint']
while True:
# wait
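The hunk above imports the shared `model_info` registry from the renamed package and reads the per-model endpoint. A hedged sketch of that lookup after the rename (the `'newbing'` key and `'endpoint'` field appear in this diff; any other keys or fields would be assumptions):

```python
# Sketch: reading per-model settings from the shared registry after the rename.
from request_llms.bridge_all import model_info

endpoint = model_info['newbing']['endpoint']  # endpoint URL configured for this backend
```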
@ -181,7 +181,7 @@ newbingfree_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
global newbingfree_handle
if (newbingfree_handle is None) or (not newbingfree_handle.success):
@ -210,7 +210,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, "[Local Message]: Waiting for the NewBing response ..."))
View File
@ -119,7 +119,7 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
watch_dog_patience = 5
response = ""
@ -134,7 +134,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, ""))
View File
@ -1,5 +1,5 @@
model_name = "Qwen"
cmd_to_install = "`pip install -r request_llm/requirements_qwen.txt`"
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
from transformers import AutoModel, AutoTokenizer
View File
@ -16,7 +16,7 @@ def validate_key():
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
watch_dog_patience = 5
response = ""
@ -36,7 +36,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
View File
@ -99,7 +99,7 @@ class ClaudeHandle(Process):
self.info = "Dependency check passed. Waiting for the Claude response. Note: multiple users cannot call the Claude interface at the same time (there is a thread lock), otherwise each user's Claude conversation history would leak into the others'. The configured proxy is used automatically when calling Claude."
self.success = True
except:
self.info = "Missing dependencies. To use Claude, besides the basic pip dependencies you also need to run `pip install -r request_llm/requirements_slackclaude.txt` to install the Claude dependencies, then restart the program."
self.info = "Missing dependencies. To use Claude, besides the basic pip dependencies you also need to run `pip install -r request_llms/requirements_slackclaude.txt` to install the Claude dependencies, then restart the program."
self.success = False
def ready(self):
@ -204,7 +204,7 @@ claude_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
global claude_handle
if (claude_handle is None) or (not claude_handle.success):
@ -234,7 +234,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, "[Local Message]: Waiting for the Claude response ..."))
View File
@ -120,7 +120,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
Multi-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
_llm_handle = LLMSingletonClass()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info
@ -146,7 +146,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Single-threaded method
See request_llm/bridge_all.py for the function's documentation
See request_llms/bridge_all.py for the function's documentation
"""
chatbot.append((inputs, ""))
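The two hunks above only retarget the docstrings inside `get_local_llm_predict_fns`, but the factory shape they sit in is worth spelling out: given a singleton handle class, it returns the two entry points the rest of the project calls. The sketch below mirrors the signatures shown in this diff; `_llm_handle.info` appears above, while the `ask` method and the return handling are assumptions.

```python
# Hedged sketch of the local-LLM factory shape; the signatures follow this diff,
# the bodies are illustrative only.
def get_local_llm_predict_fns_sketch(LLMSingletonClass, model_name):
    load_message = f"{model_name} is loading, please wait..."

    def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                                      observe_window=[], console_slience=False):
        _llm_handle = LLMSingletonClass()
        if len(observe_window) >= 1:
            observe_window[0] = load_message + "\n\n" + _llm_handle.info
        return _llm_handle.ask(inputs)  # 'ask' is a placeholder method name (assumption)

    def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[],
                system_prompt='', stream=True, additional_fn=None):
        chatbot.append((inputs, ""))
        _llm_handle = LLMSingletonClass()
        chatbot[-1] = (inputs, _llm_handle.ask(inputs))  # same placeholder method
        yield chatbot, history

    return predict_no_ui_long_connection, predict
```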
View File
@ -10,14 +10,14 @@ def validate_path():
validate_path() # validate path so you can run from base directory
if __name__ == "__main__":
# from request_llm.bridge_newbingfree import predict_no_ui_long_connection
# from request_llm.bridge_moss import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
# from request_llm.bridge_claude import predict_no_ui_long_connection
# from request_llm.bridge_internlm import predict_no_ui_long_connection
# from request_llm.bridge_qwen import predict_no_ui_long_connection
from request_llm.bridge_spark import predict_no_ui_long_connection
# from request_llms.bridge_newbingfree import predict_no_ui_long_connection
# from request_llms.bridge_moss import predict_no_ui_long_connection
# from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
# from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
# from request_llms.bridge_claude import predict_no_ui_long_connection
# from request_llms.bridge_internlm import predict_no_ui_long_connection
# from request_llms.bridge_qwen import predict_no_ui_long_connection
from request_llms.bridge_spark import predict_no_ui_long_connection
llm_kwargs = {
'max_length': 4096,
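The test above switches its import to the renamed package and then builds an `llm_kwargs` dict; only the `max_length` field is visible in this hunk. A hedged usage sketch of the imported entry point, with argument names taken from the signatures in this diff and the prompt text purely illustrative:

```python
# Hedged usage sketch; requires the project on sys.path, and the real
# llm_kwargs carries more fields than the one shown in this hunk.
from request_llms.bridge_spark import predict_no_ui_long_connection

llm_kwargs = {'max_length': 4096}
reply = predict_no_ui_long_connection(
    inputs="Hello, who are you?",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="You are a helpful assistant.",
)
print(reply)
```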
View File
@ -878,7 +878,7 @@ def clip_history(inputs, history, tokenizer, max_token_limit):
until the token count of the history drops below the threshold
"""
import numpy as np
from request_llm.bridge_all import model_info
from request_llms.bridge_all import model_info
def get_token_num(txt):
return len(tokenizer.encode(txt, disallowed_special=()))
input_token_num = get_token_num(inputs)
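The `clip_history` hunk only retargets its import, but its docstring states the contract: drop old history until the token count falls below the threshold, counting tokens with the model tokenizer obtained from `request_llms.bridge_all`. A small self-contained sketch of that contract, with a whitespace token counter standing in for the real tokenizer and a pairwise-dropping policy that is an assumption:

```python
# Hedged sketch of token-budget clipping in the spirit of clip_history;
# the stand-in tokenizer and the pairwise dropping policy are assumptions.
def clip_history_sketch(inputs, history, get_token_num, max_token_limit):
    budget = max_token_limit - get_token_num(inputs)  # leave room for the new input
    clipped = list(history)
    while clipped and sum(get_token_num(h) for h in clipped) > budget:
        clipped = clipped[2:]  # drop the oldest question/answer pair together
    return clipped

# usage with a whitespace token counter standing in for the model tokenizer
def count_tokens(txt):
    return len(txt.split())

print(clip_history_sketch("new question", ["old q", "old a", "recent q", "recent a"], count_tokens, 8))
```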
@ -1069,7 +1069,7 @@ def get_plugin_handle(plugin_name):
def get_chat_handle():
"""
"""
from request_llm.bridge_all import predict_no_ui_long_connection
from request_llms.bridge_all import predict_no_ui_long_connection
return predict_no_ui_long_connection
def get_plugin_default_kwargs():
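`get_chat_handle` simply re-exports the multi-threaded entry point from the renamed package, so callers can obtain it without importing `request_llms` directly. A hedged sketch of how a caller might consume the handle; the positional arguments follow the signatures in this diff, while the `llm_kwargs` contents are assumptions:

```python
# Hedged sketch of consuming the handle; assumes get_chat_handle from the hunk
# above is passed in, and that llm_kwargs needs more fields in the real project.
def run_plugin_query(get_chat_handle):
    chat = get_chat_handle()  # -> predict_no_ui_long_connection
    return chat(
        "Say hello.",
        llm_kwargs={'max_length': 4096},
        history=[],
        sys_prompt="You are a concise assistant.",
    )
```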