Merge branch 'master' into frontier

binary-husky · 2023-12-09 22:36:28 +08:00 · commit c0a36e37be
21 changed files with 846 additions and 291 deletions

View File

@@ -167,6 +167,14 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
# [Optional Step IV] Make sure AVAIL_LLM_MODELS in config.py includes the expected models. All currently supported models are listed below (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# [Optional Step V] Support INT8/INT4 quantization of local models (the models referred to here are not pre-quantized; currently only deepseek-coder is supported, and more quantizable models will be added after testing):
pip install bitsandbytes
# Windows users installing bitsandbytes must use the bitsandbytes-windows-webui index below
python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
pip install -U git+https://github.com/huggingface/transformers.git
pip install -U git+https://github.com/huggingface/accelerate.git
pip install peft
```
</p>
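The quantization path enabled by Optional Step V can be smoke-tested before launching the app. Below is a minimal sketch (the checkpoint name is an illustrative assumption); it mirrors the BitsAndBytesConfig that request_llms/bridge_deepseekcoder.py adopts later in this commit:

```python
# Minimal sketch: verify that bitsandbytes INT4 loading works at all.
# The model name is an assumption for illustration, not mandated by the project.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "deepseek-ai/deepseek-coder-6.7b-instruct",  # hypothetical checkpoint
    trust_remote_code=True,
    quantization_config=bnb_config,
    device_map="auto",
)
print(model.get_memory_footprint())  # should be far below the FP16 footprint
```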

View File

@@ -160,6 +160,14 @@ def warm_up_modules():
enc = model_info["gpt-4"]['tokenizer']
enc.encode("模块预热", disallowed_special=())
def warm_up_vectordb():
print('正在执行一些模块的预热 ...')
from toolbox import ProxyNetworkActivate
with ProxyNetworkActivate("Warmup_Modules"):
import nltk
with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt")
if __name__ == '__main__':
import os
os.environ['no_proxy'] = '*' # avoid unexpected pollution from the proxy network
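For context, ProxyNetworkActivate is a context manager that turns the proxy on only for a labeled task; a minimal sketch of the pattern the warm-up code relies on:

```python
# Minimal sketch: the proxy is active only inside the with-block, and only
# if the "Warmup_Modules" label is listed in WHEN_TO_USE_PROXY in config.py.
from toolbox import ProxyNetworkActivate

with ProxyNetworkActivate("Warmup_Modules"):
    import nltk
    nltk.download("punkt")   # fetched through the proxy, if one is configured
# outside the block, the process reverts to its normal network settings
```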

View File

@@ -120,7 +120,6 @@ CHATGLM_PTUNING_CHECKPOINT = "" # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b
LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
LOCAL_MODEL_QUANT = "FP16" # default "FP16"; "INT4" enables the INT4-quantized variant; "INT8" enables the INT8-quantized variant
# Number of parallel gradio threads (no need to modify)
CONCURRENT_COUNT = 100
@@ -238,6 +237,10 @@ WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
BLOCK_INVALID_APIKEY = False
# Enable plugin hot reloading
PLUGIN_HOT_RELOAD = False
# Maximum number of custom buttons
NUM_CUSTOM_BASIC_BTN = 4
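For reference, a minimal sketch of how the new options are read at runtime through the project's get_conf helper (the printed values assume the defaults above):

```python
# Minimal sketch: read the newly added options via toolbox.get_conf.
from toolbox import get_conf

quant, hot_reload, n_btn = get_conf('LOCAL_MODEL_QUANT', 'PLUGIN_HOT_RELOAD', 'NUM_CUSTOM_BASIC_BTN')
print(quant)       # "FP16" unless overridden in config_private.py or an env var
print(hot_reload)  # False: plugins are imported once and kept
print(n_btn)       # 4: at most four custom basic buttons
```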

View File

@@ -440,7 +440,7 @@ def get_crazy_functions():
print('Load function plugin failed')
try:
from crazy_functions.Langchain知识库 import 知识库问答
from crazy_functions.知识库问答 import 知识库文件注入
function_plugins.update({
"构建知识库(先上传文件素材,再运行此插件)": {
"Group": "对话",
@@ -448,7 +448,7 @@ def get_crazy_functions():
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式向知识库追加更多文档。",
"Function": HotReload(知识库问答)
"Function": HotReload(知识库文件注入)
}
})
except:
@@ -456,9 +456,9 @@ def get_crazy_functions():
print('Load function plugin failed')
try:
from crazy_functions.Langchain知识库 import 读取知识库作答
from crazy_functions.知识库问答 import 读取知识库作答
function_plugins.update({
"知识库问答(构建知识库后,再运行此插件)": {
"知识库文件注入(构建知识库后,再运行此插件)": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
@@ -590,6 +590,20 @@ def get_crazy_functions():
print(trimmed_format_exc())
print('Load function plugin failed')
# try:
# from crazy_functions.互动小游戏 import 随机小游戏
# function_plugins.update({
# "随机小游戏": {
# "Group": "智能体",
# "Color": "stop",
# "AsButton": True,
# "Function": HotReload(随机小游戏)
# }
# })
# except:
# print(trimmed_format_exc())
# print('Load function plugin failed')
# try:
# from crazy_functions.chatglm微调工具 import 微调数据集生成
# function_plugins.update({

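The hunks above all follow one registration pattern; a minimal sketch of what a new entry needs (the plugin module and function names are hypothetical):

```python
# Minimal sketch of registering a plugin inside get_crazy_functions().
# "crazy_functions.MyPlugin" and my_plugin_fn are hypothetical names.
from crazy_functions.MyPlugin import my_plugin_fn

function_plugins.update({
    "My plugin button label": {
        "Group": "对话",           # toolbar group the entry belongs to
        "Color": "stop",           # button color variant
        "AsButton": False,         # list in the dropdown rather than as a button
        "AdvancedArgs": True,      # expose the advanced-argument textbox
        "ArgsReminder": "hint shown above the advanced-argument box",
        "Function": HotReload(my_plugin_fn),  # wrap for hot reloading
    }
})
```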
View File

@@ -1,4 +1,4 @@
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
import threading
import os
import logging
@@ -632,89 +632,6 @@ def get_files_from_everything(txt, type): # type='.md'
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < ------------------- warm up the text2vec module --------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate('Download_LLM'): # temporarily activate the proxy network
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self):
return self.qa_handle.get_loaded_file()
def answer_with_archive_by_id(self, txt, id):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
@Singleton
class nougat_interface():
def __init__(self):

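The Singleton decorator deleted here now lives in toolbox (note the new import at the top of this file); a minimal sketch of its behavior, with a hypothetical demo class:

```python
# Minimal sketch: toolbox.Singleton caches one instance per decorated class.
from toolbox import Singleton

@Singleton
class Counter():          # hypothetical demo class
    def __init__(self):
        self.n = 0

a = Counter()
b = Counter()
a.n += 1
print(b.n)     # 1: a and b are the same cached instance
print(a is b)  # True
```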
View File

@@ -0,0 +1,35 @@
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
from request_llms.bridge_all import predict_no_ui_long_connection
def get_code_block(reply):
import re
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
matches = re.findall(pattern, reply) # find all code blocks in text
if len(matches) == 1:
return "```" + matches[0] + "```" # code block
raise RuntimeError("GPT is not generating proper code.")
def is_same_thing(a, b, llm_kwargs):
from pydantic import BaseModel, Field
class IsSameThing(BaseModel):
is_same_thing: bool = Field(description="determine whether two objects are the same thing.", default=False)
def run_gpt_fn(inputs, sys_prompt, history=[]):
return predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs,
history=history, sys_prompt=sys_prompt, observe_window=[]
)
gpt_json_io = GptJsonIO(IsSameThing)
inputs_01 = "Identity whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b)
inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing."
analyze_res_cot_01 = run_gpt_fn(inputs_01, "", [])
inputs_02 = inputs_01 + gpt_json_io.format_instructions
analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01])
try:
res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
return res.is_same_thing
except JsonStringError as e:
return False
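A usage sketch of the two helpers (llm_kwargs is assumed to be the kwargs dict every plugin receives; the is_same_thing call needs a live LLM backend):

```python
# Minimal sketch: exercise get_code_block and is_same_thing.
fence = "`" * 3   # avoid writing a literal fence inside this example
reply = f"Sure, here is the drawing:\n{fence}\n /\\_/\\\n( o.o )\n{fence}"
print(get_code_block(reply))   # returns just the fenced ASCII-art block

# is_same_thing() tolerates cross-language paraphrases, e.g. "cat" vs "猫":
# is_same_thing("cat", "猫", llm_kwargs)   # expected: True
```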

View File

@@ -1,6 +1,7 @@
from pydantic import BaseModel, Field
from typing import List
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
import time
import pickle
@@ -36,6 +37,57 @@ class GptAcademicState():
state.chatbot = chatbot
return state
class GatherMaterials():
def __init__(self, materials=None) -> None:
self.materials = materials if materials is not None else ['image', 'prompt']
class GptAcademicGameBaseState():
"""
1. first init: __init__ ->
"""
def init_game(self, chatbot, lock_plugin):
self.plugin_name = None
self.callback_fn = None
self.delete_game = False
self.step_cnt = 0
def lock_plugin(self, chatbot):
if self.callback_fn is None:
raise ValueError("callback_fn is None")
chatbot._cookies['lock_plugin'] = self.callback_fn
self.dump_state(chatbot)
def get_plugin_name(self):
if self.plugin_name is None:
raise ValueError("plugin_name is None")
return self.plugin_name
def dump_state(self, chatbot):
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
def set_state(self, chatbot, key, value):
setattr(self, key, value)
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
@staticmethod
def sync_state(chatbot, llm_kwargs, cls, plugin_name, callback_fn, lock_plugin=True):
state = chatbot._cookies.get(f'plugin_state/{plugin_name}', None)
if state is not None:
state = pickle.loads(state)
else:
state = cls()
state.init_game(chatbot, lock_plugin)
state.plugin_name = plugin_name
state.llm_kwargs = llm_kwargs
state.chatbot = chatbot
state.callback_fn = callback_fn
return state
def continue_game(self, prompt, chatbot, history):
# main body of the game
yield from self.step(prompt, chatbot, history)
self.step_cnt += 1
# save state and wrap up
self.dump_state(chatbot)
# clean up if the game is over
if self.delete_game:
chatbot._cookies['lock_plugin'] = None
chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = None
yield from update_ui(chatbot=chatbot, history=history)
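To make the lifecycle concrete, here is a minimal sketch of a subclass (class name and callback path are hypothetical); the real MiniGame_ASCII_Art later in this commit follows the same shape:

```python
# Minimal sketch of a game built on GptAcademicGameBaseState.
from toolbox import update_ui

class MiniGame_Echo(GptAcademicGameBaseState):   # hypothetical game
    def step(self, prompt, chatbot, history):
        if self.step_cnt == 0:
            chatbot.append(["Echo game started", "Say something, or 'exit' to quit."])
            self.lock_plugin(chatbot)    # route every next message back here
        elif prompt.strip() == 'exit':
            self.delete_game = True      # continue_game() will then unlock the plugin
            chatbot.append([prompt, "Bye!"])
        else:
            chatbot.append([prompt, f"You said: {prompt}"])
        yield from update_ui(chatbot=chatbot, history=history)

# In the plugin entry point (hypothetical callback path):
# state = MiniGame_Echo.sync_state(chatbot, llm_kwargs, MiniGame_Echo,
#                                  plugin_name='MiniGame_Echo',
#                                  callback_fn='crazy_functions.my_games->echo_game',
#                                  lock_plugin=True)
# yield from state.continue_game(prompt, chatbot, history)
```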

View File

@@ -0,0 +1,70 @@
# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text1(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
def split_text(self, text: str) -> List[str]: ## the logic here could use further optimization
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
text = re.sub(r'([;.!?。!?\?])([^”’])', r"\1\n\2", text) # single-character sentence terminators
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # English ellipsis
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # Chinese ellipsis
text = re.sub(r'([;!?。!?\?]["’”」』]{0,2})([^;!?,。!?\?])', r'\1\n\2', text)
# a closing quote only ends a sentence when a terminator precedes it, so the \n goes after the quote (note that the rules above carefully preserve the quotes)
text = text.rstrip() # drop any extra trailing \n at the end of the paragraph
# many rule sets also treat the semicolon ; as a delimiter, but it is ignored here, as are dashes and English double quotes; simple tweaks cover those if needed
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,.]["’”」』]{0,2})([^,.])', r'\1\n\2', ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
ele2_id + 1:]
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
return ls
def load_file(filepath, sentence_size):
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
# write_check_file(filepath, docs)
return docs
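A usage sketch of the splitter (the sample sentence is illustrative; the exact chunking also depends on sentence_size):

```python
# Minimal sketch: split Chinese text into sentence-sized chunks.
splitter = ChineseTextSplitter(pdf=False, sentence_size=100)
chunks = splitter.split_text("今天天气很好。我们去公园散步吧!你觉得怎么样?")
print(chunks)   # roughly: ['今天天气很好。', '我们去公园散步吧!', '你觉得怎么样?']

# load_file() wires the same splitter into UnstructuredFileLoader:
# docs = load_file("some_document.md", sentence_size=100)   # hypothetical path
```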

View File

@@ -0,0 +1,338 @@
# From project chatglm-langchain
import threading
from toolbox import Singleton
import os
import shutil
import uuid
from tqdm import tqdm
from langchain.vectorstores import FAISS
from langchain.docstore.document import Document
from typing import List, Tuple
import numpy as np
from crazy_functions.vector_fns.general_file_loader import load_file
embedding_model_dict = {
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
"ernie-base": "nghuyong/ernie-3.0-base-zh",
"text2vec-base": "shibing624/text2vec-base-chinese",
"text2vec": "GanymedeNil/text2vec-large-chinese",
}
# Embedding model name
EMBEDDING_MODEL = "text2vec"
# Embedding running device
EMBEDDING_DEVICE = "cpu"
# context-based prompt template; be sure to keep "{question}" and "{context}"
PROMPT_TEMPLATE = """已知信息:
{context}
根据上述已知信息简洁和专业的来回答用户的问题如果无法从中得到答案请说 根据已知信息无法回答该问题 没有提供足够的相关信息不允许在答案中添加编造成分答案请使用中文 问题是{question}"""
# sentence-splitting length
SENTENCE_SIZE = 100
# context length of each matched chunk
CHUNK_SIZE = 250
# LLM input history length
LLM_HISTORY_LEN = 3
# return top-k text chunk from vector store
VECTOR_SEARCH_TOP_K = 5
# relevance score threshold for knowledge retrieval, roughly in the range 0-1100; 0 disables it; testing shows values below 500 give more precise matches
VECTOR_SEARCH_SCORE_THRESHOLD = 0
NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")
FLAG_USER_NAME = uuid.uuid4().hex
# whether to allow cross-origin requests; defaults to False, set to True to enable
OPEN_CROSS_DOMAIN = False
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
def seperate_list(ls: List[int]) -> List[List[int]]:
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
if ls[i - 1] + 1 == ls[i]:
ls1.append(ls[i])
else:
lists.append(ls1)
ls1 = [ls[i]]
lists.append(ls1)
return lists
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
docs = []
id_set = set()
store_len = len(self.index_to_docstore_id)
for j, i in enumerate(indices[0]):
if i == -1 or 0 < self.score_threshold < scores[0][j]:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not self.chunk_conent:
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc.metadata["score"] = int(scores[0][j])
docs.append(doc)
continue
id_set.add(i)
docs_len = len(doc.page_content)
for k in range(1, max(i, store_len - i)):
break_flag = False
for l in [i + k, i - k]:
if 0 <= l < len(self.index_to_docstore_id):
_id0 = self.index_to_docstore_id[l]
doc0 = self.docstore.search(_id0)
if docs_len + len(doc0.page_content) > self.chunk_size:
break_flag = True
break
elif doc0.metadata["source"] == doc.metadata["source"]:
docs_len += len(doc0.page_content)
id_set.add(l)
if break_flag:
break
if not self.chunk_conent:
return docs
if len(id_set) == 0 and self.score_threshold > 0:
return []
id_list = sorted(list(id_set))
id_lists = seperate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
_id = self.index_to_docstore_id[id]
doc = self.docstore.search(_id)
else:
_id0 = self.index_to_docstore_id[id]
doc0 = self.docstore.search(_id0)
doc.page_content += " " + doc0.page_content
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
doc.metadata["score"] = int(doc_score)
docs.append(doc)
return docs
class LocalDocQA:
llm: object = None
embeddings: object = None
top_k: int = VECTOR_SEARCH_TOP_K
chunk_size: int = CHUNK_SIZE
chunk_conent: bool = True
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
def init_cfg(self,
top_k=VECTOR_SEARCH_TOP_K,
):
self.llm = None
self.top_k = top_k
def init_knowledge_vector_store(self,
filepath,
vs_path: str or os.PathLike = None,
sentence_size=SENTENCE_SIZE,
text2vec=None):
loaded_files = []
failed_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("路径不存在")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = load_file(filepath, SENTENCE_SIZE)
print(f"{file} 已成功加载")
loaded_files.append(filepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
return None
elif os.path.isdir(filepath):
docs = []
for file in tqdm(os.listdir(filepath), desc="加载文件"):
fullfilepath = os.path.join(filepath, file)
try:
docs += load_file(fullfilepath, SENTENCE_SIZE)
loaded_files.append(fullfilepath)
except Exception as e:
print(e)
failed_files.append(file)
if len(failed_files) > 0:
print("以下文件未能成功加载:")
for file in failed_files:
print(f"{file}\n")
else:
docs = []
for file in filepath:
docs += load_file(file, SENTENCE_SIZE)
print(f"{file} 已成功加载")
loaded_files.append(file)
if len(docs) > 0:
print("文件加载完毕,正在生成向量库")
if vs_path and os.path.isdir(vs_path):
try:
self.vector_store = FAISS.load_local(vs_path, text2vec)
self.vector_store.add_documents(docs)
except:
self.vector_store = FAISS.from_documents(docs, text2vec)
else:
self.vector_store = FAISS.from_documents(docs, text2vec) # docs is a list of Document objects
self.vector_store.save_local(vs_path)
return vs_path, loaded_files
else:
raise RuntimeError("文件加载失败,请检查文件格式是否正确")
def get_loaded_file(self, vs_path):
ds = self.vector_store.docstore
return set([ds._dict[k].metadata['source'].split(vs_path)[-1] for k in ds._dict])
# query: the query text
# vs_path: path to the knowledge base
# chunk_conent: whether to enable context chaining
# score_threshold: score threshold for matching
# vector_search_top_k: number of knowledge-base entries to retrieve (defaults to 5)
# chunk_size: length of the chained context for each matched chunk
def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
text2vec=None):
self.vector_store = FAISS.load_local(vs_path, text2vec)
self.vector_store.chunk_conent = chunk_conent
self.vector_store.score_threshold = score_threshold
self.vector_store.chunk_size = chunk_size
embedding = self.vector_store.embedding_function.embed_query(query)
related_docs_with_score = similarity_search_with_score_by_vector(self.vector_store, embedding, k=vector_search_top_k)
if not related_docs_with_score:
response = {"query": query,
"source_documents": []}
return response, ""
# prompt = f"{query}. You should answer this question using information from following documents: \n\n"
prompt = f"{query}. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n"
prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)])
prompt += "\n\n---\n\n"
prompt = prompt.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
# print(prompt)
response = {"query": query, "source_documents": related_docs_with_score}
return response, prompt
def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
for file in files:
assert os.path.exists(file), "输入文件不存在:" + file
import nltk
if NLTK_DATA_PATH not in nltk.data.path: nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
local_doc_qa = LocalDocQA()
local_doc_qa.init_cfg()
filelist = []
if not os.path.exists(os.path.join(vs_path, vs_id)):
os.makedirs(os.path.join(vs_path, vs_id))
for file in files:
file_name = file.name if not isinstance(file, str) else file
filename = os.path.split(file_name)[-1]
shutil.copyfile(file_name, os.path.join(vs_path, vs_id, filename))
filelist.append(os.path.join(vs_path, vs_id, filename))
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, os.path.join(vs_path, vs_id), sentence_size, text2vec)
if len(loaded_files):
file_status = f"已添加 {''.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问"
else:
pass
# file_status = "文件未成功加载,请重新上传文件"
# print(file_status)
return local_doc_qa, vs_path
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < ------------------- warm up the text2vec module --------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate('Download_LLM'): # temporarily activate the proxy network
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, vs_path, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
vs_path=vs_path,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self, vs_path):
return self.qa_handle.get_loaded_file(vs_path)
def answer_with_archive_by_id(self, txt, id, vs_path):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
vs_path=vs_path,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
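Putting the interface together, a minimal end-to-end sketch (the file list and vs_path are illustrative; the first call downloads the text2vec embedding model):

```python
# Minimal sketch: build a knowledge base, then query it.
from crazy_functions.vector_fns.vector_database import knowledge_archive_interface

kai = knowledge_archive_interface()   # @Singleton: every caller shares this instance
kai.feed_archive(file_manifest=["./README.md"], vs_path="./vec_store", id="default")

resp, prompt = kai.answer_with_archive_by_id("如何安装?", "default", vs_path="./vec_store")
print(prompt)   # the retrieved chunks, already formatted as an LLM prompt
```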

View File

@@ -1,159 +1,59 @@ from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random
class 小游戏(GptAcademicState):
def __init__(self):
self.need_game_reset = True
self.llm_kwargs = None
super().__init__()
def lock_plugin(self, chatbot):
chatbot._cookies['lock_plugin'] = 'crazy_functions.互动小游戏->谁是卧底'
self.dump_state(chatbot)
class MiniGame_ASCII_Art(GptAcademicGameBaseState):
def unlock_plugin(self, chatbot):
self.reset()
chatbot._cookies['lock_plugin'] = None
self.dump_state(chatbot)
def set_state(self, chatbot, key, value):
return super().set_state(chatbot, key, value)
def init_game(self, chatbot):
chatbot.get_cookies()['lock_plugin'] = ''
def clean_up_game(self, chatbot):
chatbot.get_cookies()['lock_plugin'] = None
def init_player(self):
pass
def step(self, prompt, chatbot):
pass
def continue_game(self, prompt, chatbot):
if self.need_game_reset:
self.need_game_reset = False
yield from self.init_game(chatbot)
yield from self.step(prompt, chatbot)
self.dump_state(chatbot)
yield update_ui(chatbot=chatbot, history=[])
class 小游戏_谁是卧底_玩家():
def __init__(self, game_handle, card, llm_model, name) -> None:
self.game_handle = game_handle
self.card = card
self.name = name
self.is_out = False
self.llm_model = llm_model
self.is_human = llm_model == 'human'
self.what_player_has_spoken = []
def speek(self, content=None):
if content is None:
assert not self.is_human
speak_what = yield from
def step(self, prompt, chatbot, history):
if self.step_cnt == 0:
chatbot.append(["我画你猜(动物)", "请稍等..."])
else:
self.what_player_has_spoken.append(content)
if prompt.strip() == 'exit':
self.delete_game = True
yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
return
chatbot.append([prompt, ""])
yield from update_ui(chatbot=chatbot, history=history)
def agi_speek(self):
inputs = f'please say something about {self.card}'
res = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs = inputs,
inputs_show_user=inputs,
llm_kwargs=self.game_handle.llm_kwargs,
chatbot=chatbot,
history=history,
sys_prompt=sys_prompt
)
pass
if self.step_cnt == 0:
self.lock_plugin(chatbot)
self.cur_task = 'draw'
def vote(self, content=None):
if content is None:
assert not self.is_human
self.vote_who = yield from
else:
try:
self.vote_who = int(content)
except:
self.vote_who = None
if self.cur_task == 'draw':
avail_obj = ["猫","狗","鸟","鱼","老鼠","蛇"]
self.obj = random.choice(avail_obj)
inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. "
raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
self.cur_task = 'identify user guess'
res = get_code_block(raw_res)
history += ['', f'the answer is {self.obj}', inputs, res]
yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
def agi_vote(self):
pass
class 小游戏_谁是卧底(小游戏):
def __init__(self):
self.game_phase = '发言' # 投票
super().__init__()
def init_game(self, chatbot):
self.n_players = 3
self.n_ai_players = self.n_players - 1
card = "橙子"
undercover_card = "橘子"
llm_model = self.llm_kwargs['llm_model']
self.players = [
小游戏_谁是卧底(self, card, llm_model, str(i)) for i in range(self.n_players)
]
undercover = random.randint(0, self.n_players-1)
human = 0
self.players[undercover].card = undercover_card
self.players[human].llm_model = 'human'
super().init_game(chatbot)
def who_is_out(self):
votes = {}
for player in self.players:
if player.is_out: continue
if player.vote is None: continue
if player.vote not in votes: votes[player.vote] = 0
votes[player.vote] += 1
max_votes = max(votes.values())
players_with_max_votes = [player for player, vote_count in votes.items() if vote_count == max_votes]
for player in players_with_max_votes:
print('淘汰了', player.name)
player.is_out = True
return players_with_max_votes
def step(self, prompt, chatbot):
if self.game_phase == '发言':
for player in self.players:
if player.is_out: continue
if player.is_human:
player.speek(prompt)
else:
player.speek()
self.game_phase = '投票'
elif self.game_phase == '投票':
for player in self.players:
if player.is_out: continue
if player.is_human:
player.vote(prompt)
else:
player.vote()
self.who_is_out()
if len([player for player in self.players if not player.is_out]) <= 2:
if sum([player for player in self.players if player.is_undercover]) == 1:
print('卧底获胜')
else:
print('平民获胜')
self.need_game_reset = True
self.game_phase = '发言'
else:
raise RuntimeError
elif self.cur_task == 'identify user guess':
if is_same_thing(self.obj, prompt, self.llm_kwargs):
self.delete_game = True
yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
else:
self.cur_task = 'identify user guess'
yield from update_ui_lastest_msg(lastmsg="猜错了再试试输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
@CatchException
def 谁是卧底(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# unfinished
history = [] # clear history
state = 小游戏_谁是卧底.get_state(chatbot, 小游戏_谁是卧底)
state.llm_kwargs = llm_kwargs
yield from state.continue_game(prompt, chatbot)
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# clear history
history = []
# select the game
cls = MiniGame_ASCII_Art
# if a game instance was initialized earlier, continue it; otherwise initialize a new one
state = cls.sync_state(chatbot,
llm_kwargs,
cls,
plugin_name='MiniGame_ASCII_Art',
callback_fn='crazy_functions.互动小游戏->随机小游戏',
lock_plugin=True
)
yield from state.continue_game(prompt, chatbot, history)

View File

@@ -1,10 +1,19 @@ from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
install_msg ="""
1. python -m pip install torch --index-url https://download.pytorch.org/whl/cpu
2. python -m pip install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
3. python -m pip install unstructured[all-docs] --upgrade
4. python -c 'import nltk; nltk.download("punkt")'
"""
@CatchException
def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt: text entered by the user in the input field, e.g. a paragraph that needs translating, or a path containing files to be processed
llm_kwargs: GPT model parameters, such as temperature and top_p; usually passed through unchanged
@@ -25,15 +34,15 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
# resolve deps
try:
from zh_langchain import construct_vector_store
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from .crazy_utils import knowledge_archive_interface
# from zh_langchain import construct_vector_store
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
except Exception as e:
chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
from .crazy_utils import try_install_deps
try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
# from .crazy_utils import try_install_deps
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
return
# < -------------------- read files --------------- >
@@ -62,13 +71,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
print('Establishing knowledge archive ...')
with ProxyNetworkActivate('Download_LLM'): # temporarily activate the proxy network
kai = knowledge_archive_interface()
kai.feed_archive(file_manifest=file_manifest, id=kai_id)
kai_files = kai.get_loaded_file()
vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
kai.feed_archive(file_manifest=file_manifest, vs_path=vs_path, id=kai_id)
kai_files = kai.get_loaded_file(vs_path=vs_path)
kai_files = '<br/>'.join(kai_files)
# chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
# yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
# chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
# chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
# chatbot._cookies['lock_plugin'] = 'crazy_functions.知识库文件注入->读取知识库作答'
# chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # requesting GPT takes a while, so push a timely UI update first
@@ -77,15 +87,15 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
# resolve deps
try:
from zh_langchain import construct_vector_store
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from .crazy_utils import knowledge_archive_interface
# from zh_langchain import construct_vector_store
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
except Exception as e:
chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
from .crazy_utils import try_install_deps
try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
# from .crazy_utils import try_install_deps
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
return
# < ------------------- read parameters --------------- >
@@ -93,7 +103,8 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
kai_id = plugin_kwargs.get("advanced_arg", 'default')
resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
resp, prompt = kai.answer_with_archive_by_id(txt, kai_id, vs_path)
chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # requesting GPT takes a while, so push a timely UI update first

View File

@@ -0,0 +1,26 @@
# This Dockerfile targets environments without local models; if you need local models such as chatglm, see docs/Dockerfile+ChatGLM
# How to build: edit `config.py` first, then run: docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
# How to run: docker run --rm -it --net=host gpt-academic-nolocal-vs
FROM python:3.11
# set the working directory
WORKDIR /gpt
# copy the project files
COPY . .
# install dependencies
RUN pip3 install -r requirements.txt
# install extra dependencies for the knowledge-base plugin
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
# optional step: warm up the modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
# launch
CMD ["python3", "-u", "main.py"]

View File

@@ -2902,5 +2902,107 @@
"高优先级": "High priority",
"请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY",
"单个azure模型": "Single Azure model",
"预留参数 context 未实现": "Reserved parameter 'context' not implemented"
"预留参数 context 未实现": "Reserved parameter 'context' not implemented",
"在输入区输入临时API_KEY后提交": "Submit after entering temporary API_KEY in the input area",
"鸟": "Bird",
"图片中需要修改的位置用橡皮擦擦除为纯白色": "Erase the areas in the image that need to be modified with an eraser to pure white",
"└── PDF文档精准解析": "└── Accurate parsing of PDF documents",
"└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "└── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description",
"等待指令": "Waiting for instructions",
"不存在": "Does not exist",
"选择游戏": "Select game",
"本地大模型示意图": "Local large model diagram",
"无视此消息即可": "You can ignore this message",
"即RGB=255": "That is, RGB=255",
"如需追问": "If you have further questions",
"也可以是具体的模型路径": "It can also be a specific model path",
"才会起作用": "Will take effect",
"下载失败": "Download failed",
"网页刷新后失效": "Invalid after webpage refresh",
"crazy_functions.互动小游戏-": "crazy_functions.Interactive mini game-",
"右对齐": "Right alignment",
"您可以调用下拉菜单中的“LoadConversationHistoryArchive”还原当下的对话": "You can use the 'LoadConversationHistoryArchive' in the drop-down menu to restore the current conversation",
"左对齐": "Left alignment",
"使用默认的 FP16": "Use default FP16",
"一小时": "One hour",
"从而方便内存的释放": "Thus facilitating memory release",
"如何临时更换API_KEY": "How to temporarily change API_KEY",
"请输入 1024x1024-HD": "Please enter 1024x1024-HD",
"使用 INT8 量化": "Use INT8 quantization",
"3. 输入修改需求": "3. Enter modification requirements",
"刷新界面 由于请求gpt需要一段时间": "Refreshing the interface takes some time due to the request for gpt",
"随机小游戏": "Random mini game",
"那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型": "So please specify the specific model in QWEN_MODEL_SELECTION below",
"表值": "Table value",
"我画你猜": "I draw, you guess",
"狗": "Dog",
"2. 输入分辨率": "2. Enter resolution",
"鱼": "Fish",
"尚未完成": "Not yet completed",
"表头": "Table header",
"填localhost或者127.0.0.1": "Fill in localhost or 127.0.0.1",
"请上传jpg格式的图片": "Please upload images in jpg format",
"API_URL_REDIRECT填写格式是错误的": "The format of API_URL_REDIRECT is incorrect",
"├── RWKV的支持见Wiki": "Support for RWKV is available in the Wiki",
"如果中文Prompt效果不理想": "If the Chinese prompt is not effective",
"/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix": "/SEAFILE_LOCAL/50503047/My Library/Degree/paperlatex/aaai/Fu_8368_with_appendix",
"只有当AVAIL_LLM_MODELS包含了对应本地模型时": "Only when AVAIL_LLM_MODELS contains the corresponding local model",
"选择本地模型变体": "Choose the local model variant",
"如果您确信自己没填错": "If you are sure you haven't made a mistake",
"PyPDF2这个库有严重的内存泄露问题": "PyPDF2 library has serious memory leak issues",
"整理文件集合 输出消息": "Organize file collection and output message",
"没有检测到任何近期上传的图像文件": "No recently uploaded image files detected",
"游戏结束": "Game over",
"调用结束": "Call ended",
"猫": "Cat",
"请及时切换模型": "Please switch models in time",
"次中": "In the meantime",
"如需生成高清图像": "If you need to generate high-definition images",
"CPU 模式": "CPU mode",
"项目目录": "Project directory",
"动物": "Animal",
"居中对齐": "Center alignment",
"请注意拓展名需要小写": "Please note that the extension name needs to be lowercase",
"重试第": "Retry",
"实验性功能": "Experimental feature",
"猜错了": "Wrong guess",
"打开你的代理软件查看代理协议": "Open your proxy software to view the proxy agreement",
"您不需要再重复强调该文件的路径了": "You don't need to emphasize the file path again",
"请阅读": "Please read",
"请直接输入您的问题": "Please enter your question directly",
"API_URL_REDIRECT填错了": "API_URL_REDIRECT is filled incorrectly",
"谜底是": "The answer is",
"第一个模型": "The first model",
"你猜对了!": "You guessed it right!",
"已经接收到您上传的文件": "The file you uploaded has been received",
"您正在调用“图像生成”插件": "You are calling the 'Image Generation' plugin",
"刷新界面 界面更新": "Refresh the interface, interface update",
"如果之前已经初始化了游戏实例": "If the game instance has been initialized before",
"文件": "File",
"老鼠": "Mouse",
"列2": "Column 2",
"等待图片": "Waiting for image",
"使用 INT4 量化": "Use INT4 quantization",
"from crazy_functions.互动小游戏 import 随机小游戏": "TranslatedText",
"游戏主体": "TranslatedText",
"该模型不具备上下文对话能力": "TranslatedText",
"列3": "TranslatedText",
"清理": "TranslatedText",
"检查量化配置": "TranslatedText",
"如果游戏结束": "TranslatedText",
"蛇": "TranslatedText",
"则继续该实例;否则重新初始化": "TranslatedText",
"e.g. cat and 猫 are the same thing": "TranslatedText",
"第三个模型": "TranslatedText",
"如果你选择Qwen系列的模型": "TranslatedText",
"列4": "TranslatedText",
"输入“exit”获取答案": "TranslatedText",
"把它放到子进程中运行": "TranslatedText",
"列1": "TranslatedText",
"使用该模型需要额外依赖": "TranslatedText",
"再试试": "TranslatedText",
"1. 上传图片": "TranslatedText",
"保存状态": "TranslatedText",
"GPT-Academic对话存档": "TranslatedText",
"Arxiv论文精细翻译": "TranslatedText"
}

View File

@@ -182,12 +182,12 @@ cached_translation = read_map_from_json(language=LANG)
def trans(word_to_translate, language, special=False):
if len(word_to_translate) == 0: return {}
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from toolbox import get_conf, ChatBotWithCookies
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
cookies = load_chat_cookies()
llm_kwargs = {
'api_key': API_KEY,
'llm_model': LLM_MODEL,
'api_key': cookies['api_key'],
'llm_model': cookies['llm_model'],
'top_p':1.0,
'max_length': None,
'temperature':0.4,
@@ -245,15 +245,15 @@ def trans(word_to_translate, language, special=False):
def trans_json(word_to_translate, language, special=False):
if len(word_to_translate) == 0: return {}
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from toolbox import get_conf, ChatBotWithCookies
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
cookies = load_chat_cookies()
llm_kwargs = {
'api_key': API_KEY,
'llm_model': LLM_MODEL,
'api_key': cookies['api_key'],
'llm_model': cookies['llm_model'],
'top_p':1.0,
'max_length': None,
'temperature':0.1,
'temperature':0.4,
}
import random
N_EACH_REQ = random.randint(16, 32)

View File

@@ -552,7 +552,7 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
"fn_with_ui": deepseekcoder_ui,
"fn_without_ui": deepseekcoder_noui,
"endpoint": None,
"max_token": 4096,
"max_token": 2048,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}

View File

@@ -6,6 +6,7 @@ from toolbox import ProxyNetworkActivate
from toolbox import get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
from threading import Thread
import torch
def download_huggingface_model(model_name, max_retry, local_dir):
from huggingface_hub import snapshot_download
@@ -36,9 +37,46 @@ class GetCoderLMHandle(LocalLLMHandle):
# tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
self._streamer = TextIteratorStreamer(tokenizer)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": 0,
"transformer.h": 0,
"transformer.ln_f": 0,
"model.embed_tokens": 0,
"model.layers": 0,
"model.norm": 0,
}
# check the quantization configuration
quantization_type = get_conf('LOCAL_MODEL_QUANT')
if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
model = model.cuda()
if quantization_type == "INT8":
from transformers import BitsAndBytesConfig
# use INT8 quantization
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
device_map=device_map)
elif quantization_type == "INT4":
from transformers import BitsAndBytesConfig
# use INT4 quantization
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
quantization_config=bnb_config, device_map=device_map)
else:
# use the default FP16
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
torch_dtype=torch.bfloat16, device_map=device_map)
else:
# CPU mode
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
torch_dtype=torch.bfloat16)
return model, tokenizer
def llm_stream_generator(self, **kwargs):
@@ -54,7 +92,10 @@ class GetCoderLMHandle(LocalLLMHandle):
query, max_length, top_p, temperature, history = adaptor(kwargs)
history.append({ 'role': 'user', 'content': query})
messages = history
inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt").to(self._model.device)
inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
if inputs.shape[1] > max_length:
inputs = inputs[:, -max_length:]
inputs = inputs.to(self._model.device)
generation_kwargs = dict(
inputs=inputs,
max_new_tokens=max_length,

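The handle above feeds tokens through TextIteratorStreamer from a background generation thread; a self-contained sketch of that pattern (using a small stand-in checkpoint rather than deepseek-coder):

```python
# Minimal sketch of the generate-in-a-thread / stream-in-a-loop pattern.
# "gpt2" is an illustrative stand-in for the deepseek-coder checkpoint.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
streamer = TextIteratorStreamer(tokenizer)

inputs = tokenizer(["def fib(n):"], return_tensors="pt")
Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=32)).start()

for new_text in streamer:   # yields decoded text chunks as they are generated
    print(new_text, end="", flush=True)
```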
View File

@@ -48,11 +48,11 @@ if __name__ == "__main__":
# for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
# plugin_test(plugin='crazy_functions.批量Markdown翻译->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
# plugin_test(plugin='crazy_functions.Langchain知识库->知识库问答', main_input="./")
# plugin_test(plugin='crazy_functions.知识库文件注入->知识库文件注入', main_input="./")
# plugin_test(plugin='crazy_functions.Langchain知识库->读取知识库作答', main_input="What is the installation method")
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="What is the installation method")
# plugin_test(plugin='crazy_functions.Langchain知识库->读取知识库作答', main_input="远程云服务器部署?")
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
# plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2210.03629")

View File

@@ -56,11 +56,11 @@ vt.get_plugin_handle = silence_stdout_fn(get_plugin_handle)
vt.get_plugin_default_kwargs = silence_stdout_fn(get_plugin_default_kwargs)
vt.get_chat_handle = silence_stdout_fn(get_chat_handle)
vt.get_chat_default_kwargs = silence_stdout_fn(get_chat_default_kwargs)
vt.chat_to_markdown_str = chat_to_markdown_str
vt.chat_to_markdown_str = (chat_to_markdown_str)
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
vt.get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
def plugin_test(main_input, plugin, advanced_arg=None):
def plugin_test(main_input, plugin, advanced_arg=None, debug=True):
from rich.live import Live
from rich.markdown import Markdown
@@ -72,7 +72,10 @@ def plugin_test(main_input, plugin, advanced_arg=None):
plugin_kwargs['main_input'] = main_input
if advanced_arg is not None:
plugin_kwargs['plugin_kwargs'] = advanced_arg
my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
if debug:
my_working_plugin = (plugin)(**plugin_kwargs)
else:
my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
for cookies, chat, hist, msg in my_working_plugin:

View File

@@ -0,0 +1,17 @@
"""
Test the individual plugins in this project. How to run: python tests/test_plugins.py directly.
"""
import os, sys
def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)
validate_path() # switch the working directory to the project root
if __name__ == "__main__":
from tests.test_utils import plugin_test
plugin_test(plugin='crazy_functions.知识库问答->知识库文件注入', main_input="./README.md")
plugin_test(plugin='crazy_functions.知识库问答->读取知识库作答', main_input="What is the installation method")
plugin_test(plugin='crazy_functions.知识库问答->读取知识库作答', main_input="远程云服务器部署?")

View File

@@ -180,12 +180,15 @@ def HotReload(f):
Finally, the yield from statement returns the reloaded function and runs it in place of the decorated one.
The decorator ultimately returns an inner function, which refreshes the function's original definition to the latest version before executing the new version.
"""
@wraps(f)
def decorated(*args, **kwargs):
fn_name = f.__name__
f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
yield from f_hot_reload(*args, **kwargs)
return decorated
if get_conf('PLUGIN_HOT_RELOAD'):
@wraps(f)
def decorated(*args, **kwargs):
fn_name = f.__name__
f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
yield from f_hot_reload(*args, **kwargs)
return decorated
else:
return f
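A short sketch of what the new switch changes (plugin names hypothetical):

```python
# Minimal sketch: effect of the PLUGIN_HOT_RELOAD switch on HotReload.
from toolbox import HotReload
from crazy_functions.MyPlugin import my_plugin_fn   # hypothetical plugin

wrapped = HotReload(my_plugin_fn)
# PLUGIN_HOT_RELOAD = False: wrapped is my_plugin_fn itself (zero overhead).
# PLUGIN_HOT_RELOAD = True:  every call re-imports crazy_functions.MyPlugin and
# dispatches to the freshly loaded my_plugin_fn, so source edits take effect
# without restarting the app.
```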
"""
@@ -916,7 +919,14 @@ def read_single_conf_with_lru_cache(arg):
@lru_cache(maxsize=128)
def get_conf(*args):
# It is recommended to copy config.py to a config_private.py for your secrets, such as API keys and proxy URLs, to avoid accidentally pushing them to GitHub
"""
All of this project's configuration is centralized in config.py. There are three ways to modify a setting; pick whichever one suits you:
- edit config.py directly
- create and edit config_private.py
- modify environment variables (editing docker-compose.yml is equivalent to modifying the environment variables inside the container)
Note: if you deploy with docker-compose, edit docker-compose.yml (equivalent to modifying the environment variables inside the container)
"""
"""
res = []
for arg in args:
r = read_single_conf_with_lru_cache(arg)