Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier

Commit f0a5c49a9c
@@ -80,7 +80,7 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']

 # Model selection (note: LLM_MODEL is the default selected model; it *must* be included in the AVAIL_LLM_MODELS list)
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
-                    "gpt-4", "gpt-4-32k", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
 # P.S. other available models also include ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
 #                                           "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
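The hunk above registers "azure-gpt-4" as a selectable model. By the comment's own rule, whichever model LLM_MODEL names must also appear in AVAIL_LLM_MODELS; a minimal sketch of a config.py fragment that makes the new Azure model the default (the list values here are illustrative, not the shipped defaults):

# Hypothetical config.py fragment: making the newly registered
# Azure GPT-4 deployment the default model.
LLM_MODEL = "azure-gpt-4"   # must be a member of AVAIL_LLM_MODELS
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "azure-gpt-4", "gpt-4"]
assert LLM_MODEL in AVAIL_LLM_MODELS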
@@ -183,6 +183,9 @@ ALLOW_RESET_CONFIG = False

 PATH_PRIVATE_UPLOAD = "private_upload"
 # Location of the log folder; do not modify
 PATH_LOGGING = "gpt_log"
+
+# Scenarios, besides connecting to OpenAI, in which proxy use is allowed; do not modify
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]

 """
 Schematic of how the online-LLM configuration options relate to each other
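The new WHEN_TO_USE_PROXY list is what the toolbox.py hunk at the end of this commit consults before raising the proxy. A self-contained sketch of that gating rule, assuming the defaults added above (get_conf is replaced by a plain constant here):

# Sketch of the whitelist check behind WHEN_TO_USE_PROXY.
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]

def proxy_allowed(task=None):
    # No task: the caller asked for the proxy unconditionally.
    if not task:
        return True
    # Task given: it must be whitelisted in WHEN_TO_USE_PROXY.
    return task in WHEN_TO_USE_PROXY

assert proxy_allowed()                       # unconditional use
assert proxy_allowed("Download_LLM")         # whitelisted task
assert not proxy_allowed("Some_Other_Task")  # hypothetical, not whitelisted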
@@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
     print('Checking Text2vec ...')
     from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-    with ProxyNetworkActivate():    # temporarily activate the proxy network
+    with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
         HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

     # < ------------------- build the knowledge base --------------- >
     chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
     print('Establishing knowledge archive ...')
-    with ProxyNetworkActivate():    # temporarily activate the proxy network
+    with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
         kai = knowledge_archive_interface()
         kai.feed_archive(file_manifest=file_manifest, id=kai_id)
         kai_files = kai.get_loaded_file()
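With the task tag in place, the proxy is raised only around the HuggingFace downloads rather than for the whole plugin call. A sketch of the calling pattern this hunk establishes (runnable only inside the repository, since it imports from toolbox and langchain):

from toolbox import ProxyNetworkActivate
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

def fetch_text2vec():
    # The proxy is active only inside this block, and only when
    # 'Download_LLM' is listed in WHEN_TO_USE_PROXY in config.py.
    with ProxyNetworkActivate('Download_LLM'):
        return HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")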
@@ -651,7 +651,7 @@ class knowledge_archive_interface():
         from toolbox import ProxyNetworkActivate
         print('Checking Text2vec ...')
         from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-        with ProxyNetworkActivate():    # temporarily activate the proxy network
+        with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
             self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

         return self.text2vec_large_chinese
main.py
@@ -285,6 +285,7 @@ def main():

     auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
+        quiet=True,
         server_name="0.0.0.0",
         server_port=PORT,
         favicon_path="docs/logo.png",
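The single added launch argument is Gradio's quiet flag, which suppresses most of the server's startup console output. A minimal stand-alone example with a toy interface (the reversed-string function and port are illustrative only):

import gradio as gr

# Toy app; only the launch() keyword mirrors the main.py change above.
demo = gr.Interface(fn=lambda s: s[::-1], inputs="text", outputs="text")
demo.queue().launch(quiet=True, server_name="0.0.0.0", server_port=7860)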
@@ -145,6 +145,15 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
     },

+    "azure-gpt-4":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": azure_endpoint,
+        "max_token": 8192,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     # api_2d
     "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
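The azure-gpt-4 entry reuses the gpt-3.5 tokenizer and the existing UI/non-UI adapters, swapping in only the Azure endpoint. A self-contained sketch of the registry-dispatch pattern model_info implements (the adapter stub and endpoint string are placeholders, not the repository's real values):

# Stub adapter standing in for request_llm's chatgpt_noui.
def chatgpt_noui(prompt, endpoint):
    return f"(reply from {endpoint} to: {prompt})"

model_info = {
    "azure-gpt-4": {
        "fn_without_ui": chatgpt_noui,
        "endpoint": "https://YOUR_RESOURCE.openai.azure.com/",  # placeholder
        "max_token": 8192,
    },
}

def dispatch(model_name, prompt):
    # A KeyError here is exactly what selecting azure-gpt-4 produced
    # before this commit registered the entry.
    entry = model_info[model_name]
    return entry["fn_without_ui"](prompt, entry["endpoint"])

print(dispatch("azure-gpt-4", "hello"))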
@@ -3,7 +3,7 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe

 load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
@@ -48,16 +48,17 @@ class GetGLMHandle(Process):

         while True:
             try:
-                if self.chatglm_model is None:
-                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
-                    if device=='cpu':
-                        self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
-                    else:
-                        self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
-                    self.chatglm_model = self.chatglm_model.eval()
-                    break
-                else:
-                    break
+                with ProxyNetworkActivate('Download_LLM'):
+                    if self.chatglm_model is None:
+                        self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
+                        if device=='cpu':
+                            self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
+                        else:
+                            self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
+                        self.chatglm_model = self.chatglm_model.eval()
+                        break
+                    else:
+                        break
             except:
                 retry += 1
                 if retry > 3:
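The ChatGLM change re-indents the whole first-load branch under ProxyNetworkActivate('Download_LLM'), inside the pre-existing retry loop, so the weights download goes through the proxy while ordinary inference does not. The shape of that pattern, reduced to a runnable sketch with a stub context manager in place of the toolbox class:

import contextlib

@contextlib.contextmanager
def proxy_network_activate(task):
    # Stand-in for toolbox.ProxyNetworkActivate.
    print(f"proxy up for task: {task}")
    try:
        yield
    finally:
        print("proxy restored")

def load_with_retry(loader, max_retries=3):
    retry = 0
    while True:
        try:
            with proxy_network_activate('Download_LLM'):
                return loader()   # the download happens behind the proxy
        except Exception:
            retry += 1
            if retry > max_retries:   # mirrors the `if retry > 3` guard above
                raise

print(load_with_retry(lambda: "model loaded"))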
@@ -30,7 +30,7 @@ class GetONNXGLMHandle(LocalLLMHandle):
             with open(os.path.expanduser('~/.cache/huggingface/token'), 'w') as f:
                 f.write(huggingface_token)
             model_id = 'meta-llama/Llama-2-7b-chat-hf'
-            with ProxyNetworkActivate():
+            with ProxyNetworkActivate('Download_LLM'):
                 self._tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=huggingface_token)
                 # use fp16
                 model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=huggingface_token).eval()
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
@@ -2,6 +2,5 @@ jittor >= 1.3.7.9
 jtorch >= 0.1.3
 torch
 torchvision
-transformers==4.26.1
 pandas
 jieba
@@ -1,5 +1,4 @@
 torch
-transformers==4.25.1
 sentencepiece
 datasets
 accelerate
@@ -2,7 +2,7 @@
 pydantic==1.10.11
 tiktoken>=0.3.3
 requests[socks]
-transformers
+transformers>=4.27.1
 python-markdown-math
 beautifulsoup4
 prompt_toolkit
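Net effect of the requirements hunks: the conflicting per-model transformers pins (>=4.27.1, ==4.26.1, ==4.25.1) disappear from the sub-requirement files, and the main requirements.txt sets a single floor. A quick sanity check of the resolved version (packaging is a common utility dependency, assumed available):

import transformers
from packaging.version import Version

# Verify the environment satisfies the consolidated floor.
assert Version(transformers.__version__) >= Version("4.27.1")
print("transformers", transformers.__version__)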
@@ -5,7 +5,7 @@ CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAY

 def dynamic_set_theme(THEME):
     set_theme = gr.themes.ThemeClass()
-    with ProxyNetworkActivate():
+    with ProxyNetworkActivate('Download_Gradio_Theme'):
         logging.info('正在下载Gradio主题,请稍等。')
         if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
         if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
@@ -16,7 +16,7 @@ def adjust_theme():

     try:
         set_theme = gr.themes.ThemeClass()
-        with ProxyNetworkActivate():
+        with ProxyNetworkActivate('Download_Gradio_Theme'):
             logging.info('正在下载Gradio主题,请稍等。')
             THEME, = get_conf('THEME')
             if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
toolbox.py
@@ -956,7 +956,19 @@ class ProxyNetworkActivate():
     """
     This code defines an empty context manager named TempProxy, used to route a small block of code through the proxy
     """
+    def __init__(self, task=None) -> None:
+        self.task = task
+        if not task:
+            # No task given, so the proxy takes effect by default
+            self.valid = True
+        else:
+            # A task is given; check it against the whitelist
+            from toolbox import get_conf
+            WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY')
+            self.valid = (task in WHEN_TO_USE_PROXY)
+
     def __enter__(self):
+        if not self.valid: return self
         from toolbox import get_conf
         proxies, = get_conf('proxies')
         if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
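After this hunk, ProxyNetworkActivate keeps its old behavior when constructed with no arguments and becomes a no-op when the task passed is absent from WHEN_TO_USE_PROXY. A sketch of both modes (runnable only inside the repository):

from toolbox import ProxyNetworkActivate

# Task-less: proxy environment variables are set unconditionally,
# exactly as before this commit.
with ProxyNetworkActivate():
    pass  # network calls here go through the proxy

# Task-tagged: __enter__ returns immediately unless the task is
# whitelisted in config.py's WHEN_TO_USE_PROXY.
with ProxyNetworkActivate('Download_Gradio_Theme'):
    pass  # proxied only if 'Download_Gradio_Theme' is whitelisted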
version
@@ -1,5 +1,5 @@
 {
-  "version": 3.53,
+  "version": 3.54,
   "show_feature": true,
-  "new_feature": "支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+  "new_feature": "新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
 }