Fix the bug where local models fail to load on Windows

qingxu fu 2023-11-11 21:11:55 +08:00
parent e4409b94d1
commit f75e39dc27
8 changed files with 25 additions and 25 deletions

View File

@@ -7,8 +7,7 @@
 1. predict: used during normal conversation; has full interactive features; cannot be multi-threaded
 Functions that can be called from multiple threads:
-2. predict_no_ui: called by advanced experimental feature modules; output is not shown in the UI in real time; parameters are simple; can run in parallel across threads, which makes complex feature logic easy to implement
-3. predict_no_ui_long_connection: during experiments we found that when predict_no_ui handled long documents the connection to openai dropped easily; this function solves that with streaming and also supports multi-threading
+2. predict_no_ui_long_connection: supports multi-threading
"""
import json
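The docstring above separates the UI-bound predict from the thread-safe predict_no_ui_long_connection. A minimal sketch of calling the latter from several threads, assuming the (inputs, llm_kwargs, history, sys_prompt) calling convention used elsewhere in this repository and an abbreviated llm_kwargs dictionary:

```python
# Minimal sketch; the exact llm_kwargs fields and the calling convention are assumptions.
from concurrent.futures import ThreadPoolExecutor
from request_llms.bridge_chatglm3 import predict_no_ui_long_connection

llm_kwargs = {'max_length': 4096, 'top_p': 1, 'temperature': 1}   # abbreviated config
questions = ["What is attention?", "Explain beam search.", "What is LoRA?"]

def ask(question):
    # Each worker keeps its own history; the call blocks until the streamed
    # answer has been fully assembled, so workers can run in parallel.
    return predict_no_ui_long_connection(inputs=question, llm_kwargs=llm_kwargs,
                                         history=[], sys_prompt="")

with ThreadPoolExecutor(max_workers=3) as pool:
    for answer in pool.map(ask, questions):
        print(answer[:80])
```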

View File

@@ -7,8 +7,7 @@
 1. predict: used during normal conversation; has full interactive features; cannot be multi-threaded
 Functions that can be called from multiple threads:
-2. predict_no_ui: called by advanced experimental feature modules; output is not shown in the UI in real time; parameters are simple; can run in parallel across threads, which makes complex feature logic easy to implement
-3. predict_no_ui_long_connection: during experiments we found that when predict_no_ui handled long documents the connection to openai dropped easily; this function solves that with streaming and also supports multi-threading
+2. predict_no_ui_long_connection: supports multi-threading
"""
import json

View File

@@ -7,7 +7,7 @@
 1. predict: used during normal conversation; has full interactive features; cannot be multi-threaded
 Functions that can be called from multiple threads:
-2. predict_no_ui_long_connection: during experiments we found that when predict_no_ui handled long documents the connection to openai dropped easily; this function solves that with streaming and also supports multi-threading
+2. predict_no_ui_long_connection: supports multi-threading
"""
import os

View File

@@ -5,7 +5,7 @@ from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
@@ -52,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle):
         import torch
         from transformers import AutoModelForCausalLM, AutoTokenizer
         device = get_conf('LOCAL_MODEL_DEVICE')
-        if self._model is None:
-            tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
-            if device=='cpu':
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
-            else:
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
+        with ProxyNetworkActivate('Download_LLM'):
+            if self._model is None:
+                tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+                if device=='cpu':
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
+                else:
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
-        model = model.eval()
+            model = model.eval()
         return model, tokenizer
     def llm_stream_generator(self, **kwargs):
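The fix wraps the Hugging Face download in ProxyNetworkActivate('Download_LLM') so the project's proxy settings apply only while model weights are fetched. The helper's implementation is not part of this diff; a minimal sketch of what such a context manager could look like, assuming an environment-variable approach and a placeholder proxy URL:

```python
import os

class ProxyNetworkActivate:
    """Minimal sketch, NOT the project's actual implementation: temporarily
    export proxy environment variables while a download task runs."""

    def __init__(self, task=None, proxy="http://127.0.0.1:7890"):  # placeholder proxy URL
        self.task = task
        self.proxy = proxy
        self._saved = {}

    def __enter__(self):
        # Activate the proxy for the duration of the with-block.
        for key in ("HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"):
            self._saved[key] = os.environ.get(key)
            os.environ[key] = self.proxy
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the previous values so the proxy does not leak into normal traffic.
        for key, value in self._saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value
        return False
```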

View File

@@ -6,7 +6,7 @@ from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
@@ -29,12 +29,13 @@ class GetONNXGLMHandle(LocalLLMHandle):
         import platform
         from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
-        model_id = 'qwen/Qwen-7B-Chat'
-        self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
-        # use fp16
-        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
-        model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # generation length, top_p and other hyperparameters can be adjusted here
-        self._model = model
+        with ProxyNetworkActivate('Download_LLM'):
+            model_id = 'qwen/Qwen-7B-Chat'
+            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
+            # use fp16
+            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
+            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # generation length, top_p and other hyperparameters can be adjusted here
+            self._model = model
         return self._model, self._tokenizer
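Once load_model_and_tokenizer returns, the handle drives generation from the child process. For reference, a minimal sketch of exercising the loaded Qwen checkpoint directly; the model.chat interface is what Qwen-7B-Chat is generally documented to expose via trust_remote_code, and is an assumption here rather than something shown in this diff:

```python
# Minimal sketch, assuming Qwen-7B-Chat's remote-code chat() interface.
from modelscope import AutoModelForCausalLM, AutoTokenizer

model_id = 'qwen/Qwen-7B-Chat'
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",
                                             trust_remote_code=True, fp16=True).eval()

history = None
response, history = model.chat(tokenizer, "Hello!", history=history)
print(response)   # first turn
response, history = model.chat(tokenizer, "Summarize what I just said.", history=history)
print(response)   # second turn reuses the accumulated history
```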

View File

@@ -201,7 +201,7 @@ class LocalLLMHandle(Process):
if res.startswith(self.std_tag):
new_output = res[len(self.std_tag):]
std_out = std_out[:std_out_clip_len]
-# print(new_output, end='')
+print(new_output, end='')
std_out = new_output + std_out
yield self.std_tag + '\n```\n' + std_out + '\n```\n'
elif res == '[Finish]':
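The branch above relays stdout captured in the worker process back to the UI: any message prefixed with std_tag is prepended to a clipped buffer and yielded inside a fenced block, and the re-enabled print echoes it to the parent's console as well. A minimal sketch of the sending side under that assumption; the Pipe wiring, tag value, and helper names here are illustrative, not copied from local_llm_class.py:

```python
import sys
from multiprocessing import Process, Pipe

STD_TAG = '[[terminal]]'  # illustrative tag; the real value lives in local_llm_class.py

class PipeStdout:
    """Redirect the worker's print() output into the parent pipe, tagged."""
    def __init__(self, conn):
        self.conn = conn
    def write(self, text):
        if text:
            self.conn.send(STD_TAG + text)
    def flush(self):
        pass

def worker(conn):
    sys.stdout = PipeStdout(conn)   # everything printed below is forwarded
    print("loading weights ...")    # would show up in the UI via the std_tag branch
    conn.send('[Finish]')           # sentinel understood by the parent loop

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=worker, args=(child_conn,))
    p.start()
    while True:
        msg = parent_conn.recv()
        if msg == '[Finish]':
            break
        if msg.startswith(STD_TAG):
            print("captured:", msg[len(STD_TAG):], end='')
    p.join()
```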

View File

@@ -15,11 +15,11 @@ if __name__ == "__main__":
# from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
# from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
# from request_llms.bridge_claude import predict_no_ui_long_connection
-# from request_llms.bridge_internlm import predict_no_ui_long_connection
+from request_llms.bridge_internlm import predict_no_ui_long_connection
# from request_llms.bridge_qwen import predict_no_ui_long_connection
# from request_llms.bridge_spark import predict_no_ui_long_connection
# from request_llms.bridge_zhipu import predict_no_ui_long_connection
-from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
+# from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
llm_kwargs = {
'max_length': 4096,

View File

@@ -1,5 +1,5 @@
 {
-    "version": 3.57,
+    "version": 3.58,
     "show_feature": true,
-    "new_feature": "Support Wenxin Yiyan (ERNIE Bot) v4 and Spark v3 <-> Support GLM3 and the Zhipu API <-> Fix the local-model concurrency bug <-> Support dynamically adding basic-function buttons <-> New PDF summary page for reports <-> Recompiled Gradio for a better user experience"
+    "new_feature": "Fix the bug loading local models on Windows <-> Support Wenxin Yiyan (ERNIE Bot) v4 and Spark v3 <-> Support GLM3 and the Zhipu API <-> Fix the local-model concurrency bug <-> Support dynamically adding basic-function buttons <-> New PDF summary page for reports <-> Recompiled Gradio for a better user experience"
}