Fix local model compatibility on Windows

qingxu fu 2023-11-11 17:58:17 +08:00
parent fcf04554c6
commit 2b917edf26
7 changed files with 10 additions and 15 deletions
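
Read together, the hunks below make two related changes for Windows, where multiprocessing starts child processes with the "spawn" method and therefore has to pickle the Process object: (1) every local-model bridge stops decorating its handle class with @SingletonLocalLLM, and the singleton is applied at the call site instead; (2) the lambdas that redirect the child's stdout into the pipe are installed inside run(), which executes in the child process, rather than in __init__, which executes in the parent. Hedged sketches of both failure modes follow the relevant hunks.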

View File

@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM2Handle(LocalLLMHandle):
     def load_model_info(self):
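
The first six files carry the same two-line change: drop the SingletonLocalLLM import and the class decorator. The decorator's body is not part of this diff; the sketch below is a hypothetical, conventional wrap-in-function singleton that would explain the Windows breakage.

```python
# Hypothetical sketch; SingletonLocalLLM's actual body is not shown in this diff.
_instances = {}

def SingletonLocalLLM(cls):
    def factory(*args, **kwargs):
        # build the handle once per class, then keep returning the same one
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return factory
```

Used as @SingletonLocalLLM, a wrapper like this rebinds the module-level name (GetGLM2Handle above) to factory. Under spawn, pickle serializes an instance's class by looking that name up in its module; it finds a function instead of the class, and Process.start() fails with "it's not the same object as ...". Leaving the classes undecorated keeps them resolvable by name; the singleton moves to the call sites in local_llm_class.py (last file below).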

View File

@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM3Handle(LocalLLMHandle):
     def load_model_info(self):

View File

@@ -8,7 +8,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from .chatglmoonx import ChatGLMModel, chat_template
@@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
     def load_model_info(self):

View File

@@ -7,7 +7,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 # ------------------------------------------------------------------------------------------------------------------------
@@ -34,7 +34,6 @@ def combine_history(prompt, hist):
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetInternlmHandle(LocalLLMHandle):
     def load_model_info(self):

View File

@@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
     def load_model_info(self):

View File

@@ -8,14 +8,13 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
     def load_model_info(self):

View File

@@ -76,7 +76,6 @@ class LocalLLMHandle(Process):
         self.parent_state, self.child_state = create_queue_pipe()
         # allow redirect_stdout
         self.std_tag = "[Subprocess Message] "
-        self.child.write = lambda x: self.child.send(self.std_tag + x)
         self.running = True
         self._model = None
         self._tokenizer = None
@@ -137,6 +136,8 @@
     def run(self):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process
         # on first run, load the model parameters
+        self.child.flush = lambda *args: None
+        self.child.write = lambda x: self.child.send(self.std_tag + x)
         reset_tqdm_output()
         self.set_state("`尝试加载模型`")
         try:
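
The hunk above is the other half of the fix. self.child is one end of a Pipe whose write method had been monkey-patched so the child's stdout could be redirected into the pipe. Under spawn, the Process object is rebuilt in the child via pickling, and a patch applied in __init__ does not survive the round trip (lambdas are not picklable, and Connection objects are reconstructed from their raw handles). Assigning write and flush inside run(), which already executes in the child, avoids serializing them at all. A minimal, self-contained sketch of the corrected pattern, with all names invented for illustration:

```python
import sys
from multiprocessing import Process, Pipe

class Handle(Process):
    STD_TAG = "[Subprocess Message] "

    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        # Do NOT patch self.child.write here: under spawn, this object is
        # pickled to the child and the lambda would not survive the trip.

    def run(self):
        # Runs in the child process, after unpickling: safe on Windows.
        self.child.flush = lambda *args: None  # tqdm and friends call flush()
        self.child.write = lambda x: self.child.send(self.STD_TAG + x)
        sys.stdout = self.child                # redirect the child's stdout
        print("hello", end="")                 # travels through the pipe

if __name__ == "__main__":
    h = Handle()
    h.start()
    print(h.parent.recv())  # -> [Subprocess Message] hello
    h.join(timeout=5)
```
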
@@ -220,7 +221,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         refer to request_llms/bridge_all.py
         """
-        _llm_handle = LLMSingletonClass()
+        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
         if len(observe_window) >= 1:
             observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
         if not _llm_handle.running:
@@ -268,7 +269,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         chatbot.append((inputs, ""))
-        _llm_handle = LLMSingletonClass()
+        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
         yield from update_ui(chatbot=chatbot, history=[])
         if not _llm_handle.running:
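
Assuming the hypothetical cache-based implementation sketched after the first file, the call-site form keeps both properties this commit needs:

```python
# Usage sketch under the hypothetical SingletonLocalLLM above.
h1 = SingletonLocalLLM(GetGLM2Handle)()
h2 = SingletonLocalLLM(GetGLM2Handle)()
assert h1 is h2                       # one shared handle per model class
assert isinstance(h1, GetGLM2Handle)  # the name still names a real class,
                                      # so spawn-pickling can resolve it
```
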