Fix local model compatibility on Windows
parent fcf04554c6
commit 2b917edf26
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM2Handle(LocalLLMHandle):
 
     def load_model_info(self):
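Why the `@SingletonLocalLLM` decorator is removed from each bridge: on Windows, `multiprocessing` starts child processes with the spawn method, which pickles the `Process` object, and pickle locates an instance's class through its module-level name. A class decorator that rebinds that name to a wrapper function therefore makes every instance unpicklable. A minimal sketch of the failure mode, with a hypothetical `singleton` standing in for the project's decorator:

```python
import pickle

def singleton(cls):  # hypothetical stand-in for SingletonLocalLLM
    cache = {}
    def get_instance(*args, **kwargs):
        if cls not in cache:
            cache[cls] = cls(*args, **kwargs)
        return cache[cls]
    return get_instance  # the module-level name "Handle" will point here

@singleton
class Handle:
    pass

h = Handle()  # an instance of the original class, created via the wrapper
try:
    pickle.dumps(h)  # spawn performs the equivalent of this on Windows
except pickle.PicklingError as e:
    print(e)  # ... it's not the same object as __main__.Handle
```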
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM3Handle(LocalLLMHandle):
 
     def load_model_info(self):
@@ -8,7 +8,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 from .chatglmoonx import ChatGLMModel, chat_template
 
@@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
@@ -7,7 +7,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 # ------------------------------------------------------------------------------------------------------------------------
@@ -34,7 +34,6 @@ def combine_history(prompt, hist):
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetInternlmHandle(LocalLLMHandle):
 
     def load_model_info(self):
@@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
@@ -8,14 +8,13 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
@@ -76,7 +76,6 @@ class LocalLLMHandle(Process):
         self.parent_state, self.child_state = create_queue_pipe()
         # allow redirect_stdout
         self.std_tag = "[Subprocess Message] "
-        self.child.write = lambda x: self.child.send(self.std_tag + x)
         self.running = True
         self._model = None
         self._tokenizer = None
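The line deleted above is the other half of the Windows problem: every attribute set in `__init__` must survive pickling under spawn, and a lambda defined inside a method has no importable qualified name. A minimal repro with a hypothetical class:

```python
import pickle

class Handle:
    def __init__(self):
        # a lambda created inside a method cannot be located by name,
        # so the whole instance becomes unpicklable
        self.write = lambda x: None

try:
    pickle.dumps(Handle())  # what spawn attempts before starting the child
except (pickle.PicklingError, AttributeError) as e:
    print(e)  # Can't pickle local object 'Handle.__init__.<locals>.<lambda>'
```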
@@ -137,6 +136,8 @@ class LocalLLMHandle(Process):
     def run(self):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process
         # first run: load the model parameters
+        self.child.flush = lambda *args: None
+        self.child.write = lambda x: self.child.send(self.std_tag + x)
         reset_tqdm_output()
         self.set_state("`尝试加载模型`")
         try:
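Moving the two lambda assignments into `run()` works because `run()` executes in the child process after the object has already been transferred, so the lambdas never cross the pickle boundary. A self-contained sketch of the same pattern; the names mirror the diff, but this is illustrative code, not the project's:

```python
from multiprocessing import Pipe, Process

class Handle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.std_tag = "[Subprocess Message] "
        # nothing unpicklable here: __init__ runs in the parent, and the
        # instance must survive pickling when spawn starts the child

    def run(self):
        # safe: run() executes in the child, after unpickling
        self.child.flush = lambda *args: None
        self.child.write = lambda x: self.child.send(self.std_tag + x)
        self.child.write("child ready")

if __name__ == "__main__":  # required on Windows with spawn
    h = Handle()
    h.start()
    print(h.parent.recv())  # [Subprocess Message] child ready
    h.join()
```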
@@ -220,7 +221,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
        """
        refer to request_llms/bridge_all.py
        """
-       _llm_handle = LLMSingletonClass()
+       _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
        if len(observe_window) >= 1:
            observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
        if not _llm_handle.running:
@@ -268,7 +269,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
        """
        chatbot.append((inputs, ""))
 
-       _llm_handle = LLMSingletonClass()
+       _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
        chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
        yield from update_ui(chatbot=chatbot, history=[])
        if not _llm_handle.running:
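With the decorator gone, the singleton is applied at the call site instead: `SingletonLocalLLM(LLMSingletonClass)()` caches one handle per class while leaving the class importable under its own name, so spawned children can still unpickle instances. A hedged reconstruction of what such a helper plausibly looks like (not the actual implementation in local_llm_class.py):

```python
_instances = {}

def SingletonLocalLLM(cls):
    """Return a factory that yields a single shared instance of cls."""
    def get_instance(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return get_instance

class GetGLM2Handle:  # stand-in; note the class name is never rebound
    pass

_llm_handle = SingletonLocalLLM(GetGLM2Handle)()
assert _llm_handle is SingletonLocalLLM(GetGLM2Handle)()  # one shared handle
```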