thread lock in chatglm
parent abd81cc215
commit 34b767d1fd
@@ -1,6 +1,7 @@
 from transformers import AutoModel, AutoTokenizer
 import time
+import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
@@ -18,6 +19,7 @@ class GetGLMHandle(Process):
         self.success = True
         self.check_dependency()
         self.start()
+        self.threadLock = threading.Lock()
 
     def check_dependency(self):
         try:
@@ -72,6 +74,7 @@ class GetGLMHandle(Process):
 
     def stream_chat(self, **kwargs):
         # runs in the main process
+        self.threadLock.acquire()
         self.parent.send(kwargs)
         while True:
             res = self.parent.recv()
@@ -79,7 +82,7 @@ class GetGLMHandle(Process):
                 yield res
             else:
                 break
-        return
+        self.threadLock.release()
 
 global glm_handle
 glm_handle = None
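Taken together, these hunks make stream_chat mutually exclusive across threads: a caller takes self.threadLock before writing its request to the pipe and holds it until the worker has finished streaming, so two threads calling stream_chat at the same time can no longer interleave their send/recv traffic on the single Pipe. A minimal, self-contained sketch of the resulting pattern (PipeWorker, the '[Finish]' marker and the worker body are illustrative assumptions, not part of the diff):

import threading
from multiprocessing import Process, Pipe

class PipeWorker(Process):
    # Hypothetical stand-in for GetGLMHandle: one worker process, one Pipe,
    # and a threading.Lock so only one thread streams through the pipe at a time.
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()
        self.threadLock = threading.Lock()  # as in the commit, created after start()

    def run(self):
        # Worker process: for each request, send a few chunks, then a finish
        # marker (the marker protocol is an assumption, not shown in the diff).
        while True:
            kwargs = self.child.recv()
            for chunk in ("partial ", "partial answer"):
                self.child.send(chunk)
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        # Main process: take the lock so concurrent callers cannot interleave
        # their traffic on the single pipe.
        self.threadLock.acquire()
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':  # termination condition assumed for the sketch
                break
            yield res
        self.threadLock.release()

if __name__ == '__main__':
    worker = PipeWorker()
    for piece in worker.stream_chat(query="hello"):
        print(piece)

One caveat worth noting: because stream_chat is a generator, release() only runs if the caller iterates all the way to the break; wrapping the loop in try/finally would also free the lock when iteration is abandoned early.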
@@ -145,10 +148,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     # start receiving the reply from chatglm
+    response = "[Local Message]: 等待ChatGLM响应中 ..."
     for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
     # summarize the output
+    if response == "[Local Message]: 等待ChatGLM响应中 ...":
+        response = "[Local Message]: ChatGLM响应异常 ..."
     history.extend([inputs, response])
     yield from update_ui(chatbot=chatbot, history=history)
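The second half of the change guards against an empty stream: response is pre-assigned the waiting placeholder, so the name is still bound even if the for loop never yields anything, and the post-loop comparison swaps in an error message before the pair is appended to history. A stripped-down sketch of that sentinel pattern (collect_reply and stream_of_chunks are hypothetical names, and the placeholder strings are rendered in English here):

def collect_reply(stream_of_chunks):
    # Pre-assign a sentinel so `response` exists even if the stream is empty.
    response = "[Local Message]: waiting for ChatGLM ..."
    for response in stream_of_chunks:
        pass  # in the commit, each chunk is pushed to the UI here

    # If the sentinel survived, the worker never produced anything.
    if response == "[Local Message]: waiting for ChatGLM ...":
        response = "[Local Message]: ChatGLM returned no response ..."
    return response

print(collect_reply(iter([])))                   # -> error placeholder
print(collect_reply(iter(["hi", "hi there"])))   # -> "hi there"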