Run ChatGLM as a separate process (multiprocessing)
commit 6aba339538
parent 294ac338bd
@@ -66,7 +66,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     chatbot.append([inputs_show_user, ""])
     yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
     executor = ThreadPoolExecutor(max_workers=16)
-    mutable = ["", time.time()]
+    mutable = ["", time.time(), ""]
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
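The shared mutable list grows from two slots to three. Slot 0 carries the partial reply and slot 1 the last time the watchdog was fed; what the new third slot carries is not visible in this hunk, so the sketch below simply treats it as an auxiliary status field (a minimal illustration, not the repository's code):

import time

# hypothetical three-slot observe window: [partial_reply, last_feed_time, extra_status]
mutable = ["", time.time(), ""]

def worker(mutable):
    for chunk in ["Hel", "lo", " world"]:   # stand-in for streamed tokens
        mutable[0] += chunk                 # publish the partial reply
        mutable[1] = time.time()            # feed the watchdog
    mutable[2] = "done"                     # auxiliary status slot

worker(mutable)
print(mutable)
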
@@ -20,7 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
-        sys_prompt=system_prompt
+        sys_prompt=system_prompt,
+        retry_times_at_unknown_error=0
     )

     history.append(txt)
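Passing retry_times_at_unknown_error=0 makes each model in the simultaneous query fail fast rather than re-trying a broken backend. The retry logic itself lives inside the request helper and is not shown in this hunk; a rough sketch of the general idea (names are illustrative, not the actual implementation):

def request_with_retries(do_request, retry_times_at_unknown_error=2):
    retry_op = retry_times_at_unknown_error
    while True:
        try:
            return do_request()
        except Exception:
            if retry_op <= 0:
                raise            # with 0 retries the error surfaces immediately
            retry_op -= 1

# e.g. request_with_retries(lambda: call_one_model(txt), retry_times_at_unknown_error=0)
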
@@ -24,7 +24,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

 # Download the branch
 WORKDIR /gpt
-RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.0
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1
 WORKDIR /gpt/chatgpt_academic
 RUN $useProxyNetwork python3 -m pip install -r requirements.txt
 RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
main.py
@@ -1,8 +1,9 @@
 import os; os.environ['no_proxy'] = '*' # avoid unexpected contamination from the proxy network

+def main():
     import gradio as gr
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith

     # It is recommended to copy config_private.py and keep your secrets there (API keys, proxy URLs), so they are not accidentally pushed to GitHub for others to see
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
         get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
@@ -161,6 +162,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
     # register the callback functions for the stop button
     stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+    stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)

     # gradio's inbrowser trigger is not very reliable; roll back to the original browser-opening function
     def auto_opentab_delay():
         import threading, webbrowser, time
@@ -175,3 +177,6 @@ def auto_opentab_delay():

     auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+
+if __name__ == "__main__":
+    main()
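Wrapping the startup code in main() behind an import guard is the usual prerequisite once child processes are involved (the new ChatGLM handle below is a multiprocessing.Process): with the spawn start method the child re-imports the main module, so module-level side effects such as launching the Gradio server must not run on import. The commit does not state this motivation explicitly; a minimal sketch of the pattern:

from multiprocessing import Process
import time

def heavy_worker():
    time.sleep(0.1)
    print("child process running")

def main():
    p = Process(target=heavy_worker, daemon=True)
    p.start()
    p.join()   # keep the parent alive until the daemon child has finished

if __name__ == "__main__":
    # Without this guard, a child created with the "spawn" start method would
    # re-run module-level code when it re-imports the main module.
    main()
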
@@ -31,6 +31,24 @@ methods = {
     "tgui-ui": tgui_ui,
 }

+def LLM_CATCH_EXCEPTION(f):
+    """
+    Decorator that surfaces worker errors to the caller
+    """
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+        try:
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+        except Exception as e:
+            from toolbox import get_conf
+            import traceback
+            proxies, = get_conf('proxies')
+            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+            observe_window[0] = tb_str
+            return tb_str
+    return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
     Send the request to the LLM and wait for the complete reply in one go, without showing intermediate progress; streaming is used internally so the connection is not cut off midway.
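A rough, trimmed illustration of what the decorator buys: when a worker thread raises, the formatted traceback is written into the shared observe window and returned as the worker's result, so the UI can render it instead of hanging. The failing backend below is invented for the demo, and the proxy lookup from the real decorator is omitted:

import traceback

def LLM_CATCH_EXCEPTION(f):
    # same shape as above, minus the proxy lookup: trap any error and report it via observe_window[0]
    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
        try:
            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
        except Exception:
            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
            observe_window[0] = tb_str
            return tb_str
    return decorated

def broken_backend(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
    raise ConnectionError("pretend the model endpoint is down")   # invented failure for the demo

window = ["", 0.0, ""]
result = LLM_CATCH_EXCEPTION(broken_backend)("hi", {}, [], "", window, True)
print(window[0])   # the formatted traceback, ready to render in the chat window
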
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # querying several large language models at the same time:
-        executor = ThreadPoolExecutor(max_workers=16)
+        executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)

         window_len = len(observe_window)
-        if window_len==0:
-            window_mutex = [[] for _ in range(n_model)] + [True]
-        elif window_len==1:
-            window_mutex = [[""] for _ in range(n_model)] + [True]
-        elif window_len==2:
-            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+        assert window_len==3
+        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]

         futures = []
         for i in range(n_model):
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 method = methods['tgui-no-ui']
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
            futures.append(future)

         def mutex_manager(window_mutex, observe_window):
             while True:
-                time.sleep(0.2)
+                time.sleep(0.5)
                 if not window_mutex[-1]: break
                 # watchdog
                 for i in range(n_model):
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 # observe window
                 chat_string = []
                 for i in range(n_model):
-                    chat_string.append( f"[{str(models[i])} 说]: {window_mutex[i][0]}" )
-                res = '\n\n---\n\n'.join(chat_string)
+                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
+                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                 # # # # # # # # # # #
                 observe_window[0] = res
@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         t_model.start()

         return_string_collect = []
+        while True:
+            worker_done = [h.done() for h in futures]
+            if all(worker_done):
+                executor.shutdown()
+                break
+            time.sleep(1)
+
         for i, future in enumerate(futures): # wait and get
-            return_string_collect.append( f"[{str(models[i])} 说]: {future.result()}" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )

         window_mutex[-1] = False # stop mutex thread
-        res = '\n\n---\n\n'.join(return_string_collect)
+        res = '<br/>\n\n---\n\n'.join(return_string_collect)
         return res
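Taken together, the multi-model branch is a fan-out/fan-in: one wrapped worker per model is submitted to the thread pool, each streams into its own three-slot window, a watcher thread merges those windows into the shared observe window, and the caller joins the futures at the end. A stripped-down, self-contained sketch of that structure (the dummy workers stand in for real backends; names are illustrative):

import time
import threading
from concurrent.futures import ThreadPoolExecutor

def fake_model(name, window):
    # stand-in for a streaming LLM call: publish partial text, feed the watchdog
    for chunk in ["thinking", " ...", " done"]:
        window[0] += chunk
        window[1] = time.time()
        time.sleep(0.1)
    return f"{name}: {window[0]}"

def fan_out(models, observe_window):
    executor = ThreadPoolExecutor(max_workers=4)
    window_mutex = [["", time.time(), ""] for _ in models] + [True]   # one window per model + running flag
    futures = [executor.submit(fake_model, m, window_mutex[i]) for i, m in enumerate(models)]

    def mutex_manager():
        # watcher: merge the per-model windows into the caller's observe window
        while window_mutex[-1]:
            time.sleep(0.2)
            observe_window[0] = '\n\n---\n\n'.join(
                f"[{m}]: {window_mutex[i][0]}" for i, m in enumerate(models))

    threading.Thread(target=mutex_manager, daemon=True).start()
    results = [f.result() for f in futures]   # fan-in: block until every model has replied
    window_mutex[-1] = False                  # stop the watcher thread
    executor.shutdown()
    return '\n\n---\n\n'.join(results)

if __name__ == "__main__":
    print(fan_out(["model-a", "model-b"], ["", time.time(), ""]))
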
@@ -3,35 +3,69 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import importlib
 from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe

 #################################################################################
-global chatglm_model, chatglm_tokenizer
-
-chatglm_model = None
-chatglm_tokenizer = None
-
-def model_loader():
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_tokenizer is None:
-        chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    if chatglm_model is None: # not loaded yet
-        device, = get_conf('LOCAL_MODEL_DEVICE')
-        if device=='cpu':
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
-        else:
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-        chatglm_model = chatglm_model.eval()
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.chatglm_model = None
+        self.chatglm_tokenizer = None
+        self.start()
+        print('初始化')
+
+    def ready(self):
+        return self.chatglm_model is not None
+
+    def run(self):
+        while True:
+            try:
+                if self.chatglm_model is None:
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    if device=='cpu':
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                    else:
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                    self.chatglm_model = self.chatglm_model.eval()
+                    break
+                else:
+                    break
+            except:
+                pass
+        while True:
+            kwargs = self.child.recv()
+            try:
+                for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
+                    self.child.send(response)
+            except:
+                self.child.send('[Local Message] Call ChatGLM fail.')
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        return
+
+global glm_handle
+glm_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
     Multi-threaded method
     See request_llm/bridge_all.py for this function's documentation
     """
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_model is None:
-        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间 ……"
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

-    model_loader()
     # chatglm has no sys_prompt interface, so the prompt is folded into the history
     history_feedin = []
     for i in range(len(history)//2):
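This is the heart of the commit: ChatGLM now loads and runs inside a daemon child process, and the parent streams replies back over a multiprocessing Pipe, with '[Finish]' as the end-of-stream sentinel. A generic, self-contained sketch of the same request/response pattern (the echo worker stands in for the model; this is not the repository's exact code):

from multiprocessing import Process, Pipe

class StreamWorker(Process):
    """Child process that streams results for each request back over a Pipe."""
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    def run(self):
        # child side: receive a request, stream chunks back, then send a sentinel
        while True:
            query = self.child.recv()
            for token in query.split():          # pretend "model": echo word by word
                self.child.send(token)
            self.child.send('[Finish]')

    def stream_chat(self, query):
        # parent side: generator that yields chunks until the sentinel arrives
        self.parent.send(query)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == "__main__":
    worker = StreamWorker()
    for chunk in worker.stream_chat("hello from the parent process"):
        print(chunk)
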
@@ -40,29 +74,27 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

     watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
     response = ""
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        # observe window: push out whatever has been received so far
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
         # watchdog: terminate if it has not been fed within the deadline
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
     # if not console_slience:
     #     print(response)
     return response



 def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
     Single-threaded method
     See request_llm/bridge_all.py for this function's documentation
     """
-    global chatglm_model, chatglm_tokenizer
     chatbot.append((inputs, ""))
-    if chatglm_model is None:
-        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间 ……")
+
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……")
         yield from update_ui(chatbot=chatbot, history=[])
-        model_loader()

     if additional_fn is not None:
         import core_functional
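The watchdog contract is unchanged by the move to a child process: the caller refreshes observe_window[1] while it still wants output, and the streaming loop aborts once that timestamp goes stale. A small illustration of the contract (timings and names made up):

import time

def stream_with_watchdog(chunks, observe_window, watch_dog_patience=5):
    # mirrors the loop above: publish each chunk, abort if the caller stopped feeding the watchdog
    for chunk in chunks:
        observe_window[0] = chunk
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                raise RuntimeError("watchdog expired: caller is gone, stop streaming")
    return observe_window[0]

window = ["", time.time(), ""]          # the caller feeds slot 1 just before the call
print(stream_with_watchdog(["a", "ab", "abc"], window))
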
@@ -71,13 +103,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # get the pre-processing function (if any)
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

-
     history_feedin = []
     for i in range(len(history)//2):
         history_feedin.append(["What can I do?", system_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )

-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)