Run ChatGLM in a separate process (ChatGLM改成多进程运行)

parent 294ac338bd · commit 6aba339538
@@ -66,7 +66,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     chatbot.append([inputs_show_user, ""])
     yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
     executor = ThreadPoolExecutor(max_workers=16)
-    mutable = ["", time.time()]
+    mutable = ["", time.time(), ""]
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
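The worker's observe window grows from two slots to three here. A minimal sketch of the layout, assuming only what the rest of this diff shows (slot 0 carries the text streamed back to the UI, slot 1 the watchdog timestamp; the purpose of the new third slot is not visible on this page, so it is left generic):

    import time

    # Illustration only: the meanings of indices 0 and 1 come from the surrounding diff;
    # the third slot added by this commit is a placeholder here.
    observe_window = ["", time.time(), ""]    # [streamed text, last-alive timestamp, extra]

    def worker(window):
        window[0] = "partial answer"          # index 0: text shown in the UI
        alive_for = time.time() - window[1]   # index 1: compared against a watchdog timeout
        return alive_for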
@@ -20,7 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
-        sys_prompt=system_prompt
+        sys_prompt=system_prompt,
+        retry_times_at_unknown_error=0
     )
 
     history.append(txt)
@@ -24,7 +24,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
 
 # 下载分支
 WORKDIR /gpt
-RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.0
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1
 WORKDIR /gpt/chatgpt_academic
 RUN $useProxyNetwork python3 -m pip install -r requirements.txt
 RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
main.py (87 changed lines)
@@ -1,56 +1,57 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-import gradio as gr
-from request_llm.bridge_all import predict
-from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
 
-# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+def main():
+    import gradio as gr
+    from request_llm.bridge_all import predict
+    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
     get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
 
 # 如果WEB_PORT是-1, 则随机选取WEB端口
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 if not AUTHENTICATION: AUTHENTICATION = None
 
 from check_proxy import get_current_version
 initial_prompt = "Serve me as a writing and programming assistant."
 title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
 description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
 
 # 问询记录, python 版本建议3.9+(越新越好)
 import logging
 os.makedirs("gpt_log", exist_ok=True)
 try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
 except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
 print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
 
 # 一些普通功能模块
 from core_functional import get_core_functions
 functional = get_core_functions()
 
 # 高级函数插件
 from crazy_functional import get_crazy_functions
 crazy_fns = get_crazy_functions()
 
 # 处理markdown文本格式的转变
 gr.Chatbot.postprocess = format_io
 
 # 做一些外观色彩上的调整
 from theme import adjust_theme, advanced_css
 set_theme = adjust_theme()
 
 # 代理与自动更新
 from check_proxy import check_proxy, auto_update
 proxy_info = check_proxy(proxies)
 
 gr_L1 = lambda: gr.Row().style()
 gr_L2 = lambda scale: gr.Column(scale=scale)
 if LAYOUT == "TOP-DOWN":
     gr_L1 = lambda: DummyWith()
     gr_L2 = lambda scale: gr.Row()
     CHATBOT_HEIGHT /= 2
 
 cancel_handles = []
 with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
     gr.HTML(title_html)
     cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
     with gr_L1():
@@ -161,8 +162,9 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
     # 终止按钮的回调函数注册
     stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
     stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
-def auto_opentab_delay():
+
+    # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
+    def auto_opentab_delay():
     import threading, webbrowser, time
     print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
     print(f"\t(亮色主题): http://localhost:{PORT}")
@@ -173,5 +175,8 @@ def auto_opentab_delay():
     threading.Thread(target=open, name="open-browser", daemon=True).start()
     threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
 
 auto_opentab_delay()
 demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+
+if __name__ == "__main__":
+    main()
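The start-up code now lives in main() behind an if __name__ == "__main__" guard. The commit message points at multiprocessing, and this guard is the standard precaution there (an assumption about intent, not stated on this page): with the spawn start method the child process re-imports the main module, and an unguarded module body would try to launch a second Gradio app. A minimal, repo-independent sketch:

    from multiprocessing import Process

    def work():
        print("child process running")

    def main():
        p = Process(target=work, daemon=True)
        p.start()           # with spawn, the child re-imports this module before running work()
        p.join()

    if __name__ == "__main__":  # skipped during that re-import, so start-up code does not recurse
        main()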
@@ -31,6 +31,24 @@ methods = {
     "tgui-ui": tgui_ui,
 }
 
+def LLM_CATCH_EXCEPTION(f):
+    """
+    装饰器函数,将错误显示出来
+    """
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+        try:
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+        except Exception as e:
+            from toolbox import get_conf
+            import traceback
+            proxies, = get_conf('proxies')
+            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+            observe_window[0] = tb_str
+            return tb_str
+    return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
     发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
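LLM_CATCH_EXCEPTION wraps each per-model worker so that an exception becomes a fenced traceback string instead of escaping the thread pool, and the same string is written into that worker's observe window. A hedged usage sketch (it assumes the code runs inside this repo, since the error path imports toolbox.get_conf; the failing worker is hypothetical):

    def broken_worker(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
        raise ValueError("backend unreachable")           # hypothetical failure

    safe_worker = LLM_CATCH_EXCEPTION(broken_worker)
    window = ["", 0.0, ""]
    result = safe_worker("hi", {}, [], "", window, True)  # returns the traceback text
    assert window[0] == result                            # ...and mirrors it into the observe window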
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # 如果同时询问多个大语言模型:
-        executor = ThreadPoolExecutor(max_workers=16)
+        executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
 
         window_len = len(observe_window)
-        if window_len==0:
-            window_mutex = [[] for _ in range(n_model)] + [True]
-        elif window_len==1:
-            window_mutex = [[""] for _ in range(n_model)] + [True]
-        elif window_len==2:
-            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+        assert window_len==3
+        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
 
         futures = []
         for i in range(n_model):
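With a fixed three-slot window per sub-model (the new assert window_len==3), the fan-out is simply one pool worker per model, each writing into its own window, plus a trailing boolean that keeps the merger loop running. A minimal sketch of that shape, independent of the repo (the model names and query function are placeholders):

    import time
    from concurrent.futures import ThreadPoolExecutor

    models = ["model-a", "model-b"]                      # placeholder names
    window_mutex = [["", time.time(), ""] for _ in models] + [True]

    def query(name, window):
        window[0] = f"answer from {name}"                # stand-in for a streaming LLM call
        return window[0]

    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(query, m, window_mutex[i]) for i, m in enumerate(models)]
        results = [f.result() for f in futures]
    window_mutex[-1] = False                             # tells the merger/watchdog loop to stop
    print(results)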
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 method = methods['tgui-no-ui']
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
             futures.append(future)
 
         def mutex_manager(window_mutex, observe_window):
             while True:
-                time.sleep(0.2)
+                time.sleep(0.5)
                 if not window_mutex[-1]: break
                 # 看门狗(watchdog)
                 for i in range(n_model):
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 # 观察窗(window)
                 chat_string = []
                 for i in range(n_model):
-                    chat_string.append( f"[{str(models[i])} 说]: {window_mutex[i][0]}" )
-                res = '\n\n---\n\n'.join(chat_string)
+                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
+                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                 # # # # # # # # # # #
                 observe_window[0] = res
 
@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         t_model.start()
 
         return_string_collect = []
+        while True:
+            worker_done = [h.done() for h in futures]
+            if all(worker_done):
+                executor.shutdown()
+                break
+            time.sleep(1)
+
         for i, future in enumerate(futures): # wait and get
-            return_string_collect.append( f"[{str(models[i])} 说]: {future.result()}" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
 
         window_mutex[-1] = False # stop mutex thread
-        res = '\n\n---\n\n'.join(return_string_collect)
+        res = '<br/>\n\n---\n\n'.join(return_string_collect)
         return res
 
+
@@ -3,35 +3,69 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import importlib
 from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
 
-global chatglm_model, chatglm_tokenizer
-
-chatglm_model = None
-chatglm_tokenizer = None
-
-def model_loader():
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_tokenizer is None:
-        chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    if chatglm_model is None: # 尚未加载
-        device, = get_conf('LOCAL_MODEL_DEVICE')
-        if device=='cpu':
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
-        else:
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-        chatglm_model = chatglm_model.eval()
-    chatglm_model = chatglm_model.eval()
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.chatglm_model = None
+        self.chatglm_tokenizer = None
+        self.start()
+        print('初始化')
+
+    def ready(self):
+        return self.chatglm_model is not None
+
+    def run(self):
+        while True:
+            try:
+                if self.chatglm_model is None:
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    if device=='cpu':
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                    else:
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                    self.chatglm_model = self.chatglm_model.eval()
+                    break
+                else:
+                    break
+            except:
+                pass
+        while True:
+            kwargs = self.child.recv()
+            try:
+                for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
+                    self.child.send(response)
+            except:
+                self.child.send('[Local Message] Call ChatGLM fail.')
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        return
+
+global glm_handle
+glm_handle = None
+#################################################################################
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
+    多线程方法
     函数的说明请见 request_llm/bridge_all.py
     """
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_model is None:
-        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间 ……"
-        model_loader()
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
 
     # chatglm 没有 sys_prompt 接口,因此把prompt加入 history
     history_feedin = []
     for i in range(len(history)//2):
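This is the heart of the commit: the ChatGLM weights now live in a dedicated child process. The parent keeps one end of a Pipe, sends the generation kwargs through it, and re-yields every partial response until the '[Finish]' sentinel arrives; daemon=True lets the worker die with the main program. A hedged usage sketch of the parent side (the import path is an assumption, since this page does not name the file, and the argument values are illustrative; actually loading ChatGLM requires the model weights and enough memory):

    # Assumed location of the class; the commit page does not show the file name.
    from request_llm.bridge_chatglm import GetGLMHandle

    glm_handle = GetGLMHandle()                 # starts the worker process and begins loading the model
    for partial in glm_handle.stream_chat(query="你好", history=[],
                                          max_length=2048, top_p=0.7, temperature=0.95):
        print(partial)                          # ChatGLM streams the cumulative reply so far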
@@ -40,29 +74,27 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 
     watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
     response = ""
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                        top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        # 观测窗,把已经获取的数据显示出去
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
-        # 看门狗 (watchdog),如果超过期限没有喂狗,则终止
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
-    # if not console_slience:
-    #     print(response)
     return response
 
 
+
 def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
+    单线程方法
     函数的说明请见 request_llm/bridge_all.py
     """
-    global chatglm_model, chatglm_tokenizer
     chatbot.append((inputs, ""))
-    if chatglm_model is None:
-        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间 ……")
+
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……")
         yield from update_ui(chatbot=chatbot, history=[])
-        model_loader()
 
     if additional_fn is not None:
         import core_functional
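In the no-UI path the only cancellation mechanism is the watchdog: the worker raises if nobody has refreshed observe_window[1] within watch_dog_patience seconds. A minimal, repo-independent sketch of the feeding side of that contract:

    import time, threading

    observe_window = ["", time.time(), ""]

    def keep_alive(window, stop):
        while not stop.is_set():
            window[1] = time.time()     # "feed the dog" while the caller still wants the answer
            time.sleep(1)

    stop = threading.Event()
    threading.Thread(target=keep_alive, args=(observe_window, stop), daemon=True).start()
    # ... run the long request here; call stop.set() (or simply stop feeding) to let it time out.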
@@ -71,13 +103,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
 
     history_feedin = []
     for i in range(len(history)//2):
         history_feedin.append(["What can I do?", system_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )
 
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                        top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)