Commit 6aba339538: ChatGLM改成多进程运行 (run ChatGLM as a separate process)
Parent: 294ac338bd
@@ -66,7 +66,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     chatbot.append([inputs_show_user, ""])
     yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
     executor = ThreadPoolExecutor(max_workers=16)
-    mutable = ["", time.time()]
+    mutable = ["", time.time(), ""]
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
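mutable grows a third slot here, matching the [streamed_text, watchdog_feed_time, extra] observe-window convention that the rest of this commit standardizes on (see the "assert window_len==3" hunk further down). A minimal runnable sketch of that convention, with the LLM call faked; none of these names are the repo's own:

# Sketch of the observe-window convention implied by this hunk (illustrative):
# a worker streams text into slot 0, the UI thread "feeds the watchdog" by
# refreshing the timestamp in slot 1; slot 2 is the new third field.
import time, threading

observe_window = ["", time.time(), ""]  # [streamed text, watchdog feed time, extra]

def worker(window, patience=5):
    for chunk in ["Hello", " world"]:           # stands in for LLM streaming
        window[0] += chunk                      # publish partial output
        if time.time() - window[1] > patience:  # UI stopped refreshing: give up
            raise RuntimeError("watchdog timeout, aborting worker")
        time.sleep(0.1)

t = threading.Thread(target=worker, args=(observe_window,), daemon=True)
t.start()
while t.is_alive():
    observe_window[1] = time.time()  # the UI loop feeds the dog while polling
    print(observe_window[0])
    time.sleep(0.2)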
@@ -20,7 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
-        sys_prompt=system_prompt
+        sys_prompt=system_prompt,
+        retry_times_at_unknown_error=0
     )

     history.append(txt)
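同时问询 (ask several models at once) now passes retry_times_at_unknown_error=0, so a model that errors out fails fast instead of being retried while the others stream. Judging only by the "retry_op = retry_times_at_unknown_error" context line in the first hunk, the parameter plausibly drives a retry loop like the sketch below; this wrapper is illustrative, not the repo's code:

# Hedged sketch of what retry_times_at_unknown_error plausibly controls
# (call_with_retries is a hypothetical stand-in, not a repo function):
def call_with_retries(fn, retry_times_at_unknown_error=2):
    retry_op = retry_times_at_unknown_error
    while True:
        try:
            return fn()
        except Exception:
            retry_op -= 1
            if retry_op < 0:  # with retry_times_at_unknown_error=0: fail on first error
                raise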
@@ -24,7 +24,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

 # 下载分支
 WORKDIR /gpt
-RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.0
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1
 WORKDIR /gpt/chatgpt_academic
 RUN $useProxyNetwork python3 -m pip install -r requirements.txt
 RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
main.py (327 changed lines)
@@ -1,177 +1,182 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-import gradio as gr
-from request_llm.bridge_all import predict
-from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith

-# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+def main():
+    import gradio as gr
+    from request_llm.bridge_all import predict
+    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')

     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     if not AUTHENTICATION: AUTHENTICATION = None

     from check_proxy import get_current_version
     initial_prompt = "Serve me as a writing and programming assistant."
     title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
     description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""

     # 问询记录, python 版本建议3.9+(越新越好)
     import logging
     os.makedirs("gpt_log", exist_ok=True)
     try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
     except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
     print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")

     # 一些普通功能模块
     from core_functional import get_core_functions
     functional = get_core_functions()

     # 高级函数插件
     from crazy_functional import get_crazy_functions
     crazy_fns = get_crazy_functions()

     # 处理markdown文本格式的转变
     gr.Chatbot.postprocess = format_io

     # 做一些外观色彩上的调整
     from theme import adjust_theme, advanced_css
     set_theme = adjust_theme()

     # 代理与自动更新
     from check_proxy import check_proxy, auto_update
     proxy_info = check_proxy(proxies)

     gr_L1 = lambda: gr.Row().style()
     gr_L2 = lambda scale: gr.Column(scale=scale)
     if LAYOUT == "TOP-DOWN":
         gr_L1 = lambda: DummyWith()
         gr_L2 = lambda scale: gr.Row()
         CHATBOT_HEIGHT /= 2

     cancel_handles = []
     with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
         cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
         with gr_L1():
             with gr_L2(scale=2):
                 chatbot = gr.Chatbot()
                 chatbot.style(height=CHATBOT_HEIGHT)
                 history = gr.State([])
             with gr_L2(scale=1):
                 with gr.Accordion("输入区", open=True) as area_input_primary:
                     with gr.Row():
                         txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                     with gr.Row():
                         submitBtn = gr.Button("提交", variant="primary")
                     with gr.Row():
                         resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
                         stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                     with gr.Row():
                         status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
                 with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                     with gr.Row():
                         for k in functional:
                             variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                             functional[k]["Button"] = gr.Button(k, variant=variant)
                 with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
                     with gr.Row():
                         gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
                     with gr.Row():
                         for k in crazy_fns:
                             if not crazy_fns[k].get("AsButton", True): continue
                             variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
                             crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
                             crazy_fns[k]["Button"].style(size="sm")
                     with gr.Row():
                         with gr.Accordion("更多函数插件", open=True):
                             dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
                             with gr.Column(scale=1):
                                 dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
                             with gr.Column(scale=1):
                                 switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
                     with gr.Row():
                         with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
                             file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
                 with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
                     system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                     max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
                     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                     md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)

                     gr.Markdown(description)
         with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
             with gr.Row():
                 txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
             with gr.Row():
                 submitBtn2 = gr.Button("提交", variant="primary")
             with gr.Row():
                 resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
                 stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
         # 功能区显示开关与功能区的互动
         def fn_area_visibility(a):
             ret = {}
             ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
             ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
             ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
             ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
             if "底部输入区" in a: ret.update({txt: gr.update(value="")})
             return ret
         checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
         # 整理反复出现的控件句柄组合
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
         output_combo = [cookies, chatbot, history, status]
         predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
         # 提交按钮、重置按钮
         cancel_handles.append(txt.submit(**predict_args))
         cancel_handles.append(txt2.submit(**predict_args))
         cancel_handles.append(submitBtn.click(**predict_args))
         cancel_handles.append(submitBtn2.click(**predict_args))
         resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
         resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
         # 基础功能区的回调函数注册
         for k in functional:
             click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
             cancel_handles.append(click_handle)
         # 文件上传区,接收文件后与chatbot的互动
         file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
         # 函数插件-固定按钮区
         for k in crazy_fns:
             if not crazy_fns[k].get("AsButton", True): continue
             click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
             click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
             cancel_handles.append(click_handle)
         # 函数插件-下拉菜单与随变按钮的互动
         def on_dropdown_changed(k):
             variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
             return {switchy_bt: gr.update(value=k, variant=variant)}
         dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
         # 随变按钮的回调函数注册
         def route(k, *args, **kwargs):
             if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
             yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
         click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
         click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
         # def expand_file_area(file_upload, area_file_up):
         #     if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
         # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
         cancel_handles.append(click_handle)
         # 终止按钮的回调函数注册
         stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
         stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)

     # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
     def auto_opentab_delay():
         import threading, webbrowser, time
         print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
         print(f"\t(亮色主题): http://localhost:{PORT}")
         print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
         def open():
             time.sleep(2) # 打开浏览器
             webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
         threading.Thread(target=open, name="open-browser", daemon=True).start()
         threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()

     auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+
+if __name__ == "__main__":
+    main()
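main.py is rewrapped so that everything runs inside main() behind an if __name__ == "__main__" guard. That guard is what makes the new multiprocess ChatGLM backend safe: with the "spawn" start method (the default on Windows and macOS) each child process re-imports the main module, and an unguarded top level would rebuild the whole Gradio app inside every child. A minimal illustration of the hazard the guard prevents; heavy_backend is a stand-in, not repo code:

# Why the guard matters for a process-based backend (illustrative):
# under "spawn", the child re-imports __main__, so top-level side effects
# would run again in every child process.
from multiprocessing import Process

def heavy_backend():
    print("model worker running")  # stands in for loading ChatGLM

def main():
    p = Process(target=heavy_backend, daemon=True)
    p.start()
    p.join()

if __name__ == "__main__":  # without this, spawn would re-execute main() on import
    main()

On Linux the default start method is fork, where the guard is less critical, but the guarded layout works everywhere.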
@@ -31,6 +31,24 @@ methods = {
     "tgui-ui": tgui_ui,
 }

+def LLM_CATCH_EXCEPTION(f):
+    """
+    装饰器函数,将错误显示出来
+    """
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+        try:
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+        except Exception as e:
+            from toolbox import get_conf
+            import traceback
+            proxies, = get_conf('proxies')
+            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+            observe_window[0] = tb_str
+            return tb_str
+    return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
     发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
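LLM_CATCH_EXCEPTION keeps one crashing model from silently killing its worker thread: the formatted traceback is written into that model's observe window and also returned, so future.result() later yields readable text instead of raising. A condensed, runnable demonstration of the same pattern; flaky_model is a stand-in:

import traceback

def LLM_CATCH_EXCEPTION(f):  # condensed from the hunk above
    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
        try:
            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
        except Exception:
            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
            observe_window[0] = tb_str  # surface the error in the UI window
            return tb_str               # and hand it back as the "answer"
    return decorated

def flaky_model(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
    raise ValueError("backend exploded")  # stands in for a failing LLM call

window = ["", 0.0, ""]
result = LLM_CATCH_EXCEPTION(flaky_model)("hi", {}, [], "", window, True)
print(result == window[0])  # True: traceback is shown and returned, not raised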
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # 如果同时询问多个大语言模型:
-        executor = ThreadPoolExecutor(max_workers=16)
+        executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)

         window_len = len(observe_window)
-        if window_len==0:
-            window_mutex = [[] for _ in range(n_model)] + [True]
-        elif window_len==1:
-            window_mutex = [[""] for _ in range(n_model)] + [True]
-        elif window_len==2:
-            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+        assert window_len==3
+        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]

         futures = []
         for i in range(n_model):
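For '&'-joined model lists such as "gpt-3.5-turbo&chatglm", the dispatcher fans the same query out to one worker per model, each with its own three-slot window; "assert window_len==3" replaces the old size-sniffing branches, so every caller must now pass the full [text, time, extra] window. A runnable sketch of that fan-out with the worker bodies faked (fake_llm is a stand-in, not a repo function):

# Minimal sketch of the multi-model fan-out this hunk sets up:
import time
from concurrent.futures import ThreadPoolExecutor

model = "gpt-3.5-turbo&chatglm"      # '&'-joined model list, as in the hunk
models = model.split('&')
executor = ThreadPoolExecutor(max_workers=4)

def fake_llm(name, window):
    window[0] = f"answer from {name}"  # each worker streams into its own window
    return window[0]

window_mutex = [["", time.time(), ""] for _ in models] + [True]  # last slot: keep-running flag
futures = [executor.submit(fake_llm, m, window_mutex[i]) for i, m in enumerate(models)]
print([f.result() for f in futures])
window_mutex[-1] = False             # would stop the mutex/watchdog thread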
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
                 method = methods['tgui-no-ui']
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
             futures.append(future)

         def mutex_manager(window_mutex, observe_window):
             while True:
-                time.sleep(0.2)
+                time.sleep(0.5)
                 if not window_mutex[-1]: break
                 # 看门狗(watchdog)
                 for i in range(n_model):
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
                 # 观察窗(window)
                 chat_string = []
                 for i in range(n_model):
-                    chat_string.append( f"[{str(models[i])} 说]: {window_mutex[i][0]}" )
+                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
-                res = '\n\n---\n\n'.join(chat_string)
+                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                 # # # # # # # # # # #
                 observe_window[0] = res

@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
         t_model.start()

         return_string_collect = []
+        while True:
+            worker_done = [h.done() for h in futures]
+            if all(worker_done):
+                executor.shutdown()
+                break
+            time.sleep(1)

         for i, future in enumerate(futures): # wait and get
-            return_string_collect.append( f"[{str(models[i])} 说]: {future.result()}" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )

         window_mutex[-1] = False # stop mutex thread
-        res = '\n\n---\n\n'.join(return_string_collect)
+        res = '<br/>\n\n---\n\n'.join(return_string_collect)
         return res
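The added completion loop polls h.done() once a second before calling executor.shutdown(), which keeps the mutex_manager thread refreshing the observe window while workers run. The standard-library alternative would be to block in concurrent.futures.wait, as in this sketch (an equivalent form, not the repo's choice):

# Blocking alternative to the done()/sleep(1) polling loop (illustrative):
from concurrent.futures import ThreadPoolExecutor, wait

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(pow, x, 2) for x in range(3)]
    wait(futures)                         # returns once every future is done
    print([f.result() for f in futures])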
@@ -3,35 +3,69 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import importlib
 from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe

-global chatglm_model, chatglm_tokenizer
-
-chatglm_model = None
-chatglm_tokenizer = None
-
-def model_loader():
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_tokenizer is None:
-        chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    if chatglm_model is None: # 尚未加载
-        device, = get_conf('LOCAL_MODEL_DEVICE')
-        if device=='cpu':
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
-        else:
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-        chatglm_model = chatglm_model.eval()
-    chatglm_model = chatglm_model.eval()
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.chatglm_model = None
+        self.chatglm_tokenizer = None
+        self.start()
+        print('初始化')
+
+    def ready(self):
+        return self.chatglm_model is not None
+
+    def run(self):
+        while True:
+            try:
+                if self.chatglm_model is None:
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    if device=='cpu':
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                    else:
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                    self.chatglm_model = self.chatglm_model.eval()
+                    break
+                else:
+                    break
+            except:
+                pass
+        while True:
+            kwargs = self.child.recv()
+            try:
+                for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
+                    self.child.send(response)
+            except:
+                self.child.send('[Local Message] Call ChatGLM fail.')
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        return
+
+global glm_handle
+glm_handle = None
+#################################################################################
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
+    多线程方法
     函数的说明请见 request_llm/bridge_all.py
     """
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_model is None:
-        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间 ……"
-        model_loader()
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

     # chatglm 没有 sys_prompt 接口,因此把prompt加入 history
     history_feedin = []
     for i in range(len(history)//2):
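This hunk (apparently the ChatGLM bridge cross-referenced as request_llm/bridge_all.py's sibling) is the heart of the commit: instead of loading ChatGLM into the Gradio process via model_loader(), GetGLMHandle is a daemon Process that owns the tokenizer and model; the parent sends stream_chat kwargs down one end of a Pipe and re-yields replies until the '[Finish]' sentinel. A stripped-down, runnable sketch of the same plumbing with the model call faked (WorkerHandle is a stand-in name, only the Pipe protocol mirrors the class above):

# Daemon process owns the "model"; the parent turns Pipe traffic into a generator.
from multiprocessing import Process, Pipe

class WorkerHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    def run(self):                       # executes in the child process
        while True:
            kwargs = self.child.recv()   # block until the parent sends a request
            for tok in f"echo: {kwargs['query']}".split():
                self.child.send(tok)     # stream partial results back
            self.child.send('[Finish]')  # sentinel: request complete

    def stream_chat(self, **kwargs):     # called in the parent process
        self.parent.send(kwargs)
        while (res := self.parent.recv()) != '[Finish]':
            yield res

if __name__ == "__main__":               # needed: the child re-imports this module under spawn
    handle = WorkerHandle()
    for piece in handle.stream_chat(query="hello multiprocess ChatGLM"):
        print(piece)

A design note: moving the model into its own process isolates CUDA state and memory from the web server, and a wedged model can be killed without taking the UI down; the cost is that every token crosses a Pipe.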
@@ -40,29 +74,27 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):

     watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
     response = ""
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        # 观测窗,把已经获取的数据显示出去
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
-        # 看门狗 (watchdog),如果超过期限没有喂狗,则终止
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
-        # if not console_slience:
-        #     print(response)
     return response



 def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
+    单线程方法
     函数的说明请见 request_llm/bridge_all.py
     """
-    global chatglm_model, chatglm_tokenizer
     chatbot.append((inputs, ""))
-    if chatglm_model is None:
-        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间 ……")
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……")
         yield from update_ui(chatbot=chatbot, history=[])
-        model_loader()

     if additional_fn is not None:
         import core_functional
@@ -71,13 +103,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

     history_feedin = []
     for i in range(len(history)//2):
         history_feedin.append(["What can I do?", system_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )

-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
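Both entry points now lazily build a single process-backed glm_handle and consume its generator. A hedged usage sketch of the streaming entry point; the only llm_kwargs keys these hunks read are max_length, top_p and temperature, and the call is left commented out because running it downloads and loads ChatGLM:

# Hedged usage sketch; observe_window follows the [text, watchdog-time, extra]
# convention from the earlier hunks.
import time
# from request_llm.bridge_chatglm import predict_no_ui_long_connection  # assumed import path

llm_kwargs = {'max_length': 512, 'top_p': 1.0, 'temperature': 1.0}
observe_window = ["", time.time(), ""]
# answer = predict_no_ui_long_connection("你好", llm_kwargs, history=[],
#                                        sys_prompt="", observe_window=observe_window)
# print(answer)  # final reply; partial text streams into observe_window[0]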