From 6aba339538103c15d1b22fb616fb5f66557be716 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 15 Apr 2023 19:09:03 +0800
Subject: [PATCH] Run ChatGLM in a separate process (multiprocessing)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
crazy_functions/crazy_utils.py | 2 +-
crazy_functions/询问多个大语言模型.py | 3 +-
docs/Dockerfile+ChatGLM | 2 +-
main.py | 327 +++++++++++++-------------
request_llm/bridge_all.py | 48 +++-
request_llm/bridge_chatglm.py | 98 +++++---
version | 2 +-
7 files changed, 270 insertions(+), 212 deletions(-)
diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py
index cc43b53..b255706 100644
--- a/crazy_functions/crazy_utils.py
+++ b/crazy_functions/crazy_utils.py
@@ -66,7 +66,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
chatbot.append([inputs_show_user, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
executor = ThreadPoolExecutor(max_workers=16)
- mutable = ["", time.time()]
+ mutable = ["", time.time(), ""]
def _req_gpt(inputs, history, sys_prompt):
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py
index a3c98c1..55bfb41 100644
--- a/crazy_functions/询问多个大语言模型.py
+++ b/crazy_functions/询问多个大语言模型.py
@@ -20,7 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=txt, inputs_show_user=txt,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
- sys_prompt=system_prompt
+ sys_prompt=system_prompt,
+ retry_times_at_unknown_error=0
)
history.append(txt)
diff --git a/docs/Dockerfile+ChatGLM b/docs/Dockerfile+ChatGLM
index f99f2a6..197ca1a 100644
--- a/docs/Dockerfile+ChatGLM
+++ b/docs/Dockerfile+ChatGLM
@@ -24,7 +24,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
# 下载分支
WORKDIR /gpt
-RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.0
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1
WORKDIR /gpt/chatgpt_academic
RUN $useProxyNetwork python3 -m pip install -r requirements.txt
RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
diff --git a/main.py b/main.py
index d8257a2..db35c15 100644
--- a/main.py
+++ b/main.py
@@ -1,177 +1,182 @@
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-import gradio as gr
-from request_llm.bridge_all import predict
-from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
-# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
- get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+def main():
+ import gradio as gr
+ from request_llm.bridge_all import predict
+ from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+ # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
+ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+ get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
-# 如果WEB_PORT是-1, 则随机选取WEB端口
-PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
-if not AUTHENTICATION: AUTHENTICATION = None
+ # 如果WEB_PORT是-1, 则随机选取WEB端口
+ PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
+ if not AUTHENTICATION: AUTHENTICATION = None
-from check_proxy import get_current_version
-initial_prompt = "Serve me as a writing and programming assistant."
-title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
-description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
+ from check_proxy import get_current_version
+ initial_prompt = "Serve me as a writing and programming assistant."
+ title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
+ description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
-# 问询记录, python 版本建议3.9+(越新越好)
-import logging
-os.makedirs("gpt_log", exist_ok=True)
-try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
-except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
-print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
+ # 问询记录, python 版本建议3.9+(越新越好)
+ import logging
+ os.makedirs("gpt_log", exist_ok=True)
+ try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
+ except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
+ print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
-# 一些普通功能模块
-from core_functional import get_core_functions
-functional = get_core_functions()
+ # 一些普通功能模块
+ from core_functional import get_core_functions
+ functional = get_core_functions()
-# 高级函数插件
-from crazy_functional import get_crazy_functions
-crazy_fns = get_crazy_functions()
+ # 高级函数插件
+ from crazy_functional import get_crazy_functions
+ crazy_fns = get_crazy_functions()
-# 处理markdown文本格式的转变
-gr.Chatbot.postprocess = format_io
+ # 处理markdown文本格式的转变
+ gr.Chatbot.postprocess = format_io
-# 做一些外观色彩上的调整
-from theme import adjust_theme, advanced_css
-set_theme = adjust_theme()
+ # 做一些外观色彩上的调整
+ from theme import adjust_theme, advanced_css
+ set_theme = adjust_theme()
-# 代理与自动更新
-from check_proxy import check_proxy, auto_update
-proxy_info = check_proxy(proxies)
+ # 代理与自动更新
+ from check_proxy import check_proxy, auto_update
+ proxy_info = check_proxy(proxies)
-gr_L1 = lambda: gr.Row().style()
-gr_L2 = lambda scale: gr.Column(scale=scale)
-if LAYOUT == "TOP-DOWN":
- gr_L1 = lambda: DummyWith()
- gr_L2 = lambda scale: gr.Row()
- CHATBOT_HEIGHT /= 2
+ gr_L1 = lambda: gr.Row().style()
+ gr_L2 = lambda scale: gr.Column(scale=scale)
+ if LAYOUT == "TOP-DOWN":
+ gr_L1 = lambda: DummyWith()
+ gr_L2 = lambda scale: gr.Row()
+ CHATBOT_HEIGHT /= 2
-cancel_handles = []
-with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
- gr.HTML(title_html)
- cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
- with gr_L1():
- with gr_L2(scale=2):
- chatbot = gr.Chatbot()
- chatbot.style(height=CHATBOT_HEIGHT)
- history = gr.State([])
- with gr_L2(scale=1):
- with gr.Accordion("输入区", open=True) as area_input_primary:
- with gr.Row():
- txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
- with gr.Row():
- submitBtn = gr.Button("提交", variant="primary")
- with gr.Row():
- resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
- stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
- with gr.Row():
- status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
- with gr.Accordion("基础功能区", open=True) as area_basic_fn:
- with gr.Row():
- for k in functional:
- variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
- functional[k]["Button"] = gr.Button(k, variant=variant)
- with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
- with gr.Row():
- gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
- with gr.Row():
- for k in crazy_fns:
- if not crazy_fns[k].get("AsButton", True): continue
- variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
- crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
- crazy_fns[k]["Button"].style(size="sm")
- with gr.Row():
- with gr.Accordion("更多函数插件", open=True):
- dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
- with gr.Column(scale=1):
- dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
- with gr.Column(scale=1):
- switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
- with gr.Row():
- with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
- file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
- with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
- system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
- top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
- temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
- max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
- checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
- md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)
+ cancel_handles = []
+ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
+ gr.HTML(title_html)
+ cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
+ with gr_L1():
+ with gr_L2(scale=2):
+ chatbot = gr.Chatbot()
+ chatbot.style(height=CHATBOT_HEIGHT)
+ history = gr.State([])
+ with gr_L2(scale=1):
+ with gr.Accordion("输入区", open=True) as area_input_primary:
+ with gr.Row():
+ txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
+ with gr.Row():
+ submitBtn = gr.Button("提交", variant="primary")
+ with gr.Row():
+ resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
+ stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
+ with gr.Row():
+ status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
+ with gr.Accordion("基础功能区", open=True) as area_basic_fn:
+ with gr.Row():
+ for k in functional:
+ variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
+ functional[k]["Button"] = gr.Button(k, variant=variant)
+ with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
+ with gr.Row():
+ gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
+ with gr.Row():
+ for k in crazy_fns:
+ if not crazy_fns[k].get("AsButton", True): continue
+ variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+ crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
+ crazy_fns[k]["Button"].style(size="sm")
+ with gr.Row():
+ with gr.Accordion("更多函数插件", open=True):
+ dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
+ with gr.Column(scale=1):
+ dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
+ with gr.Column(scale=1):
+ switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
+ with gr.Row():
+ with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
+ file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
+ with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
+ system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
+ top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
+ temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
+ max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
+ checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
+ md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)
- gr.Markdown(description)
- with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
- with gr.Row():
- txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
- with gr.Row():
- submitBtn2 = gr.Button("提交", variant="primary")
- with gr.Row():
- resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
- stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
- # 功能区显示开关与功能区的互动
- def fn_area_visibility(a):
- ret = {}
- ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
- ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
- ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
- ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
- if "底部输入区" in a: ret.update({txt: gr.update(value="")})
- return ret
- checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
- # 整理反复出现的控件句柄组合
- input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
- output_combo = [cookies, chatbot, history, status]
- predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
- # 提交按钮、重置按钮
- cancel_handles.append(txt.submit(**predict_args))
- cancel_handles.append(txt2.submit(**predict_args))
- cancel_handles.append(submitBtn.click(**predict_args))
- cancel_handles.append(submitBtn2.click(**predict_args))
- resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
- resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
- # 基础功能区的回调函数注册
- for k in functional:
- click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
- cancel_handles.append(click_handle)
- # 文件上传区,接收文件后与chatbot的互动
- file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
- # 函数插件-固定按钮区
- for k in crazy_fns:
- if not crazy_fns[k].get("AsButton", True): continue
- click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
+ gr.Markdown(description)
+ with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
+ with gr.Row():
+ txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
+ with gr.Row():
+ submitBtn2 = gr.Button("提交", variant="primary")
+ with gr.Row():
+ resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
+ stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
+ # 功能区显示开关与功能区的互动
+ def fn_area_visibility(a):
+ ret = {}
+ ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
+ ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
+ ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
+ ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
+ if "底部输入区" in a: ret.update({txt: gr.update(value="")})
+ return ret
+ checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
+ # 整理反复出现的控件句柄组合
+ input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
+ output_combo = [cookies, chatbot, history, status]
+ predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
+ # 提交按钮、重置按钮
+ cancel_handles.append(txt.submit(**predict_args))
+ cancel_handles.append(txt2.submit(**predict_args))
+ cancel_handles.append(submitBtn.click(**predict_args))
+ cancel_handles.append(submitBtn2.click(**predict_args))
+ resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+ resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+ # 基础功能区的回调函数注册
+ for k in functional:
+ click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
+ cancel_handles.append(click_handle)
+ # 文件上传区,接收文件后与chatbot的互动
+ file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
+ # 函数插件-固定按钮区
+ for k in crazy_fns:
+ if not crazy_fns[k].get("AsButton", True): continue
+ click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
+ click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+ cancel_handles.append(click_handle)
+ # 函数插件-下拉菜单与随变按钮的互动
+ def on_dropdown_changed(k):
+ variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+ return {switchy_bt: gr.update(value=k, variant=variant)}
+ dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
+ # 随变按钮的回调函数注册
+ def route(k, *args, **kwargs):
+ if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
+ yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
+ click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+ # def expand_file_area(file_upload, area_file_up):
+ # if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
+ # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
cancel_handles.append(click_handle)
- # 函数插件-下拉菜单与随变按钮的互动
- def on_dropdown_changed(k):
- variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
- return {switchy_bt: gr.update(value=k, variant=variant)}
- dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
- # 随变按钮的回调函数注册
- def route(k, *args, **kwargs):
- if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
- yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
- click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
- click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
- # def expand_file_area(file_upload, area_file_up):
- # if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
- # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
- cancel_handles.append(click_handle)
- # 终止按钮的回调函数注册
- stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
- stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
-def auto_opentab_delay():
- import threading, webbrowser, time
- print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
- print(f"\t(亮色主题): http://localhost:{PORT}")
- print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
- def open():
- time.sleep(2) # 打开浏览器
- webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
- threading.Thread(target=open, name="open-browser", daemon=True).start()
- threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+ # 终止按钮的回调函数注册
+ stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+ stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-auto_opentab_delay()
-demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+ # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
+ def auto_opentab_delay():
+ import threading, webbrowser, time
+ print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
+ print(f"\t(亮色主题): http://localhost:{PORT}")
+ print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
+ def open():
+ time.sleep(2) # 打开浏览器
+ webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
+ threading.Thread(target=open, name="open-browser", daemon=True).start()
+ threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+
+ auto_opentab_delay()
+ demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
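A plausible reason (not stated in the patch) for moving all of main.py's module-level setup into main() behind an if __name__ == "__main__" guard: ChatGLM now runs in a child process, and under the "spawn" start method the child re-imports the parent's main module, so unguarded module-level code would rebuild the Gradio UI and relaunch the server inside the worker. A minimal sketch of that guard pattern; the worker and every name below are illustrative, not taken from the repository:

from multiprocessing import Process, set_start_method

def heavy_worker():
    # stands in for the ChatGLM subprocess; loads the model, never touches the UI
    print("worker: model loading would happen here")

def main():
    # all UI construction and the server launch live here, so they run exactly once
    p = Process(target=heavy_worker, daemon=True)
    p.start()
    p.join()

if __name__ == "__main__":                  # the spawned child re-imports this module
    set_start_method("spawn", force=True)   # assumption: spawn, as on Windows/macOS
    main()                                  # ...but never re-enters main()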
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index f02a1c8..6fdd846 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -31,6 +31,24 @@ methods = {
"tgui-ui": tgui_ui,
}
+def LLM_CATCH_EXCEPTION(f):
+ """
+ 装饰器函数,将错误显示出来
+ """
+ def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+ try:
+ return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+ except Exception as e:
+ from toolbox import get_conf
+ import traceback
+ proxies, = get_conf('proxies')
+ tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+ observe_window[0] = tb_str
+ return tb_str
+ return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
"""
发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
else:
# 如果同时询问多个大语言模型:
- executor = ThreadPoolExecutor(max_workers=16)
+ executor = ThreadPoolExecutor(max_workers=4)
models = model.split('&')
n_model = len(models)
window_len = len(observe_window)
- if window_len==0:
- window_mutex = [[] for _ in range(n_model)] + [True]
- elif window_len==1:
- window_mutex = [[""] for _ in range(n_model)] + [True]
- elif window_len==2:
- window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+ assert window_len==3
+ window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
futures = []
for i in range(n_model):
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
method = methods['tgui-no-ui']
llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
llm_kwargs_feedin['llm_model'] = model
- future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+ future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
futures.append(future)
def mutex_manager(window_mutex, observe_window):
while True:
- time.sleep(0.2)
+ time.sleep(0.5)
if not window_mutex[-1]: break
# 看门狗(watchdog)
for i in range(n_model):
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
# 观察窗(window)
chat_string = []
for i in range(n_model):
- chat_string.append( f"[{str(models[i])} 说]: {window_mutex[i][0]}" )
- res = '\n\n---\n\n'.join(chat_string)
+ chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
+ res = '<br/><br/>\n\n---\n\n'.join(chat_string)
# # # # # # # # # # #
observe_window[0] = res
@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
t_model.start()
return_string_collect = []
+ while True:
+ worker_done = [h.done() for h in futures]
+ if all(worker_done):
+ executor.shutdown()
+ break
+ time.sleep(1)
+
for i, future in enumerate(futures): # wait and get
- return_string_collect.append( f"[{str(models[i])} 说]: {future.result()}" )
+ return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
+
window_mutex[-1] = False # stop mutex thread
- res = '\n\n---\n\n'.join(return_string_collect)
+ res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
return res
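The LLM_CATCH_EXCEPTION decorator added above exists so that a worker thread's traceback is pushed into the shared observe_window (and returned as the worker's result) instead of disappearing inside the ThreadPoolExecutor. A minimal, self-contained sketch of the same error-forwarding pattern; the backend and names below are illustrative stand-ins, not code from the patch:

import traceback
from concurrent.futures import ThreadPoolExecutor

def catch_exception(fn):
    # wrap a worker so its traceback reaches both the caller and the observe window
    def wrapped(prompt, observe_window):
        try:
            return fn(prompt, observe_window)
        except Exception:
            tb = traceback.format_exc()
            observe_window[0] = tb          # slot 0 is what the UI thread displays
            return tb
    return wrapped

def flaky_backend(prompt, observe_window):  # illustrative stand-in for one LLM bridge
    raise RuntimeError("backend unavailable")

window = ["", 0.0, ""]                      # same three-slot layout as the patch uses
with ThreadPoolExecutor(max_workers=4) as pool:
    future = pool.submit(catch_exception(flaky_backend), "hello", window)
    print(future.result().splitlines()[-1]) # last line of the traceback, no silent hang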
diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py
index d6f5eec..819519b 100644
--- a/request_llm/bridge_chatglm.py
+++ b/request_llm/bridge_chatglm.py
@@ -3,35 +3,69 @@ from transformers import AutoModel, AutoTokenizer
import time
import importlib
from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
+#################################################################################
+class GetGLMHandle(Process):
+ def __init__(self):
+ super().__init__(daemon=True)
+ self.parent, self.child = Pipe()
+ self.chatglm_model = None
+ self.chatglm_tokenizer = None
+ self.start()
+ print('初始化')
+
+ def ready(self):
+ return self.chatglm_model is not None
-global chatglm_model, chatglm_tokenizer
-
-chatglm_model = None
-chatglm_tokenizer = None
-
-def model_loader():
- global chatglm_model, chatglm_tokenizer
- if chatglm_tokenizer is None:
- chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
- if chatglm_model is None: # 尚未加载
- device, = get_conf('LOCAL_MODEL_DEVICE')
- if device=='cpu':
- chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
- else:
- chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
- chatglm_model = chatglm_model.eval()
- chatglm_model = chatglm_model.eval()
+ def run(self):
+ while True:
+ try:
+ if self.chatglm_model is None:
+ self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+ device, = get_conf('LOCAL_MODEL_DEVICE')
+ if device=='cpu':
+ self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+ else:
+ self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+ self.chatglm_model = self.chatglm_model.eval()
+ break
+ else:
+ break
+ except:
+ pass
+ while True:
+ kwargs = self.child.recv()
+ try:
+ for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
+ self.child.send(response)
+ except:
+ self.child.send('[Local Message] Call ChatGLM fail.')
+ self.child.send('[Finish]')
+ def stream_chat(self, **kwargs):
+ self.parent.send(kwargs)
+ while True:
+ res = self.parent.recv()
+ if res != '[Finish]':
+ yield res
+ else:
+ break
+ return
+
+global glm_handle
+glm_handle = None
+#################################################################################
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
+ 多线程方法
函数的说明请见 request_llm/bridge_all.py
"""
- global chatglm_model, chatglm_tokenizer
- if chatglm_model is None:
- observe_window[0] = "ChatGLM尚未加载,加载需要一段时间 ……"
+ global glm_handle
+ if glm_handle is None:
+ glm_handle = GetGLMHandle()
+ observe_window[0] = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
- model_loader()
# chatglm 没有 sys_prompt 接口,因此把prompt加入 history
history_feedin = []
for i in range(len(history)//2):
@@ -40,29 +74,27 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
- for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
- top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- # 观测窗,把已经获取的数据显示出去
+ for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
observe_window[0] = response
- # 看门狗 (watchdog),如果超过期限没有喂狗,则终止
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
- # if not console_slience:
- # print(response)
return response
+
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
+ 单线程方法
函数的说明请见 request_llm/bridge_all.py
"""
- global chatglm_model, chatglm_tokenizer
chatbot.append((inputs, ""))
- if chatglm_model is None:
- chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间 ……")
+
+ global glm_handle
+ if glm_handle is None:
+ glm_handle = GetGLMHandle()
+ chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……")
yield from update_ui(chatbot=chatbot, history=[])
- model_loader()
if additional_fn is not None:
import core_functional
@@ -71,13 +103,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
history_feedin = []
for i in range(len(history)//2):
history_feedin.append(["What can I do?", system_prompt] )
history_feedin.append([history[2*i], history[2*i+1]] )
- for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
- top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+ for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
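The GetGLMHandle class introduced above moves ChatGLM into a daemon child process and streams partial replies back to the caller over a Pipe, ending each request with a '[Finish]' sentinel. A minimal sketch of that request/stream/sentinel protocol, with a dummy generator standing in for the model; everything here is illustrative rather than the patch's code:

from multiprocessing import Process, Pipe

class StreamWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    def run(self):                            # executes in the child process
        while True:
            kwargs = self.child.recv()        # one request per message
            for chunk in ("Hel", "Hello", "Hello, " + kwargs["query"]):
                self.child.send(chunk)        # stream partial responses as they grow
            self.child.send("[Finish]")       # sentinel: this request is complete

    def stream_chat(self, **kwargs):          # called from the parent process
        self.parent.send(kwargs)
        while (res := self.parent.recv()) != "[Finish]":
            yield res

if __name__ == "__main__":
    worker = StreamWorker()
    for partial in worker.stream_chat(query="world"):
        print(partial)                        # prints successively longer replies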
diff --git a/version b/version
index 59e288a..620acb6 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
{
- "version": 3.0,
+ "version": 3.1,
"show_feature": true,
"new_feature": "支持ChatGLM <-> 支持多LLM模型同时对话"
}