Update the error handling of MOSS and ChatGLM

commit 777850200d (parent 10882b677d)
```diff
@@ -94,7 +94,7 @@ def get_current_version():
     return current_version


-def auto_update():
+def auto_update(raise_error=False):
     """
     一键更新协议:查询版本和用户意见
     """
```
```diff
@@ -126,14 +126,22 @@ def auto_update():
                 try:
                     patch_and_restart(path)
                 except:
-                    print('更新失败。')
+                    msg = '更新失败。'
+                    if raise_error:
+                        from toolbox import trimmed_format_exc
+                        msg += trimmed_format_exc()
+                    print(msg)
             else:
                 print('自动更新程序:已禁用')
                 return
         else:
             return
     except:
-        print('自动更新程序:已禁用')
+        msg = '自动更新程序:已禁用'
+        if raise_error:
+            from toolbox import trimmed_format_exc
+            msg += trimmed_format_exc()
+        print(msg)

 def warm_up_modules():
     print('正在执行一些模块的预热...')
```
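The pattern here replaces a bare `print` on failure with message accumulation: build the base message, append a trimmed traceback from `toolbox.trimmed_format_exc` only when the caller opted in via `raise_error=True`, then print once. A minimal sketch of the idiom, assuming `trimmed_format_exc` formats the active exception and redacts the working directory (the repo's actual helper may differ in detail):

```python
import os
import traceback

def trimmed_format_exc():
    # Sketch: format the active exception, then redact the current
    # working directory so tracebacks don't leak absolute local paths.
    return traceback.format_exc().replace(os.getcwd(), ".")

def auto_update_demo(raise_error=False):
    # Mirrors the patched auto_update(): quiet by default, verbose
    # diagnostics only when the caller asks for them.
    try:
        raise RuntimeError("simulated update failure")
    except Exception:
        msg = "Update failed. "
        if raise_error:
            msg += trimmed_format_exc()
        print(msg)

auto_update_demo()                   # end users see only the short message
auto_update_demo(raise_error=True)   # debug callers also get the traceback
```

Importing `trimmed_format_exc` inside the `except` branch keeps the happy path free of the extra import, at the cost of repeating it in both handlers.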
|
```diff
@@ -46,7 +46,7 @@ MAX_RETRY = 2

 # OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]

 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
```
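Adding `"moss"` to `AVAIL_LLM_MODELS` is what exposes the new MOSS bridge in the model selector, while `LLM_MODEL` keeps `gpt-3.5-turbo` as the default. A hypothetical validation helper (not part of the repo) showing how such a whitelist is typically consumed:

```python
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4",
                    "api2d-gpt-4", "chatglm", "moss", "newbing"]

def select_model(name: str) -> str:
    # Reject anything not registered in the whitelist up front,
    # rather than failing later inside a model bridge.
    if name not in AVAIL_LLM_MODELS:
        raise ValueError(f"unknown model {name!r}; expected one of {AVAIL_LLM_MODELS}")
    return name

print(select_model("moss"))  # -> moss
```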
|
```diff
@@ -16,6 +16,13 @@ try {
     live2d_settings['canTakeScreenshot'] = false;
     live2d_settings['canTurnToHomePage'] = false;
     live2d_settings['canTurnToAboutPage'] = false;
+    live2d_settings['showHitokoto'] = false;        // 显示一言
+    live2d_settings['showF12Status'] = false;       // 显示加载状态
+    live2d_settings['showF12Message'] = false;      // 显示看板娘消息
+    live2d_settings['showF12OpenMsg'] = false;      // 显示控制台打开提示
+    live2d_settings['showCopyMessage'] = false;     // 显示 复制内容 提示
+    live2d_settings['showWelcomeMessage'] = true;   // 显示进入面页欢迎词
+
     /* 在 initModel 前添加 */
     initModel("file=docs/waifu_plugin/waifu-tips.json");
 }});
```
|
```diff
@@ -87,7 +87,7 @@ class GetGLMHandle(Process):
 global glm_handle
 glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
     """
     多线程方法
     函数的说明请见 request_llm/bridge_all.py
```
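Changing the `observe_window` default from `None` to `[]` lets the body use uniform `len(observe_window)` guards instead of `None` checks. The usual shared-mutable-default pitfall does not bite here: as the next two hunks show, every write is guarded by a length check, so the empty default list is never actually mutated. A small sketch of the guarded-write idiom:

```python
def report(observe_window=[]):
    # The empty default never passes the length guard, so the shared
    # default list is never written to.
    if len(observe_window) >= 1:
        observe_window[0] = "latest status"

report()         # no-op: the default [] fails the guard
win = ["", 0.0]
report(win)      # a caller-supplied buffer is updated in place
print(win[0])    # -> latest status
```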
```diff
@@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     global glm_handle
     if glm_handle is None:
         glm_handle = GetGLMHandle()
-        observe_window[0] = load_message + "\n\n" + glm_handle.info
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
         if not glm_handle.success:
             error = glm_handle.info
             glm_handle = None
```
```diff
@@ -110,7 +110,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
     response = ""
     for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        observe_window[0] = response
+        if len(observe_window) >= 1: observe_window[0] = response
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
```
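Together the guarded writes preserve the watchdog protocol: `observe_window[0]` is a mailbox for the newest partial response, `observe_window[1]` holds a heartbeat timestamp that a supervising caller keeps refreshing, and a heartbeat older than `watch_dog_patience` makes the worker raise and stop. A self-contained sketch of that protocol (the names here are illustrative, not the repo's API):

```python
import time
import threading

WATCH_DOG_PATIENCE = 5  # seconds, matching the patched code

def stream_with_watchdog(chunks, observe_window=[]):
    # Stand-in for consuming glm_handle.stream_chat: publish each
    # partial response and abort if the supervisor's heartbeat in
    # observe_window[1] goes stale.
    for chunk in chunks:
        if len(observe_window) >= 1:
            observe_window[0] = chunk
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > WATCH_DOG_PATIENCE:
                raise RuntimeError("terminated by watchdog")
        time.sleep(0.1)

window = ["", time.time()]
worker = threading.Thread(target=stream_with_watchdog,
                          args=(["a", "ab", "abc"], window))
worker.start()
worker.join()
print(window[0])  # -> abc
```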
|
```diff
@@ -153,7 +153,8 @@ class GetGLMHandle(Process):
                     print(response.lstrip('\n'))
                     self.child.send(response.lstrip('\n'))
                 except:
-                    self.child.send('[Local Message] Call MOSS fail.')
+                    from toolbox import trimmed_format_exc
+                    self.child.send('[Local Message] Call MOSS fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
                 # 请求处理结束,开始下一个循环
                 self.child.send('[Finish]')

```
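Rather than swallowing the exception behind a fixed string, the MOSS worker process now ships the traceback to the parent through the `multiprocessing` pipe, wrapped in a fenced code block so it renders legibly in the chat window. A self-contained sketch of the parent/child error-forwarding pattern (using the standard `traceback.format_exc` where the repo uses its own `trimmed_format_exc`):

```python
import traceback
from multiprocessing import Process, Pipe

def worker(conn):
    # On failure, forward a readable, fenced traceback to the parent
    # instead of dying silently, then signal completion with a sentinel.
    try:
        raise ValueError("simulated MOSS failure")
    except Exception:
        fence = '\n' + '`' * 3 + '\n'
        conn.send('[Local Message] Call MOSS fail.'
                  + fence + traceback.format_exc() + fence)
    conn.send('[Finish]')

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=worker, args=(child_conn,))
    p.start()
    while (msg := parent_conn.recv()) != '[Finish]':
        print(msg)
    p.join()
```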
```diff
@@ -217,6 +218,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if not moss_handle.success:
             moss_handle = None
             return
+    else:
+        response = "[Local Message]: 等待MOSS响应中 ..."
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)

     if additional_fn is not None:
         import core_functional
```
```diff
@@ -231,15 +236,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         history_feedin.append([history[2*i], history[2*i+1]] )

     # 开始接收chatglm的回复
-    response = "[Local Message]: 等待MOSS响应中 ..."
-    chatbot[-1] = (inputs, response)
-    yield from update_ui(chatbot=chatbot, history=history)
     for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        chatbot[-1] = (inputs, response)
+        chatbot[-1] = (inputs, response.strip('<|MOSS|>: '))
         yield from update_ui(chatbot=chatbot, history=history)

     # 总结输出
     if response == "[Local Message]: 等待MOSS响应中 ...":
         response = "[Local Message]: MOSS响应异常 ..."
-    history.extend([inputs, response])
+    history.extend([inputs, response.strip('<|MOSS|>: ')])
     yield from update_ui(chatbot=chatbot, history=history)
```
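One caveat on `response.strip('<|MOSS|>: ')`: `str.strip` takes a *set of characters*, not a literal prefix, so it removes any leading or trailing run of the characters `<`, `|`, `M`, `O`, `S`, `>`, `:` and space. That peels off the `<|MOSS|>: ` tag, but it can also eat legitimate text that begins or ends with characters from that set. On Python 3.9+, `str.removeprefix` matches the exact tag only:

```python
raw = "<|MOSS|>: SOS received: stand by"

# strip() removes runs of {<, |, M, O, S, >, :, space} from both ends,
# so the leading "SOS " is eaten along with the tag.
print(raw.strip('<|MOSS|>: '))          # -> "received: stand by"

# removeprefix() (Python 3.9+) removes the literal tag and nothing else.
print(raw.removeprefix("<|MOSS|>: "))   # -> "SOS received: stand by"
```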