From 184e417fec772c5a8dce302672240a7677ed64d6 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Mon, 7 Aug 2023 02:11:48 +0800
Subject: [PATCH] handle local llm dependency error properly

---
 request_llm/bridge_qwen.py     | 4 ++--
 request_llm/local_llm_class.py | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/request_llm/bridge_qwen.py b/request_llm/bridge_qwen.py
index cd437e4..07ed243 100644
--- a/request_llm/bridge_qwen.py
+++ b/request_llm/bridge_qwen.py
@@ -58,8 +58,8 @@ class GetONNXGLMHandle(LocalLLMHandle):
     def try_to_import_special_deps(self, **kwargs):
         # import something that will raise error if the user does not install requirement_*.txt
         # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
-        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
-        pass
+        import importlib
+        importlib.import_module('modelscope')


 # ------------------------------------------------------------------------------------------------------------------------
diff --git a/request_llm/local_llm_class.py b/request_llm/local_llm_class.py
index 1470717..3dd266f 100644
--- a/request_llm/local_llm_class.py
+++ b/request_llm/local_llm_class.py
@@ -124,6 +124,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
         """
         _llm_handle = LLMSingletonClass()
         if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info
+        if not _llm_handle.running: raise RuntimeError(_llm_handle.info)

         # chatglm 没有 sys_prompt 接口,因此把prompt加入 history
         history_feedin = []
@@ -152,6 +153,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
         _llm_handle = LLMSingletonClass()
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.info)
         yield from update_ui(chatbot=chatbot, history=[])
+        if not _llm_handle.running: raise RuntimeError(_llm_handle.info)

         if additional_fn is not None:
             from core_functional import handle_core_functionality
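
Note (not part of the patch above): a minimal standalone sketch of the pattern the two hunks implement, probing an optional dependency with importlib and refusing to serve requests when the handle failed to start. Only the running/info attributes and the 'modelscope' module name mirror the real LocalLLMHandle usage; every other name below is hypothetical.

import importlib

class DependencyCheckedHandle:
    # Hypothetical stand-in for LocalLLMHandle: records whether startup succeeded.
    def __init__(self, required_module='modelscope'):
        self.running = True
        self.info = "model handle ready"
        try:
            # Raises ModuleNotFoundError right away if the optional dependency
            # is missing, instead of failing later inside the worker process.
            importlib.import_module(required_module)
        except ImportError as err:
            self.running = False
            self.info = f"dependency check failed: {err}"

def predict(handle, prompt):
    # Mirror the added lines in the patch: abort loudly on a dead handle.
    if not handle.running:
        raise RuntimeError(handle.info)
    return f"echo: {prompt}"

if __name__ == "__main__":
    handle = DependencyCheckedHandle()
    try:
        print(predict(handle, "hello"))
    except RuntimeError as err:
        print(f"cannot run local model: {err}")

Raising RuntimeError in the caller, rather than only writing the error text into the chat window, keeps generators such as predict_no_ui_long_connection from looping on a handle that never started, which appears to be the intent of the two added lines.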