handle local llm dependency error properly

This commit is contained in:
binary-husky
2023-08-07 02:11:48 +08:00
Parent c17fc2a9b5
Commit 184e417fec
2 changed files with 4 additions and 2 deletions

View file

@@ -58,8 +58,8 @@ class GetONNXGLMHandle(LocalLLMHandle):
     def try_to_import_special_deps(self, **kwargs):
         # import something that will raise error if the user does not install requirement_*.txt
         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
-        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
-        pass
+        import importlib
+        importlib.import_module('modelscope')
     # ------------------------------------------------------------------------------------------------------------------------
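Note on the change above: replacing the commented-out import and `pass` with `importlib.import_module('modelscope')` means a missing package now raises a normal ImportError that the handle's startup can catch and report, instead of the check being silently skipped. The sketch below only illustrates that pattern; the DependencyProbe class and its message text are illustrative assumptions, not the project's actual LocalLLMHandle implementation (which does expose `running` and `info`, as the second file's diff shows).

import importlib

class DependencyProbe:
    # Illustrative stand-in for a handle's startup check, not the real LocalLLMHandle.
    def __init__(self):
        self.running = True
        self.info = "ready"
        try:
            # Raises ModuleNotFoundError (a subclass of ImportError) when the package is absent.
            importlib.import_module('modelscope')
        except ImportError as e:
            self.running = False
            self.info = f"Missing local-LLM dependency: {e}. Please install the matching requirement_*.txt"

probe = DependencyProbe()
print(probe.running, probe.info)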

View file

@@ -124,6 +124,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
         """
         _llm_handle = LLMSingletonClass()
         if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info
+        if not _llm_handle.running: raise RuntimeError(_llm_handle.info)
         # chatglm has no sys_prompt interface, so the prompt is merged into history
         history_feedin = []
@@ -152,6 +153,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name):
         _llm_handle = LLMSingletonClass()
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.info)
         yield from update_ui(chatbot=chatbot, history=[])
+        if not _llm_handle.running: raise RuntimeError(_llm_handle.info)
         if additional_fn is not None:
            from core_functional import handle_core_functionality
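The two inserted `if not _llm_handle.running: raise RuntimeError(_llm_handle.info)` guards make a failed dependency load visible at call time: the predict functions fail immediately with the handle's own diagnostic text instead of waiting on a backend that never started. A minimal, self-contained sketch of that effect follows (StubHandle and predict_sketch are hypothetical names for illustration, not code from the repository):

class StubHandle:
    # Hypothetical handle whose dependency check failed at startup.
    running = False
    info = "Cannot import modelscope; please install the dependencies listed in requirement_*.txt"

def predict_sketch(inputs, handle):
    # Mirrors the added guard: fail fast with the handle's diagnostic message
    # instead of forwarding work to a backend that never came up.
    if not handle.running:
        raise RuntimeError(handle.info)
    return f"(would forward {inputs!r} to the local model here)"

try:
    predict_sketch("hello", StubHandle())
except RuntimeError as err:
    print("dependency error surfaced to the caller:", err)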