update the error handling of moss and chatglm

This commit is contained in:
fuqingxu
2023-05-08 19:21:17 +08:00
Parent 10882b677d
Current commit 777850200d
5 files changed, with 30 insertions and 13 deletions

View file

@@ -94,7 +94,7 @@ def get_current_version():
     return current_version
 
-def auto_update():
+def auto_update(raise_error=False):
     """
     One-click update protocol: check the version and ask for the user's opinion
     """
     try:
@@ -126,14 +126,22 @@ def auto_update():
                 try:
                     patch_and_restart(path)
                 except:
-                    print('Update failed.')
+                    msg = 'Update failed.'
+                    if raise_error:
+                        from toolbox import trimmed_format_exc
+                        msg += trimmed_format_exc()
+                    print(msg)
             else:
                 print('Auto-update program: disabled')
                 return
         else:
             return
     except:
-        print('Auto-update program: disabled')
+        msg = 'Auto-update program: disabled'
+        if raise_error:
+            from toolbox import trimmed_format_exc
+            msg += trimmed_format_exc()
+        print(msg)
 
 def warm_up_modules():
     print('Warming up some modules...')
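The pattern in both except branches is the same: keep the short user-facing message, and append a trimmed traceback only when the caller opts in via raise_error=True. The helper's body is not part of this commit, so the sketch below is only an assumption about what toolbox.trimmed_format_exc plausibly does:

```python
import os
import traceback

def trimmed_format_exc():
    # Assumed behavior: format the active exception's traceback and replace
    # the absolute working directory with '.', keeping the printed error short
    # and free of local paths. Not the actual toolbox implementation.
    return traceback.format_exc().replace(os.getcwd(), '.')
```

Called as auto_update(raise_error=True), the updater would then print 'Update failed.' followed by the cause, rather than the bare message.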

View file

@@ -46,7 +46,7 @@ MAX_RETRY = 2
 
 # OpenAI model selection: gpt-4 is currently only open to approved applicants; to try gpt-4 you can use api2d
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]
 
 # Execution device for local LLMs such as ChatGLM: CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # option: "cuda"
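Adding "moss" to this list is what exposes the new bridge in the model picker. As a rough sketch of how a dispatcher might guard against models missing from the list (check_model and its error text are illustrative, not the project's actual bridge_all.py logic):

```python
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4",
                    "api2d-gpt-4", "chatglm", "moss", "newbing"]

def check_model(llm_model: str) -> str:
    # Fail fast on models that have no registered bridge (illustrative only).
    if llm_model not in AVAIL_LLM_MODELS:
        raise ValueError(f"Unknown model {llm_model!r}; pick one of {AVAIL_LLM_MODELS}")
    return llm_model
```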

View file

@@ -16,6 +16,13 @@ try {
     live2d_settings['canTakeScreenshot'] = false;
     live2d_settings['canTurnToHomePage'] = false;
     live2d_settings['canTurnToAboutPage'] = false;
+    live2d_settings['showHitokoto'] = false;        // show Hitokoto (random quote)
+    live2d_settings['showF12Status'] = false;       // show loading status
+    live2d_settings['showF12Message'] = false;      // show mascot messages
+    live2d_settings['showF12OpenMsg'] = false;      // show console-opened warning
+    live2d_settings['showCopyMessage'] = false;     // show "content copied" toast
+    live2d_settings['showWelcomeMessage'] = true;   // show welcome message on page entry
+
     /* add before initModel */
     initModel("file=docs/waifu_plugin/waifu-tips.json");
 }});

View file

@@ -87,7 +87,7 @@ class GetGLMHandle(Process):
 global glm_handle
 glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
     """
     Multithreaded method
     For documentation, see request_llm/bridge_all.py
@@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     global glm_handle
     if glm_handle is None:
         glm_handle = GetGLMHandle()
-        observe_window[0] = load_message + "\n\n" + glm_handle.info
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
         if not glm_handle.success:
             error = glm_handle.info
             glm_handle = None
@@ -110,7 +110,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
     response = ""
     for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        observe_window[0] = response
+        if len(observe_window) >= 1: observe_window[0] = response
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("Program terminated.")
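The new len(observe_window) guards make the empty default safe: slot 0 is where the worker publishes its latest partial response, and slot 1 is a keep-alive timestamp that the watching thread refreshes. A minimal sketch of this watchdog contract, with a hypothetical generate() standing in for glm_handle.stream_chat:

```python
import time

def stream_with_watchdog(generate, observe_window=[], patience=5):
    # Mirrors the patched signature: an empty observe_window simply disables
    # both the partial-output channel and the watchdog.
    response = ""
    for response in generate():
        if len(observe_window) >= 1:
            observe_window[0] = response  # publish partial output to the watcher
        if len(observe_window) >= 2:
            # observe_window[1] holds the watcher's last keep-alive timestamp
            if time.time() - observe_window[1] > patience:
                raise RuntimeError("Watchdog timeout: watcher stopped responding.")
    return response
```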

View file

@@ -153,7 +153,8 @@ class GetGLMHandle(Process):
                     print(response.lstrip('\n'))
                     self.child.send(response.lstrip('\n'))
             except:
-                self.child.send('[Local Message] Call MOSS fail.')
+                from toolbox import trimmed_format_exc
+                self.child.send('[Local Message] Call MOSS fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
 
             # Request handled; start the next loop
             self.child.send('[Finish]')
@@ -217,6 +218,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if not moss_handle.success:
             moss_handle = None
             return
+    else:
+        response = "[Local Message]: Waiting for MOSS response ..."
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
 
     if additional_fn is not None:
         import core_functional
@@ -231,15 +236,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     # Start receiving chatglm's reply
-    response = "[Local Message]: Waiting for MOSS response ..."
-    chatbot[-1] = (inputs, response)
-    yield from update_ui(chatbot=chatbot, history=history)
     for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        chatbot[-1] = (inputs, response)
+        chatbot[-1] = (inputs, response.strip('<|MOSS|>: '))
         yield from update_ui(chatbot=chatbot, history=history)
 
     # Summarize output
     if response == "[Local Message]: Waiting for MOSS response ...":
         response = "[Local Message]: MOSS response exception ..."
-    history.extend([inputs, response])
+    history.extend([inputs, response.strip('<|MOSS|>: ')])
     yield from update_ui(chatbot=chatbot, history=history)
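One caveat about the new response.strip('<|MOSS|>: ') calls: str.strip removes any leading and trailing characters drawn from the given set, not the literal prefix, so it can also eat legitimate '<', '|', 'M', 'O', 'S', ':' and space characters at the ends of a reply. A prefix-aware helper is safer; a sketch, not part of this commit:

```python
def strip_moss_tag(response: str) -> str:
    # Remove only the literal role tag, never characters from the reply itself.
    tag = "<|MOSS|>: "
    return response[len(tag):] if response.startswith(tag) else response
    # Python 3.9+ equivalent: response.removeprefix(tag)
```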