Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-07 15:06:48 +00:00
Commit: ChatGLM switched to multi-process execution
```diff
@@ -31,6 +31,24 @@ methods = {
     "tgui-ui": tgui_ui,
 }
 
+def LLM_CATCH_EXCEPTION(f):
+    """
+    装饰器函数,将错误显示出来
+    """
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+        try:
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+        except Exception as e:
+            from toolbox import get_conf
+            import traceback
+            proxies, = get_conf('proxies')
+            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+            observe_window[0] = tb_str
+            return tb_str
+    return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
     发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
```
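The hunk above introduces `LLM_CATCH_EXCEPTION`, a decorator that catches any exception raised by a model worker, formats the traceback inside a fenced code block, writes it into the first slot of the observation window, and returns it as the worker's result, so a failing backend reports its error in the UI instead of silently killing its thread. Below is a minimal self-contained sketch of that pattern; the names `catch_exception` and `flaky_model` and the three-slot window layout are illustrative assumptions, not code from the commit.

```python
# Sketch of the same idea: wrap a worker so that any exception is rendered
# into the shared observe_window instead of being lost inside a thread.
import time
import traceback

def catch_exception(f):
    def decorated(inputs, observe_window):
        try:
            return f(inputs, observe_window)
        except Exception:
            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
            observe_window[0] = tb_str   # surface the traceback in the UI slot
            return tb_str
    return decorated

@catch_exception
def flaky_model(inputs, observe_window):
    # stand-in for a real LLM call that blows up
    raise RuntimeError("simulated backend failure")

window = ["", time.time(), ""]             # assumed layout: [visible text, timestamp, spare]
print(flaky_model("hello", window)[:40])   # returns the traceback text instead of raising
```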
```diff
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # 如果同时询问多个大语言模型:
-        executor = ThreadPoolExecutor(max_workers=16)
+        executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
 
         window_len = len(observe_window)
-        if window_len==0:
-            window_mutex = [[] for _ in range(n_model)] + [True]
-        elif window_len==1:
-            window_mutex = [[""] for _ in range(n_model)] + [True]
-        elif window_len==2:
-            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+        assert window_len==3
+        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
 
         futures = []
         for i in range(n_model):
```
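This hunk shrinks the thread pool from 16 to 4 workers and tightens the contract on `observe_window`: the old fallback that padded 0-, 1- or 2-element windows is removed, and the caller must now pass exactly three slots; each queried model then gets its own three-slot sub-window, with a trailing `True` flag that keeps the merging thread alive. A short sketch of the assumed layout follows (model names are placeholders, and the meaning of the slots is inferred from how the surrounding code uses them):

```python
# Sketch of the observe_window contract this hunk introduces.
import time

models = "gpt-3.5-turbo&chatglm".split('&')      # "&"-separated multi-model query
n_model = len(models)

observe_window = ["", time.time(), ""]           # assumed: [visible text, watchdog timestamp, spare]
assert len(observe_window) == 3                  # shorter windows are no longer padded

# one sub-window per model, plus a trailing flag that keeps the merge thread running
window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
print(len(window_mutex), window_mutex[-1])       # n_model sub-windows + the stop flag (True)
```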
```diff
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
             method = methods['tgui-no-ui']
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
             futures.append(future)
 
         def mutex_manager(window_mutex, observe_window):
             while True:
-                time.sleep(0.2)
+                time.sleep(0.5)
                 if not window_mutex[-1]: break
                 # 看门狗(watchdog)
                 for i in range(n_model):
```
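Two behavioural tweaks here: each worker is now submitted through `LLM_CATCH_EXCEPTION`, and the merge loop polls every 0.5 s instead of 0.2 s. The wrapping matters because an exception inside a `Future` is only re-raised when `result()` is called, long after the UI has stopped updating; with the wrapper, the worker returns the traceback as an ordinary string. A hypothetical side-by-side illustration (`worker` and `catch_exception` are stand-ins, not repo code):

```python
# Compare a bare future with a wrapped one: only the wrapped worker turns its
# exception into a normal return value that the streaming UI can display.
from concurrent.futures import ThreadPoolExecutor
import traceback

def catch_exception(f):
    def decorated(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            return traceback.format_exc()
    return decorated

def worker(prompt):
    raise ValueError("backend rejected request")

with ThreadPoolExecutor(max_workers=4) as executor:
    bare = executor.submit(worker, "hi")
    wrapped = executor.submit(catch_exception(worker), "hi")
    print(wrapped.result()[:30])           # traceback text, no exception raised
    print(bare.exception() is not None)    # True: the bare worker's error stays inside the future
```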
```diff
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 # 观察窗(window)
                 chat_string = []
                 for i in range(n_model):
-                    chat_string.append( f"[{str(models[i])} 说]: {window_mutex[i][0]}" )
-                res = '\n\n---\n\n'.join(chat_string)
+                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
+                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                 # # # # # # # # # # #
                 observe_window[0] = res
 
```
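This hunk is purely cosmetic: each model's streaming text is wrapped in a `<font>` tag using the per-model `colors` list added earlier, and the blocks are joined with an HTML line break plus a horizontal rule. A tiny sketch of the resulting string (model names and replies are made up):

```python
# Build the merged observation string the way the new code does.
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
models = ['gpt-3.5-turbo', 'chatglm']
replies = ['Paris is the capital of France.', '法国的首都是巴黎。']

chat_string = []
for i, model in enumerate(models):
    chat_string.append(f'【{model} 说】: <font color="{colors[i]}"> {replies[i]} </font>')
res = '<br/><br/>\n\n---\n\n'.join(chat_string)   # one colored block per model
print(res)
```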
```diff
@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         t_model.start()
 
         return_string_collect = []
+        while True:
+            worker_done = [h.done() for h in futures]
+            if all(worker_done):
+                executor.shutdown()
+                break
+            time.sleep(1)
+
         for i, future in enumerate(futures): # wait and get
-            return_string_collect.append( f"[{str(models[i])} 说]: {future.result()}" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
 
         window_mutex[-1] = False # stop mutex thread
-        res = '\n\n---\n\n'.join(return_string_collect)
+        res = '<br/>\n\n---\n\n'.join(return_string_collect)
         return res
```
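Finally, the gather step changes from blocking on each future's `result()` in turn to polling `Future.done()` once per second and shutting the executor down only after every worker has finished; the collected replies then get the same colored formatting as the streaming view. An isolated sketch of that polling pattern, with placeholder workers standing in for the LLM calls:

```python
# Poll until every worker is done, then collect results without blocking
# on any single future.
import time
import random
from concurrent.futures import ThreadPoolExecutor

def worker(i):
    time.sleep(random.uniform(0.1, 0.5))   # pretend to call a model backend
    return f"reply from model {i}"

executor = ThreadPoolExecutor(max_workers=4)
futures = [executor.submit(worker, i) for i in range(3)]

while True:
    worker_done = [f.done() for f in futures]
    if all(worker_done):
        executor.shutdown()
        break
    time.sleep(1)

print([f.result() for f in futures])        # all finished, so result() returns immediately
```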