镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-06 14:36:48 +00:00
[Feature]: allow model mutex override in core_functional.py (#1708)
* allow_core_func_specify_model * change arg name * 模型覆盖支持热更新&当模型覆盖指向不存在的模型时报错 * allow model mutex override --------- Co-authored-by: binary-husky <qingxu.fu@outlook.com>
这个提交包含在:
@@ -906,6 +906,13 @@ if len(AZURE_CFG_ARRAY) > 0:
|
||||
AVAIL_LLM_MODELS += [azure_model_name]
|
||||
|
||||
|
||||
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
||||
# -=-=-=-=-=-=-=-=-=- ☝️ 以上是模型路由 -=-=-=-=-=-=-=-=-=
|
||||
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
||||
|
||||
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
||||
# -=-=-=-=-=-=-= 👇 以下是多模型路由切换函数 -=-=-=-=-=-=-=
|
||||
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
||||
|
||||
|
||||
def LLM_CATCH_EXCEPTION(f):
|
||||
@@ -942,13 +949,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
|
||||
model = llm_kwargs['llm_model']
|
||||
n_model = 1
|
||||
if '&' not in model:
|
||||
|
||||
# 如果只询问1个大语言模型:
|
||||
# 如果只询问“一个”大语言模型(多数情况):
|
||||
method = model_info[model]["fn_without_ui"]
|
||||
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
|
||||
else:
|
||||
|
||||
# 如果同时询问多个大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
|
||||
# 如果同时询问“多个”大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
|
||||
executor = ThreadPoolExecutor(max_workers=4)
|
||||
models = model.split('&')
|
||||
n_model = len(models)
|
||||
@@ -1001,8 +1006,26 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
|
||||
res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
|
||||
return res
|
||||
|
||||
# 根据基础功能区 ModelOverride 参数调整模型类型,用于 `predict` 中
|
||||
import importlib
|
||||
import core_functional
|
||||
def execute_model_override(llm_kwargs, additional_fn, method):
    """
    Apply the optional ``ModelOverride`` setting of a core-function button.

    When the selected core function (``additional_fn``) declares a
    ``ModelOverride`` entry, hot-reload ``core_functional`` so edits to the
    prompt/override take effect without restarting, then redirect the request
    to the overriding model.

    Args:
        llm_kwargs: per-request LLM configuration dict; ``llm_kwargs['llm_model']``
            is rewritten in place when an override applies.
        additional_fn: key of the active core function (returned unchanged).
        method: current ``fn_with_ui`` callable; replaced by the override
            model's handler when an override applies.

    Returns:
        Tuple ``(llm_kwargs, additional_fn, method)`` — updated when an
        override is configured, otherwise the inputs untouched.

    Raises:
        ValueError: if ``ModelOverride`` names a model absent from ``model_info``.
    """
    functional = core_functional.get_core_functions()
    # Guard clause: nothing to do unless this core function declares an override.
    if 'ModelOverride' not in functional[additional_fn]:
        return llm_kwargs, additional_fn, method
    # Hot-reload so prompt & ModelOverride edits are picked up at runtime.
    importlib.reload(core_functional)
    functional = core_functional.get_core_functions()
    model_override = functional[additional_fn]['ModelOverride']
    if model_override not in model_info:
        raise ValueError(f"模型覆盖参数 '{model_override}' 指向一个暂不支持的模型,请检查配置文件。")
    method = model_info[model_override]["fn_with_ui"]
    llm_kwargs['llm_model'] = model_override
    return llm_kwargs, additional_fn, method
|
||||
|
||||
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
    """
    Send the input to the LLM and stream the output back.

    Used for the basic chat functionality of the UI.

    Args:
        inputs: user query text.
        llm_kwargs: per-request LLM configuration (model name, temperature, ...).
        plugin_kwargs: plugin parameters forwarded to the model handler.
        chatbot: UI chat widget state, forwarded to the model handler.
        history: prior conversation turns (list of strings).
        system_prompt: system-level prompt text.
        stream: whether to stream the reply.
        additional_fn: key of an active core-function button, if any; may carry
            a ``ModelOverride`` that redirects the request to another model.

    Yields:
        Streaming updates produced by the selected model's ``fn_with_ui`` handler.
    """
    inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")

    # If this lookup raises, check the AVAIL_LLM_MODELS option in config.
    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]

    # A core-function button may override the model via its ModelOverride entry.
    if additional_fn:
        llm_kwargs, additional_fn, method = execute_model_override(llm_kwargs, additional_fn, method)

    yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)
|
||||
|
||||
|
||||
在新工单中引用
屏蔽一个用户