Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
[Feature]: allow model mutex override in core_functional.py (#1708)
* allow_core_func_specify_model
* change arg name
* Model override now supports hot reload; raise an error when the override points to a model that does not exist
* allow model mutex override

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
@@ -33,6 +33,8 @@ def get_core_functions():
         "AutoClearHistory": False,
         # [6] Text preprocessing (optional; default None; e.g. a function that strips all line breaks)
         "PreProcess": None,
+        # [7] Model selection (optional; if unset, the current global model is used; if set, the specified model overrides the global one)
+        # "ModelOverride": "gpt-3.5-turbo", # Main use: force the specified model to be used when this core-function button is clicked.
     },
 
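For context, a minimal sketch of what a complete core_functional.py entry might look like with the new key. The button name and its Prefix/Suffix text below are hypothetical; only "ModelOverride" is the field added by this commit:

# Hypothetical core_functional.py entry illustrating the new field.
# "Summarize (cheap model)" and its prompt text are made up for this
# example; "ModelOverride" is the key introduced by this commit.
def get_core_functions():
    return {
        "Summarize (cheap model)": {
            "Prefix": "Summarize the following text:\n\n",  # prepended to the user input
            "Suffix": "",                                   # appended to the user input
            "ModelOverride": "gpt-3.5-turbo",  # force this model when the button is clicked
        },
    }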
@@ -906,6 +906,13 @@ if len(AZURE_CFG_ARRAY) > 0:
         AVAIL_LLM_MODELS += [azure_model_name]
 
 
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-=-=-=- ☝️ Model routing above -=-=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-= 👇 Multi-model routing/switch helpers below -=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
 
 
 def LLM_CATCH_EXCEPTION(f):
@@ -942,13 +949,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
     model = llm_kwargs['llm_model']
     n_model = 1
     if '&' not in model:
-        # If querying "one" LLM (the common case):
-
+        # If querying only one LLM:
         method = model_info[model]["fn_without_ui"]
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
-        # If querying "multiple" LLMs at once: slightly more verbose, but the idea is the same; you need not read this else branch
-
+        # If querying multiple LLMs at once: slightly more verbose, but the idea is the same; you need not read this else branch
         executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
@@ -1001,8 +1006,26 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
         res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
         return res
 
 
-def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
+# Adjust the model according to the core-function area's ModelOverride parameter; used in `predict`
+import importlib
+import core_functional
+def execute_model_override(llm_kwargs, additional_fn, method):
+    functional = core_functional.get_core_functions()
+    if 'ModelOverride' in functional[additional_fn]:
+        # Hot-reload Prompt & ModelOverride
+        importlib.reload(core_functional)
+        functional = core_functional.get_core_functions()
+        model_override = functional[additional_fn]['ModelOverride']
+        if model_override not in model_info:
+            raise ValueError(f"The model override parameter '{model_override}' points to a model that is not yet supported; please check the configuration file.")
+        method = model_info[model_override]["fn_with_ui"]
+        llm_kwargs['llm_model'] = model_override
+        return llm_kwargs, additional_fn, method
+    # By default, return the original arguments unchanged
+    return llm_kwargs, additional_fn, method
+
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     """
     Send to the LLM; fetch the output as a stream.
     Used for the basic chat functionality.
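A hedged usage sketch of the new helper follows; the button name is hypothetical, and model_info is the routing table defined earlier in this file. Because execute_model_override reloads core_functional whenever the entry carries an override, edits to core_functional.py take effect on the next button click without restarting the app, and an override naming an unregistered model fails fast with the new ValueError:

# Illustrative only: "Summarize (cheap model)" must exist as a key in
# core_functional.get_core_functions() for this call to succeed.
llm_kwargs = {'llm_model': 'gpt-4'}  # the global model before any override
try:
    llm_kwargs, additional_fn, method = execute_model_override(
        llm_kwargs, additional_fn="Summarize (cheap model)", method=None)
    # On success, llm_kwargs['llm_model'] now holds the overriding model
    # and method is that model's "fn_with_ui" handler from model_info.
except ValueError as err:
    # Raised when the entry's ModelOverride names a model absent from
    # model_info (e.g. not listed in AVAIL_LLM_MODELS in the config).
    print(err)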
@@ -1021,6 +1044,11 @@ def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
     """
 
     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
     method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # If this line errors, check the AVAIL_LLM_MODELS option in config
-    yield from method(inputs, llm_kwargs, *args, **kwargs)
+
+    if additional_fn: # Adjust the model according to the core-function area's ModelOverride parameter
+        llm_kwargs, additional_fn, method = execute_model_override(llm_kwargs, additional_fn, method)
+
+    yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)
+
 
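Putting the two hunks together, here is a hedged sketch of the call path after this commit; the button name and prompt are illustrative, and the keyword names follow the new predict signature shown above. When additional_fn is set, predict may swap both llm_kwargs['llm_model'] and the streaming handler before yielding UI updates:

# predict is a generator; each iteration streams an update to the UI.
llm_kwargs = {'llm_model': 'gpt-4'}  # global model; may be overridden below
for _ in predict(inputs="Summarize this paragraph ...",
                 llm_kwargs=llm_kwargs,
                 plugin_kwargs={},
                 chatbot=[],          # UI conversation container, updated in place
                 history=[],
                 system_prompt='',
                 stream=True,
                 additional_fn="Summarize (cheap model)"):  # hypothetical button name
    pass
# If that button's entry defines "ModelOverride", llm_kwargs['llm_model']
# now names the overriding model rather than 'gpt-4'.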