镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-06 06:26:47 +00:00
添加中转渠道支持功能
- 更新config.py配置文件,添加中转渠道相关配置项 - 修改bridge_all.py和bridge_chatgpt.py以支持中转渠道 - 更新key_pattern_manager.py处理中转渠道密钥模式
这个提交包含在:
52
config.py
52
config.py
@@ -8,6 +8,9 @@
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
# [step 1-1]>> ( 接入OpenAI模型家族 ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
|
# [step 1-1]>> ( 接入OpenAI模型家族 ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
|
||||||
|
# NOTE(review): pickle.TRUE is the bytes opcode constant b'I01\n', NOT the builtin
# bool True — this import is almost certainly an accidental IDE auto-completion
# and should be removed once nothing references the name TRUE.
from pickle import TRUE
|
||||||
|
|
||||||
|
|
||||||
API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
|
API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
|
||||||
|
|
||||||
# [step 1-2]>> ( 强烈推荐!接入通义家族 & 大模型服务平台百炼 ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/
|
# [step 1-2]>> ( 强烈推荐!接入通义家族 & 大模型服务平台百炼 ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/
|
||||||
@@ -16,6 +19,36 @@ DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY(用于接入qwen-max,dashsco
|
|||||||
# [step 1-3]>> ( 接入 deepseek-reasoner, 即 deepseek-r1 ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
|
# [step 1-3]>> ( 接入 deepseek-reasoner, 即 deepseek-r1 ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
|
||||||
DEEPSEEK_API_KEY = ""
|
DEEPSEEK_API_KEY = ""
|
||||||
|
|
||||||
|
# [step 1-4]>> ( 接入中转渠道 ) 中转渠道配置,支持采用OpenAI接口模式的中转渠道商
|
||||||
|
# Whether to enable the relay ("zhongzhuan") channel. Disabled by default, as the
# original comment itself stated ("默认关闭"); the previous value `TRUE` was pickle's
# bytes constant b'I01\n' (an accidental IDE auto-import), which is always truthy
# and therefore silently force-enabled the channel.
ZHONGZHUAN_ENABLE = False
|
||||||
|
ZHONGZHUAN_ENDPOINT = "https://test.com/v1/chat/completions" # 中转渠道的完整API端点
|
||||||
|
# Relay-channel API key; leave empty to fall back to API_KEY. The previous
# placeholder "sk-xxxxxxxxxxxxxxx" was non-empty, so the documented fallback
# to API_KEY could never trigger and a fake key would be sent instead.
ZHONGZHUAN_API_KEY = ""
|
||||||
|
ZHONGZHUAN_MODELS = [
|
||||||
|
# 中转渠道支持的模型列表,使用原始模型名称
|
||||||
|
"o3-mini-all",
|
||||||
|
"gpt-4.1",
|
||||||
|
"gpt-4o",
|
||||||
|
"gpt-4o-mini",
|
||||||
|
"claude-sonnet-4-20250514-thinking",
|
||||||
|
"claude-sonnet-4-20250514",
|
||||||
|
"gemini-2.5-pro-preview-03-25",
|
||||||
|
# 可以添加更多模型...
|
||||||
|
]
|
||||||
|
# 配置说明:
|
||||||
|
# 1. 将ZHONGZHUAN_ENABLE设置为True启用中转渠道
|
||||||
|
# 2. 将ZHONGZHUAN_ENDPOINT设置为你的中转渠道商提供的完整API端点(包含/chat/completions)
|
||||||
|
# 3. 将ZHONGZHUAN_API_KEY设置为你的中转渠道商提供的API KEY(可选)
|
||||||
|
# 4. 在ZHONGZHUAN_MODELS中配置你想要使用的模型,使用原始模型名称
|
||||||
|
# 5. 系统将自动把ZHONGZHUAN_MODELS中的模型添加到AVAIL_LLM_MODELS中,无需重复配置
|
||||||
|
# 6. 对于同时在两个列表中的模型,将自动使用中转渠道访问
|
||||||
|
#
|
||||||
|
# 示例配置:
|
||||||
|
# ZHONGZHUAN_ENABLE = True
|
||||||
|
# ZHONGZHUAN_ENDPOINT = "https://api.your-provider.com/v1/chat/completions"
|
||||||
|
# ZHONGZHUAN_API_KEY = "your-api-key-here"
|
||||||
|
# ZHONGZHUAN_MODELS = ["o3-mini-all", "gpt-4.1", "claude-sonnet-4-20250514"]
|
||||||
|
# 然后可以直接设置LLM_MODEL = "o3-mini-all"(将通过中转渠道访问)
|
||||||
|
|
||||||
# [step 2]>> 改为True应用代理。如果使用本地或无地域限制的大模型时,此处不修改;如果直接在海外服务器部署,此处不修改
|
# [step 2]>> 改为True应用代理。如果使用本地或无地域限制的大模型时,此处不修改;如果直接在海外服务器部署,此处不修改
|
||||||
USE_PROXY = False
|
USE_PROXY = False
|
||||||
if USE_PROXY:
|
if USE_PROXY:
|
||||||
@@ -49,6 +82,14 @@ AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024-
|
|||||||
"dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
|
"dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
# Automatically fold the relay-channel models into AVAIL_LLM_MODELS so users
# don't have to list them twice; a model present in both lists will be routed
# through the relay channel.
if ZHONGZHUAN_ENABLE and ZHONGZHUAN_MODELS:
    # Lazy membership check against the live list preserves the original
    # append-loop semantics (in-list duplicates are still skipped).
    AVAIL_LLM_MODELS.extend(m for m in ZHONGZHUAN_MODELS if m not in AVAIL_LLM_MODELS)
    print(f"[中转渠道] 已启用,共{len(ZHONGZHUAN_MODELS)}个模型将通过中转渠道访问: {', '.join(ZHONGZHUAN_MODELS)}")
|
||||||
|
|
||||||
EMBEDDING_MODEL = "text-embedding-3-small"
|
EMBEDDING_MODEL = "text-embedding-3-small"
|
||||||
|
|
||||||
# --- --- --- ---
|
# --- --- --- ---
|
||||||
@@ -158,7 +199,7 @@ MAX_RETRY = 2
|
|||||||
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
||||||
|
|
||||||
|
|
||||||
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
# 定义界面上"询问多个GPT模型"插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
||||||
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
||||||
|
|
||||||
|
|
||||||
@@ -361,7 +402,7 @@ AUTO_CONTEXT_CLIP_ENABLE = False
|
|||||||
AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN = 30*1000
|
AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN = 30*1000
|
||||||
# 无条件丢弃x以上的轮数
|
# 无条件丢弃x以上的轮数
|
||||||
AUTO_CONTEXT_MAX_ROUND = 64
|
AUTO_CONTEXT_MAX_ROUND = 64
|
||||||
# 在裁剪上下文时,倒数第x次对话能“最多”保留的上下文token的比例占 AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN 的多少
|
# 在裁剪上下文时,倒数第x次对话能"最多"保留的上下文token的比例占 AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN 的多少
|
||||||
AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0.12, 0.10, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
|
AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0.12, 0.10, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
|
||||||
|
|
||||||
|
|
||||||
@@ -469,4 +510,11 @@ AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0
|
|||||||
└── MATHPIX_APPKEY
|
└── MATHPIX_APPKEY
|
||||||
|
|
||||||
|
|
||||||
|
"zhongzhuan-..." 中转渠道模型配置
|
||||||
|
├── ZHONGZHUAN_ENABLE
|
||||||
|
├── ZHONGZHUAN_ENDPOINT
|
||||||
|
├── ZHONGZHUAN_API_KEY
|
||||||
|
└── ZHONGZHUAN_MODELS
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
该文件中主要包含2个函数,是所有LLM的通用接口,它们会继续向下调用更底层的LLM模型,处理多模型并行等细节
|
该文件中主要包含2个函数,是所有LLM的通用接口,它们会继续向下调用更底层的LLM模型,处理多模型并行等细节
|
||||||
|
|
||||||
@@ -115,6 +114,12 @@ get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_speci
|
|||||||
# 开始初始化模型
|
# 开始初始化模型
|
||||||
AVAIL_LLM_MODELS, LLM_MODEL = get_conf("AVAIL_LLM_MODELS", "LLM_MODEL")
|
AVAIL_LLM_MODELS, LLM_MODEL = get_conf("AVAIL_LLM_MODELS", "LLM_MODEL")
|
||||||
AVAIL_LLM_MODELS = AVAIL_LLM_MODELS + [LLM_MODEL]
|
AVAIL_LLM_MODELS = AVAIL_LLM_MODELS + [LLM_MODEL]
|
||||||
|
|
||||||
|
# Fetch the relay-channel ("zhongzhuan") configuration values from user config.
ZHONGZHUAN_ENABLE, ZHONGZHUAN_ENDPOINT, ZHONGZHUAN_API_KEY, ZHONGZHUAN_MODELS = get_conf(
    "ZHONGZHUAN_ENABLE",
    "ZHONGZHUAN_ENDPOINT",
    "ZHONGZHUAN_API_KEY",
    "ZHONGZHUAN_MODELS",
)
|
||||||
|
|
||||||
# -=-=-=-=-=-=- 以下这部分是最早加入的最稳定的模型 -=-=-=-=-=-=-
|
# -=-=-=-=-=-=- 以下这部分是最早加入的最稳定的模型 -=-=-=-=-=-=-
|
||||||
model_info = {
|
model_info = {
|
||||||
# openai
|
# openai
|
||||||
@@ -1415,6 +1420,23 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("openrouter-")]:
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
# -=-=-=-=-=-=- Relay-channel ("zhongzhuan") model support -=-=-=-=-=-=-
# Register a unified model_info entry for every relay-channel model so each one
# is served by the OpenAI-compatible chatgpt bridge at ZHONGZHUAN_ENDPOINT.
# NOTE: merging ZHONGZHUAN_MODELS into AVAIL_LLM_MODELS is already done in config.py.
if ZHONGZHUAN_ENABLE and ZHONGZHUAN_MODELS:
    for model in ZHONGZHUAN_MODELS:
        model_info.update({
            model: {
                "fn_with_ui": chatgpt_ui,
                "fn_without_ui": chatgpt_noui,
                "endpoint": ZHONGZHUAN_ENDPOINT,
                # assumes every relay model accepts image input — TODO confirm per provider
                "has_multimodal_capacity": True,
                # was 12800000 (12.8M) — almost certainly a typo; 128000 matches the
                # 128k context window of the listed gpt-4o / gpt-4.1 class models
                "max_token": 128000,
                "tokenizer": tokenizer_gpt4,
                "token_cnt": get_token_num_gpt4,
            }
        })
|
||||||
|
|
||||||
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
|
||||||
# -=-=-=-=-=-=-=-=-=- ☝️ 以上是模型路由 -=-=-=-=-=-=-=-=-=
|
# -=-=-=-=-=-=-=-=-=- ☝️ 以上是模型路由 -=-=-=-=-=-=-=-=-=
|
||||||
@@ -1459,11 +1481,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
|
|||||||
model = llm_kwargs['llm_model']
|
model = llm_kwargs['llm_model']
|
||||||
n_model = 1
|
n_model = 1
|
||||||
if '&' not in model:
|
if '&' not in model:
|
||||||
# 如果只询问“一个”大语言模型(多数情况):
|
# 如果只询问"一个"大语言模型(多数情况):
|
||||||
method = model_info[model]["fn_without_ui"]
|
method = model_info[model]["fn_without_ui"]
|
||||||
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
|
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
|
||||||
else:
|
else:
|
||||||
# 如果同时询问“多个”大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
|
# 如果同时询问"多个"大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
|
||||||
executor = ThreadPoolExecutor(max_workers=4)
|
executor = ThreadPoolExecutor(max_workers=4)
|
||||||
models = model.split('&')
|
models = model.split('&')
|
||||||
n_model = len(models)
|
n_model = len(models)
|
||||||
|
|||||||
@@ -241,9 +241,19 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
|||||||
yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
|
||||||
return
|
return
|
||||||
elif not is_any_api_key(chatbot._cookies['api_key']):
|
elif not is_any_api_key(chatbot._cookies['api_key']):
|
||||||
chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
|
# 对于中转渠道模型,额外检查中转渠道API key
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
|
is_zhongzhuan_valid = False
|
||||||
return
|
try:
|
||||||
|
ZHONGZHUAN_ENABLE, ZHONGZHUAN_MODELS, ZHONGZHUAN_API_KEY = get_conf("ZHONGZHUAN_ENABLE", "ZHONGZHUAN_MODELS", "ZHONGZHUAN_API_KEY")
|
||||||
|
if ZHONGZHUAN_ENABLE and llm_kwargs['llm_model'] in ZHONGZHUAN_MODELS and ZHONGZHUAN_API_KEY:
|
||||||
|
is_zhongzhuan_valid = is_any_api_key(ZHONGZHUAN_API_KEY)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if not is_zhongzhuan_valid:
|
||||||
|
chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
user_input = inputs
|
user_input = inputs
|
||||||
if additional_fn is not None:
|
if additional_fn is not None:
|
||||||
@@ -269,12 +279,22 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
|||||||
|
|
||||||
# check mis-behavior
|
# check mis-behavior
|
||||||
if is_the_upload_folder(user_input):
|
if is_the_upload_folder(user_input):
|
||||||
chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
|
chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击\"**函数插件区**\"按钮进行处理,请勿点击\"提交\"按钮或者\"基础功能区\"按钮。")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
|
||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, image_base64_array, has_multimodal_capacity, stream)
|
# 对于中转渠道模型,需要确保使用正确的API key
|
||||||
|
llm_kwargs_modified = llm_kwargs.copy()
|
||||||
|
try:
|
||||||
|
ZHONGZHUAN_ENABLE, ZHONGZHUAN_MODELS, ZHONGZHUAN_API_KEY = get_conf("ZHONGZHUAN_ENABLE", "ZHONGZHUAN_MODELS", "ZHONGZHUAN_API_KEY")
|
||||||
|
if ZHONGZHUAN_ENABLE and llm_kwargs['llm_model'] in ZHONGZHUAN_MODELS and ZHONGZHUAN_API_KEY:
|
||||||
|
# 确保中转渠道模型使用正确的API key
|
||||||
|
llm_kwargs_modified['api_key'] = ZHONGZHUAN_API_KEY
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
headers, payload = generate_payload(inputs, llm_kwargs_modified, history, system_prompt, image_base64_array, has_multimodal_capacity, stream)
|
||||||
except RuntimeError as e:
|
except RuntimeError as e:
|
||||||
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
||||||
|
|||||||
@@ -45,8 +45,17 @@ def is_cohere_api_key(key):
|
|||||||
|
|
||||||
|
|
||||||
def is_any_api_key(key):
|
def is_any_api_key(key):
|
||||||
# key 一般只包含字母、数字、下划线、逗号、中划线
|
# 首先检查是否为中转渠道API KEY
|
||||||
if not re.match(r"^[a-zA-Z0-9_\-,]+$", key):
|
try:
|
||||||
|
ZHONGZHUAN_ENABLE, ZHONGZHUAN_API_KEY = get_conf("ZHONGZHUAN_ENABLE", "ZHONGZHUAN_API_KEY")
|
||||||
|
if ZHONGZHUAN_ENABLE and ZHONGZHUAN_API_KEY and key == ZHONGZHUAN_API_KEY:
|
||||||
|
return True
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# key 一般只包含字母、数字、下划线、逗号、中划线,但为了支持更多中转渠道,适当放宽限制
|
||||||
|
# 允许点号(.),用于支持某些中转渠道的特殊格式
|
||||||
|
if not re.match(r"^[a-zA-Z0-9_\-,\.]+$", key):
|
||||||
# 如果配置了 CUSTOM_API_KEY_PATTERN,再检查以下以免误杀
|
# 如果配置了 CUSTOM_API_KEY_PATTERN,再检查以下以免误杀
|
||||||
if CUSTOM_API_KEY_PATTERN := get_conf('CUSTOM_API_KEY_PATTERN'):
|
if CUSTOM_API_KEY_PATTERN := get_conf('CUSTOM_API_KEY_PATTERN'):
|
||||||
return bool(re.match(CUSTOM_API_KEY_PATTERN, key))
|
return bool(re.match(CUSTOM_API_KEY_PATTERN, key))
|
||||||
@@ -93,6 +102,22 @@ def select_api_key(keys, llm_model):
|
|||||||
avail_key_list = []
|
avail_key_list = []
|
||||||
key_list = keys.split(',')
|
key_list = keys.split(',')
|
||||||
|
|
||||||
|
# 中转渠道API KEY处理
|
||||||
|
try:
|
||||||
|
ZHONGZHUAN_ENABLE, ZHONGZHUAN_MODELS, ZHONGZHUAN_API_KEY = get_conf("ZHONGZHUAN_ENABLE", "ZHONGZHUAN_MODELS", "ZHONGZHUAN_API_KEY")
|
||||||
|
if ZHONGZHUAN_ENABLE and llm_model in ZHONGZHUAN_MODELS:
|
||||||
|
# 如果模型在中转渠道列表中,优先使用中转渠道的API KEY
|
||||||
|
if ZHONGZHUAN_API_KEY:
|
||||||
|
return ZHONGZHUAN_API_KEY
|
||||||
|
# 如果没有设置专门的中转渠道API KEY,则使用OpenAI格式的key(中转渠道一般采用OpenAI接口格式)
|
||||||
|
for k in key_list:
|
||||||
|
if is_openai_api_key(k): avail_key_list.append(k)
|
||||||
|
if len(avail_key_list) > 0:
|
||||||
|
return random.choice(avail_key_list)
|
||||||
|
except Exception:
|
||||||
|
# 如果获取中转渠道配置失败,继续使用原有逻辑
|
||||||
|
pass
|
||||||
|
|
||||||
if llm_model.startswith('gpt-') or llm_model.startswith('chatgpt-') or \
|
if llm_model.startswith('gpt-') or llm_model.startswith('chatgpt-') or \
|
||||||
llm_model.startswith('one-api-') or is_o_family_for_openai(llm_model):
|
llm_model.startswith('one-api-') or is_o_family_for_openai(llm_model):
|
||||||
for k in key_list:
|
for k in key_list:
|
||||||
|
|||||||
在新工单中引用
屏蔽一个用户