support qwen3 models

这个提交包含在:
binary-husky
2025-04-29 11:09:53 +08:00
提交者 GitHub
父节点 883b513b91
当前提交 fc06be6f7a

查看文件

@@ -869,7 +869,10 @@ if "qwen-local" in AVAIL_LLM_MODELS:
logger.error(trimmed_format_exc())
# -=-=-=-=-=-=- 阿里云百炼(通义)-在线模型 -=-=-=-=-=-=-
qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus","dashscope-deepseek-r1","dashscope-deepseek-v3"]
# Registry of Alibaba Cloud DashScope (Tongyi Qianwen) online model names that
# this bridge can serve. Each entry must exactly match a key registered in the
# model_info.update({...}) call below, otherwise the AVAIL_LLM_MODELS check
# can never activate it.
qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus",
               "dashscope-deepseek-r1","dashscope-deepseek-v3",
               # BUG FIX: was "dashscope-qwen3-qwen3-32b" (duplicated "qwen3-"),
               # which did not match the registered config key "dashscope-qwen3-32b".
               "dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
               ]
if any(item in qwen_models for item in AVAIL_LLM_MODELS):
try:
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
@@ -938,6 +941,34 @@ if any(item in qwen_models for item in AVAIL_LLM_MODELS):
"max_token": 57344,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"dashscope-qwen3-14b": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"enable_reasoning": True,
"can_multi_thread": True,
"endpoint": None,
"max_token": 129024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"dashscope-qwen3-235b-a22b": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"can_multi_thread": True,
"endpoint": None,
"max_token": 129024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"dashscope-qwen3-32b": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"can_multi_thread": True,
"endpoint": None,
"max_token": 129024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except: