Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Add ChatGLM4 local deployment support and refactor ChatGLM bridge's path configuration (#2062)
* ✨ feat(request_llms and config.py): ChatGLM4 deployment
  Add support for local deployment of the ChatGLM4 model.

* 🦄 refactor(bridge_chatglm3.py): ChatGLM3 model path
  Make the ChatGLM3 model path customizable (in config.py). Remove unused quantization-model options that had been commented out.

---------

Co-authored-by: MarkDeia <17290550+MarkDeia@users.noreply.github.com>
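Once the model is registered (diff below), switching to it is a config change. A minimal sketch using the existing gpt_academic options LLM_MODEL and AVAIL_LLM_MODELS (the default values shown here are illustrative, not the repo's actual defaults):

```python
# config.py -- selecting the newly added model (LLM_MODEL and AVAIL_LLM_MODELS
# are existing gpt_academic options; the exact defaults in your copy may differ)
LLM_MODEL = "chatglm4"                                        # default model for all requests
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm3", "chatglm4"]  # choices offered in the UI
```

The diff below shows the corresponding changes to the request_llms bridge registry.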
```diff
@@ -26,6 +26,9 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui
 from .bridge_chatglm3 import predict as chatglm3_ui
 
+from .bridge_chatglm4 import predict_no_ui_long_connection as chatglm4_noui
+from .bridge_chatglm4 import predict as chatglm4_ui
+
 from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
 from .bridge_qianfan import predict as qianfan_ui
 
```
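Every bridge module under request_llms exports the same pair of entry points, which is why the new model needs only these two import lines: predict streams into the Gradio UI, while predict_no_ui_long_connection is the blocking variant used by plugins. A minimal sketch of that contract, with signatures approximated from the sibling ChatGLM bridges and _stream_glm4 as a hypothetical stand-in for the actual local generation loop:

```python
# Illustrative sketch of the bridge contract, not the actual bridge_chatglm4.py.
# Signatures are approximated from the sibling ChatGLM bridges; _stream_glm4 is
# a hypothetical placeholder for the local GLM-4 generation loop.

def _stream_glm4(inputs, history, sys_prompt):
    # Placeholder generator: the real bridge loads the checkpoint (path taken
    # from config.py) and yields text chunks as they are produced.
    yield f"[GLM-4 reply to: {inputs}]"

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                                  observe_window=[]):
    # Blocking variant used by plugins: returns the complete reply as a string.
    response = ""
    for chunk in _stream_glm4(inputs, history, sys_prompt):
        response += chunk
        if observe_window:
            observe_window[0] = response  # lets the caller watch progress
    return response

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[],
            system_prompt="", stream=True, additional_fn=None):
    # Streaming variant used by the Gradio UI: yields updated chatbot state.
    chatbot.append((inputs, ""))
    response = ""
    for chunk in _stream_glm4(inputs, history, system_prompt):
        response += chunk
        chatbot[-1] = (inputs, response)
        yield chatbot, history
```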
```diff
@@ -416,6 +419,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
     # ChatGLM本地模型
+    # 将 chatglm 直接对齐到 chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
```
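The two comments above read "ChatGLM local models" and "alias chatglm directly to chatglm2": the bare "chatglm" key is kept for backward compatibility but is served by the ChatGLM2 bridge. The path-customization half of this commit lives in config.py, which is not part of this hunk; a hypothetical sketch of such an override follows, where the two *_MODEL_PATH names are invented for illustration (check the commit's actual config.py diff for the real option names) and only LOCAL_MODEL_DEVICE is a pre-existing gpt_academic option:

```python
# config.py -- hypothetical sketch; the *_MODEL_PATH key names are invented
# here, see this commit's actual config.py diff for the real ones.
CHATGLM_LOCAL_MODEL_PATH = "THUDM/chatglm3-6b"     # HF repo id or a local checkpoint dir
CHATGLM4_LOCAL_MODEL_PATH = "THUDM/glm-4-9b-chat"  # likewise for the GLM-4 checkpoint
LOCAL_MODEL_DEVICE = "cuda"                        # existing option: "cuda" or "cpu"
```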
```diff
@@ -441,6 +445,14 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "chatglm4": {
+        "fn_with_ui": chatglm4_ui,
+        "fn_without_ui": chatglm4_noui,
+        "endpoint": None,
+        "max_token": 8192,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
     "qianfan": {
         "fn_with_ui": qianfan_ui,
         "fn_without_ui": qianfan_noui,
```
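With the registry entry in place, nothing else in the request layer has to change: the dispatcher resolves the model name to its entry and calls the matching function. Roughly, as a simplified sketch (dispatch is an illustrative name, not the actual function in the repo; the real dispatcher also handles multi-model queries and error reporting):

```python
# Simplified sketch of the lookup performed on model_info.
def dispatch(llm_model, inputs, llm_kwargs, history, sys_prompt, observe_window):
    if llm_model not in model_info:
        raise RuntimeError(f"Unknown LLM model: {llm_model}")
    method = model_info[llm_model]["fn_without_ui"]  # e.g. chatglm4_noui
    return method(inputs, llm_kwargs, history, sys_prompt, observe_window)
```

The "endpoint": None and "max_token": 8192 fields mark chatglm4 as a local model with an 8k context budget; counting tokens with the GPT-3.5 tokenizer is only an approximation, but it is the same convention the other ChatGLM entries already use.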