From 294df6c2d5d6f36e1ca009cd9eb8e9f35cd5b218 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?YE=20Ke=20=E5=8F=B6=E6=9F=AF?= <17290550+YipKo@users.noreply.github.com>
Date: Sat, 7 Dec 2024 23:43:51 +0800
Subject: [PATCH] Add ChatGLM4 local deployment support and refactor ChatGLM
 bridge's path configuration (#2062)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* ✨ feat(request_llms and config.py): ChatGLM4 Deployment

Add support for local deployment of the ChatGLM4 model

* 🦄 refactor(bridge_chatglm3.py): ChatGLM3 model path

Added ChatGLM3 path customization (in config.py).
Removed unused quantization model options that had been commented out

---------

Co-authored-by: MarkDeia <17290550+MarkDeia@users.noreply.github.com>
---
 README.md                              | 18 ++++--
 config.py                              |  6 +-
 request_llms/bridge_all.py             | 12 ++++
 request_llms/bridge_chatglm3.py        | 20 +++---
 request_llms/bridge_chatglm4.py        | 81 ++++++++++++++++++++++++++
 request_llms/requirements_chatglm4.txt |  7 +++
 6 files changed, 124 insertions(+), 20 deletions(-)
 create mode 100644 request_llms/bridge_chatglm4.py
 create mode 100644 request_llms/requirements_chatglm4.txt
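Taken together, the diffs below leave only a handful of config.py settings for the user to touch in order to run GLM-4 locally. A minimal sketch of those entries, assuming the weights were downloaded to ./THUDM/glm-4-9b-chat as in the README instructions below; the trimmed AVAIL_LLM_MODELS list is for illustration only:

```python
# config.py (sketch) -- only the settings relevant to a local ChatGLM4 deployment.
LLM_MODEL = "chatglm4"                                        # route requests to the new local bridge
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm3", "chatglm4"]  # illustrative subset; keep "chatglm4" in the real list
CHATGLM_LOCAL_MODEL_PATH = "./THUDM/glm-4-9b-chat"            # directory produced by the modelscope download
LOCAL_MODEL_DEVICE = "cuda"                                   # a 9B bf16 model is impractical on "cpu"
```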
diff --git a/README.md b/README.md
index 2b8ffaa5..cf0189ce 100644
--- a/README.md
+++ b/README.md
@@ -170,26 +170,32 @@ flowchart TD
 ```
 
-<details><summary>Click here to expand if you need Tsinghua ChatGLM2 / Fudan MOSS / RWKV as a backend</summary>
+<details><summary>Click here to expand if you need the Tsinghua ChatGLM series / Fudan MOSS / RWKV as a backend</summary>
 <p>
 
-[Optional] To use Tsinghua ChatGLM3 / Fudan MOSS as a backend, additional dependencies must be installed (prerequisites: familiarity with Python, experience with PyTorch, and a sufficiently powerful machine):
+[Optional] To use the Tsinghua ChatGLM series / Fudan MOSS as a backend, additional dependencies must be installed (prerequisites: familiarity with Python, experience with PyTorch, and a sufficiently powerful machine):
 
 ```sh
 # [Optional step I] Support Tsinghua ChatGLM3. Note on Tsinghua ChatGLM: if you hit the error "Call ChatGLM fail 不能正常加载ChatGLM的参数" (cannot load ChatGLM parameters), refer to the following: 1. the default installation above is the torch+cpu build; to use CUDA, uninstall torch and reinstall torch+cuda; 2. if the model cannot be loaded because the machine is underpowered, change the model precision in request_llm/bridge_chatglm.py by replacing every AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
 python -m pip install -r request_llms/requirements_chatglm.txt
 
-# [Optional step II] Support Fudan MOSS
+# [Optional step II] Support Tsinghua ChatGLM4. Note: this model needs at least 24 GB of GPU memory
+python -m pip install -r request_llms/requirements_chatglm4.txt
+# The ChatGLM4 model can be downloaded with modelscope:
+# pip install modelscope
+# modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat
+
+# [Optional step III] Support Fudan MOSS
 python -m pip install -r request_llms/requirements_moss.txt
 git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss  # note: this command must be run from the project root directory
 
-# [Optional step III] Support RWKV Runner
+# [Optional step IV] Support RWKV Runner
 See the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
 
-# [Optional step IV] Make sure AVAIL_LLM_MODELS in config.py contains the desired models; all currently supported models are listed below (the jittorllms series currently only supports the docker deployment):
+# [Optional step V] Make sure AVAIL_LLM_MODELS in config.py contains the desired models; all currently supported models are listed below (the jittorllms series currently only supports the docker deployment):
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
-# [Optional step V] Support INT8/INT4 quantization of local models (the models referred to here are not themselves quantized versions; quantization currently works for deepseek-coder, and more models will be added after testing)
+# [Optional step VI] Support INT8/INT4 quantization of local models (the models referred to here are not themselves quantized versions; quantization currently works for deepseek-coder, and more models will be added after testing)
 pip install bitsandbytes
 # Windows users should install bitsandbytes with the bitsandbytes-windows-webui build below
 python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
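Because the new bridge assumes the weights are already on disk, it can be worth verifying the download before launching the app. A minimal standalone check (not part of this patch), assuming the default --local_dir from the commands above and a CUDA device with enough memory:

```python
# Sanity-check that the glm-4-9b-chat download loads the same way the new bridge loads it.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "./THUDM/glm-4-9b-chat"   # the --local_dir used in the modelscope command
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_dir, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, trust_remote_code=True
).eval().to("cuda")
inputs = tokenizer("Hello", return_tensors="pt").to("cuda")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=8)[0], skip_special_tokens=True))
```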
diff --git a/config.py b/config.py
index 51f04d22..6353cb8a 100644
--- a/config.py
+++ b/config.py
@@ -36,7 +36,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
                     "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-3.5-turbo-1106",
                     "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
-                    "gemini-1.5-pro", "chatglm3"
+                    "gemini-1.5-pro", "chatglm3", "chatglm4"
                     ]
 
 EMBEDDING_MODEL = "text-embedding-3-small"
@@ -143,6 +143,9 @@ BAIDU_CLOUD_SECRET_KEY = ''
 BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot'  # options: "ERNIE-Bot-4" (ERNIE 4.0), "ERNIE-Bot" (ERNIE Bot), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat", "ERNIE-Speed-128K", "ERNIE-Speed-8K", "ERNIE-Lite-8K"
 
+# To use the local ChatGLM3 or ChatGLM4 model, set LLM_MODEL="chatglm3" or LLM_MODEL="chatglm4", and specify the model path here
+CHATGLM_LOCAL_MODEL_PATH = "THUDM/glm-4-9b-chat"  # e.g. "/home/hmp/ChatGLM3-6B/"
+
 # To use a fine-tuned ChatGLM2 model, set LLM_MODEL="chatglmft" and specify the checkpoint path here
 CHATGLM_PTUNING_CHECKPOINT = ""  # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
 
@@ -375,6 +378,7 @@ DAAS_SERVER_URLS = [ f"https://niuziniu-biligpt{i}.hf.space/stream" for i in ran
 Overview of local large models
 │
+├── "chatglm4"
 ├── "chatglm3"
 ├── "chatglm"
 ├── "chatglm_onnx"
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 2c3cf14d..1aef7089 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -26,6 +26,9 @@ from .bridge_chatglm import predict as chatglm_ui
 
 from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui
 from .bridge_chatglm3 import predict as chatglm3_ui
 
+from .bridge_chatglm4 import predict_no_ui_long_connection as chatglm4_noui
+from .bridge_chatglm4 import predict as chatglm4_ui
+
 from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
 from .bridge_qianfan import predict as qianfan_ui
 
@@ -416,6 +419,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
+    # local ChatGLM models
     # map "chatglm" directly onto chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
@@ -441,6 +445,14 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "chatglm4": {
+        "fn_with_ui": chatglm4_ui,
+        "fn_without_ui": chatglm4_noui,
+        "endpoint": None,
+        "max_token": 8192,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
     "qianfan": {
         "fn_with_ui": qianfan_ui,
         "fn_without_ui": qianfan_noui,
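Registering the two callbacks in model_info is all the dispatcher needs; the rest of bridge_all.py resolves a request by model name. A simplified sketch of that lookup, paraphrased rather than copied from the project (the function and argument names here are illustrative):

```python
# Paraphrased dispatch logic: a request for "chatglm4" resolves to the callbacks
# registered above, so no other code has to know about the new backend.
def dispatch_sketch(model_info, llm_kwargs, inputs, history, sys_prompt):
    entry = model_info[llm_kwargs["llm_model"]]   # e.g. model_info["chatglm4"]
    fn = entry["fn_without_ui"]                   # chatglm4_noui for non-UI calls
    return fn(inputs, llm_kwargs, history, sys_prompt)
```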
diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py
index 95b629d1..67811858 100644
--- a/request_llms/bridge_chatglm3.py
+++ b/request_llms/bridge_chatglm3.py
@@ -23,39 +23,33 @@ class GetGLM3Handle(LocalLLMHandle):
         import os
         import platform
 
-        LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
-        _model_name_ = "THUDM/chatglm3-6b"
-        # if LOCAL_MODEL_QUANT == "INT4":  # INT4
-        #     _model_name_ = "THUDM/chatglm3-6b-int4"
-        # elif LOCAL_MODEL_QUANT == "INT8":  # INT8
-        #     _model_name_ = "THUDM/chatglm3-6b-int8"
-        # else:
-        #     _model_name_ = "THUDM/chatglm3-6b"  # FP16
+        LOCAL_MODEL_PATH, LOCAL_MODEL_QUANT, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
+        model_path = LOCAL_MODEL_PATH
 
         with ProxyNetworkActivate("Download_LLM"):
             chatglm_tokenizer = AutoTokenizer.from_pretrained(
-                _model_name_, trust_remote_code=True
+                model_path, trust_remote_code=True
             )
             if device == "cpu":
                 chatglm_model = AutoModel.from_pretrained(
-                    _model_name_,
+                    model_path,
                     trust_remote_code=True,
                     device="cpu",
                 ).float()
             elif LOCAL_MODEL_QUANT == "INT4":  # INT4
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
+                    pretrained_model_name_or_path=model_path,
                     trust_remote_code=True,
                     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
                 )
             elif LOCAL_MODEL_QUANT == "INT8":  # INT8
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
+                    pretrained_model_name_or_path=model_path,
                     trust_remote_code=True,
                     quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                 )
             else:
                 chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
+                    pretrained_model_name_or_path=model_path,
                     trust_remote_code=True,
                     device="cuda",
                 )
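With the path now coming from CHATGLM_LOCAL_MODEL_PATH, quantization is requested at load time through bitsandbytes instead of by swapping in a pre-quantized checkpoint, which is why the commented-out *-int4/-int8 model names could be dropped. A minimal standalone sketch of the INT4 branch, assuming bitsandbytes is installed and using the example path from config.py:

```python
# Load a local ChatGLM3 checkpoint in 4-bit, mirroring the LOCAL_MODEL_QUANT == "INT4" branch above.
from transformers import AutoModel, AutoTokenizer, BitsAndBytesConfig

model_path = "/home/hmp/ChatGLM3-6B/"   # whatever CHATGLM_LOCAL_MODEL_PATH points to
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_path,
    trust_remote_code=True,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),  # weights are quantized on the fly
)
```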
diff --git a/request_llms/bridge_chatglm4.py b/request_llms/bridge_chatglm4.py
new file mode 100644
index 00000000..1e0ba854
--- /dev/null
+++ b/request_llms/bridge_chatglm4.py
@@ -0,0 +1,81 @@
+model_name = "ChatGLM4"
+cmd_to_install = """
+`pip install -r request_llms/requirements_chatglm4.txt`
+`pip install modelscope`
+`modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat`
+"""
+
+
+from toolbox import get_conf, ProxyNetworkActivate
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
+
+
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 Local Model
+# ------------------------------------------------------------------------------------------------------------------------
+class GetGLM4Handle(LocalLLMHandle):
+
+    def load_model_info(self):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
+        self.model_name = model_name
+        self.cmd_to_install = cmd_to_install
+
+    def load_model_and_tokenizer(self):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
+        import torch
+        from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
+        import os
+
+        LOCAL_MODEL_PATH, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_DEVICE")
+        model_path = LOCAL_MODEL_PATH
+        chatglm_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        chatglm_model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            torch_dtype=torch.bfloat16,
+            low_cpu_mem_usage=True,
+            trust_remote_code=True,
+            device=device
+        ).eval().to(device)
+        self._model = chatglm_model
+        self._tokenizer = chatglm_tokenizer
+        return self._model, self._tokenizer
+
+
+    def llm_stream_generator(self, **kwargs):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
+        def adaptor(kwargs):
+            query = kwargs["query"]
+            max_length = kwargs["max_length"]
+            top_p = kwargs["top_p"]
+            temperature = kwargs["temperature"]
+            history = kwargs["history"]
+            return query, max_length, top_p, temperature, history
+
+        query, max_length, top_p, temperature, history = adaptor(kwargs)
+        inputs = self._tokenizer.apply_chat_template([{"role": "user", "content": query}],
+                                                     add_generation_prompt=True,
+                                                     tokenize=True,
+                                                     return_tensors="pt",
+                                                     return_dict=True
+                                                     ).to(self._model.device)
+        gen_kwargs = {"max_length": max_length, "do_sample": True, "top_p": top_p, "temperature": temperature}
+
+        outputs = self._model.generate(**inputs, **gen_kwargs)
+        outputs = outputs[:, inputs['input_ids'].shape[1]:]
+        response = self._tokenizer.decode(outputs[0], skip_special_tokens=True)
+        yield response
+
+    def try_to_import_special_deps(self, **kwargs):
+        # import something that will raise an error if the user has not installed requirements_*.txt
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
+        import importlib
+
+        # importlib.import_module('modelscope')
+
+
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 GPT-Academic Interface
+# ------------------------------------------------------------------------------------------------------------------------
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
+    GetGLM4Handle, model_name, history_format="chatglm3"
+)
diff --git a/request_llms/requirements_chatglm4.txt b/request_llms/requirements_chatglm4.txt
new file mode 100644
index 00000000..69cbec29
--- /dev/null
+++ b/request_llms/requirements_chatglm4.txt
@@ -0,0 +1,7 @@
+protobuf
+cpm_kernels
+torch>=1.10
+transformers>=4.44
+mdtex2html
+sentencepiece
+accelerate
\ No newline at end of file
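One limitation worth noting: llm_stream_generator above yields the reply only once, after generation finishes, so the UI will not stream tokens for ChatGLM4. If incremental output is wanted later, transformers' TextIteratorStreamer is one way to get it; a rough sketch under that assumption (not part of this patch, names mirror the handler above):

```python
# Hypothetical streaming variant of llm_stream_generator: generation runs in a
# background thread and the accumulated text is yielded as tokens are decoded.
from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(model, tokenizer, inputs, max_length, top_p, temperature):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate,
           kwargs=dict(**inputs, streamer=streamer, max_length=max_length,
                       do_sample=True, top_p=top_p, temperature=temperature)).start()
    response = ""
    for new_text in streamer:
        response += new_text
        yield response   # yield the running total, like the non-streaming handler does
```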