Fixed an issue where an unquantized ChatGLM2 fine-tuned model could not be loaded (quantization_bit=0)

This commit is contained in:
shao0099876
2023-10-26 14:38:58 +00:00
Parent 706a239232
Commit f6e34d9621


@@ -87,7 +87,7 @@ class GetGLMFTHandle(Process):
                 new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
             model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
-            if model_args['quantization_bit'] is not None:
+            if model_args['quantization_bit'] is not None and model_args['quantization_bit'] != 0:
                 print(f"Quantized to {model_args['quantization_bit']} bit")
                 model = model.quantize(model_args['quantization_bit'])
             model = model.cuda()
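
For context, a minimal sketch of the corrected loading path. The quantization guard mirrors the patched line above; the function name load_chatglm2_ft and the 'model_name_or_path' key are illustrative assumptions, while model.quantize() is the method exposed by ChatGLM2's remote code, as seen in the diff.

from transformers import AutoModel

def load_chatglm2_ft(model_args: dict):
    # Hypothetical loader showing the fix; 'model_name_or_path' is an
    # assumed key, not taken from the patched file.
    model = AutoModel.from_pretrained(
        model_args['model_name_or_path'], trust_remote_code=True)
    qbit = model_args.get('quantization_bit')
    # quantization_bit == 0 means "no quantization": calling
    # model.quantize(0) on an unquantized checkpoint is what broke
    # loading before this commit, so skip both None and 0.
    if qbit is not None and qbit != 0:
        print(f"Quantized to {qbit} bit")
        model = model.quantize(qbit)
    return model.cuda()

Checking qbit != 0 explicitly (rather than relying on truthiness) keeps None and 0 distinct from legitimate bit widths such as 4 or 8.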