Merge pull request #1201 from shao0099876/master

Fixed an issue that prevented loading unquantized ChatGLM2 fine-tuned models
Author: binary-husky (committed via GitHub)
Date:   2023-10-27 10:00:48 +08:00


@@ -87,7 +87,7 @@ class GetGLMFTHandle(Process):
                     new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
                 model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
-                if model_args['quantization_bit'] is not None:
+                if model_args['quantization_bit'] is not None and model_args['quantization_bit'] != 0:
                     print(f"Quantized to {model_args['quantization_bit']} bit")
                     model = model.quantize(model_args['quantization_bit'])
                 model = model.cuda()
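
As the diff suggests, ChatGLM2 fine-tuning configs can record quantization_bit as 0 to mean "no quantization", so the old None-only check still called model.quantize(0) on unquantized checkpoints. Below is a minimal sketch of the corrected guard in isolation, assuming a ChatGLM2-style model object exposing .quantize() and .cuda() and a model_args dict parsed from the fine-tuning config; the helper name maybe_quantize is illustrative, not part of the repository.

    # Illustrative sketch only: maybe_quantize is a hypothetical helper,
    # and `model` is assumed to be a ChatGLM2-style model with
    # .quantize(bits) and .cuda() methods, as used in the diff above.
    def maybe_quantize(model, model_args: dict):
        """Quantize only when a positive bit width is configured.

        Fine-tuning configs may encode "no quantization" as either a
        missing key (None) or an explicit 0, so both must be skipped;
        quantizing with bits == 0 is what broke unquantized checkpoints.
        """
        bits = model_args.get('quantization_bit')
        if bits is not None and bits != 0:
            print(f"Quantized to {bits} bit")
            model = model.quantize(bits)
        return model.cuda()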