Merge pull request #1201 from shao0099876/master

修复了一个导致无法加载未量化的ChatGLM2 fine-tuning模型的问题
This commit is contained in:
binary-husky 2023-10-27 10:00:48 +08:00 committed by GitHub
commit f7a332eee7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -87,7 +87,7 @@ class GetGLMFTHandle(Process):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
-if model_args['quantization_bit'] is not None:
+if model_args['quantization_bit'] is not None and model_args['quantization_bit'] != 0:
print(f"Quantized to {model_args['quantization_bit']} bit")
model = model.quantize(model_args['quantization_bit'])
model = model.cuda()