2 files changed: +10 −3 lines

tests/torchtune/modules/peft
@@ -237,9 +237,16 @@ def test_quantized_state_dict(self, dtype):
         )
 
     def test_qat_lora_forward(self, inputs, lora_linear, out_dim) -> None:
-        lora_linear = lora_linear(use_bias=True, dtype=torch.float32)
+        lora_linear = lora_linear(use_bias=False, dtype=torch.float32)
         qat_lora_linear = QATLoRALinear.from_lora_linear(lora_linear)
         expected = torch.tensor(QAT_EXPECTED_VAL)
         actual = qat_lora_linear(inputs)
         assert actual.shape == (BSZ, SEQ_LEN, out_dim)
         torch.testing.assert_close(actual.mean(), expected, atol=1e-4, rtol=1e-6)
+
+    def test_qat_lora_with_bias_raises_error(self, lora_linear) -> None:
+        lora_linear_with_bias = lora_linear(use_bias=True, dtype=torch.float32)
+        with pytest.raises(
+            ValueError, match="Bias is not supported in QAT \\+ LoRA yet"
+        ):
+            QATLoRALinear.from_lora_linear(lora_linear_with_bias)
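Note on the new test: pytest's match argument is treated as a regular expression and checked with re.search, which is why the literal "+" in the error message is escaped as "\\+". A minimal standalone sketch of that behavior, using a hypothetical reject_bias stand-in rather than the real torchtune fixture:

# Sketch of the pytest.raises(match=...) semantics relied on above.
# re.escape is an equivalent alternative to hand-escaping the "+".
import re

import pytest


def reject_bias() -> None:
    # Hypothetical stand-in for QATLoRALinear.from_lora_linear on a biased layer.
    raise ValueError("Bias is not supported in QAT + LoRA yet")


def test_match_escapes_the_plus() -> None:
    with pytest.raises(
        ValueError, match=re.escape("Bias is not supported in QAT + LoRA yet")
    ):
        reject_bias()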
@@ -277,9 +277,9 @@ def from_lora_linear(
         preserving the weights and adapters.
         """
         if lora_linear.bias is not None:
-            ValueError("Bias is not supported in QAT + LoRA yet")
+            raise ValueError("Bias is not supported in QAT + LoRA yet")
         if lora_linear._quantize_base:
-            ValueError("quantize_base is not compatible with QAT + LoRA")
+            raise ValueError("quantize_base is not compatible with QAT + LoRA")
         if isinstance(lora_linear.dropout, nn.Dropout):
             dropout = lora_linear.dropout.p
         else:
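For context on why this change matters: a bare ValueError(...) expression constructs the exception object and immediately discards it, so before the fix both guards silently let invalid configurations through. A minimal sketch, with hypothetical helpers mirroring the guard in from_lora_linear:

# Illustration of the bug fixed above: without `raise`, the exception is a no-op.
def check_bias_broken(bias) -> None:
    if bias is not None:
        ValueError("Bias is not supported in QAT + LoRA yet")  # created, never raised


def check_bias_fixed(bias) -> None:
    if bias is not None:
        raise ValueError("Bias is not supported in QAT + LoRA yet")


check_bias_broken(object())      # returns normally; the invalid bias slips through
try:
    check_bias_fixed(object())
except ValueError as err:
    print(err)                   # Bias is not supported in QAT + LoRA yet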