import unittest
from unittest.mock import Mock, call, patch

import pytest
import torch
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import AdaLoraConfig, PromptTuningConfig, PromptTuningInit, get_peft_model

from .testing_common import PeftCommonTester, PeftTestConfigManager
27
# Tiny decoder-only checkpoints exercised by the parameterized test grid below.
PEFT_DECODER_MODELS_TO_TEST = [
    "hf-internal-testing/tiny-random-OPTForCausalLM",
    "hf-internal-testing/tiny-random-GPTNeoXForCausalLM",
    "hf-internal-testing/tiny-random-GPT2LMHeadModel",
    "hf-internal-testing/tiny-random-BloomForCausalLM",
    "hf-internal-testing/tiny-random-gpt_neo",
    "hf-internal-testing/tiny-random-GPTJForCausalLM",
    "hf-internal-testing/tiny-random-GPTBigCodeForCausalLM",
    "HuggingFaceM4/tiny-random-LlamaForCausalLM",
]

# Default grid: every model above, crossed with every config the test manager
# generates for the causal-LM task.
FULL_GRID = {
    "model_ids": PEFT_DECODER_MODELS_TO_TEST,
    "task_type": "CAUSAL_LM",
}
44
def skip_adalora_and_gpt2(test_list):
    """Filter out grid entries that combine GPT2 with AdaLoRA.

    Each entry is a (test_name, model_id, config_cls, config_kwargs) tuple; an
    entry is dropped only when the model id contains "GPT2LMHeadModel" AND the
    config class is AdaLoraConfig (that combination is not supported).
    """
    return [test for test in test_list if not (("GPT2LMHeadModel" in test[1]) and (test[2] == AdaLoraConfig))]
48
class PeftDecoderModelTester(unittest.TestCase, PeftCommonTester):
    r"""
    Test if the PeftModel behaves as expected. This includes:
    - test if the model has the expected methods

    We use parametrized.expand for debugging purposes to test each model individually.
    """

    # Loader class used by PeftCommonTester helpers to instantiate base models.
    transformers_class = AutoModelForCausalLM

    def prepare_inputs_for_testing(self):
        """Build a minimal two-row (input_ids, attention_mask) batch on the test device."""
        input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        input_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }

        return input_dict

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_model_attr(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
        self._test_adapter_name(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_prepare_for_training(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prompt_tuning_text_prepare_for_training(self, test_name, model_id, config_cls, config_kwargs):
        # Only PromptTuningConfig supports text-based prompt initialization.
        if config_cls != PromptTuningConfig:
            return pytest.skip(f"This test does not apply to {config_cls}")

        config_kwargs = config_kwargs.copy()
        config_kwargs["prompt_tuning_init"] = PromptTuningInit.TEXT
        config_kwargs["prompt_tuning_init_text"] = "This is a test prompt."
        config_kwargs["tokenizer_name_or_path"] = model_id
        self._test_prepare_for_training(model_id, config_cls, config_kwargs)

    def test_prompt_tuning_text_tokenizer_kwargs(self):
        # Verify that tokenizer_kwargs from the config are forwarded to
        # AutoTokenizer.from_pretrained during prompt-tuning text init.
        mock = Mock()
        orig_from_pretrained = AutoTokenizer.from_pretrained

        def mock_autotokenizer_from_pretrained(*args, **kwargs):
            # Record the call, then delegate to the real loader so that a
            # working tokenizer is still returned.
            mock(*args, **kwargs)
            return orig_from_pretrained(config.tokenizer_name_or_path)

        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
        config = PromptTuningConfig(
            base_model_name_or_path=model_id,
            tokenizer_name_or_path=model_id,
            num_virtual_tokens=10,
            prompt_tuning_init=PromptTuningInit.TEXT,
            task_type="CAUSAL_LM",
            prompt_tuning_init_text="This is a test prompt.",
            tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
        )
        model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
        with patch("transformers.AutoTokenizer.from_pretrained", mock_autotokenizer_from_pretrained):
            model = get_peft_model(model, config)

        expected_call = call(model_id, trust_remote_code=True, foo="bar")
        assert mock.call_args == expected_call

    def test_prompt_tuning_config_invalid_args(self):
        # tokenizer_kwargs must be rejected unless prompt_tuning_init='TEXT'.
        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
        with pytest.raises(ValueError, match="tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."):
            PromptTuningConfig(
                base_model_name_or_path=model_id,
                tokenizer_name_or_path=model_id,
                num_virtual_tokens=10,
                task_type="CAUSAL_LM",
                prompt_tuning_init_text="This is a test prompt.",
                prompt_tuning_init=PromptTuningInit.RANDOM,
                tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
            )

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
        self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
        )
    )
    def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
        )
    )
    def test_merge_layers_multi(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers_multi(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
        )
    )
    def test_merge_layers_nan(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers_nan(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs):
        # raises_err=False: positional generate() args are accepted for decoder models.
        self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=False)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers_fp16(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate_half_prec(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs):
        self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_decoders(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_layer_indexing(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
        self._test_inference_safetensors(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
        self._test_peft_model_device_map(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_delete_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
        self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "adalora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
            filter_params_func=skip_adalora_and_gpt2,
        )
    )
    def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_unload_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
        )
    )
    def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
        self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "adalora_kwargs": {"init_lora_weights": [False]},
                "task_type": "CAUSAL_LM",
            },
        )
    )
    def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_disable_adapter(model_id, config_cls, config_kwargs)

    def test_generate_adalora_no_dropout(self):
        # AdaLoRA should generate cleanly when dropout is disabled.
        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
        config_kwargs = {
            "target_modules": None,
            "task_type": "CAUSAL_LM",
            "lora_dropout": 0.0,  # NOTE(review): reconstructed — original line not visible; confirm value
        }
        self._test_generate(model_id, AdaLoraConfig, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
        self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)