# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import os
import tempfile
import unittest
from unittest import TestCase

import pytest
import torch
from torch.testing import assert_close

from peft.mapping import get_peft_model
from peft.peft_model import PeftModel
from peft.tuners.adaption_prompt import AdaptionPromptConfig
from peft.utils.other import prepare_model_for_int8_training
from peft.utils.save_and_load import get_peft_model_state_dict
from tests.testing_common import PeftCommonTester


def is_llama_available() -> bool:
    """Check if Llama is available in the transformers library (it's not in earlier versions)."""
    try:
        return importlib.util.find_spec("transformers.models.llama.modeling_llama") is not None
    except ModuleNotFoundError:
        return False


if is_llama_available():
    # We guard the import statement so that our unit tests will pass in CI environments
    # that don't have a transformers package with Llama.
    from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel


class AdaptionPromptTester(TestCase, PeftCommonTester):
    """
    Tests for the AdaptionPrompt model.

    Some of these tests were adapted from `test_peft_model.py` (which has been refactored since), but since we haven't
    checked in the test checkpoints for Llama into `hf-internal-testing`, we separate them for now.
    """

    def setUp(self):
        # Check that llama is available in transformers package before running each test.
        if not is_llama_available():
            self.skipTest("Llama not available in transformers. Skipping test.")

    @staticmethod
    def _create_test_llama_config():
        """Create a small Llama config for testing."""
        return LlamaConfig(
            vocab_size=16,
            hidden_size=8,
            intermediate_size=8,
            num_hidden_layers=8,
            num_attention_heads=4,
            use_cache=False,
        )

    def test_attributes(self) -> None:
        model = LlamaModel(self._create_test_llama_config())
        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4)
        model = get_peft_model(model, config)

        assert hasattr(model, "save_pretrained")
        assert hasattr(model, "from_pretrained")
        assert hasattr(model, "push_to_hub")

    def test_prepare_for_training(self) -> None:
        model = LlamaForCausalLM(self._create_test_llama_config())
        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        dummy_output = model.get_input_embeddings()(dummy_input)
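
        # The base model is frozen and no training preparation was done, so the embedding
        # output should not require grad.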
89

90
        assert not dummy_output.requires_grad
91

92
    def test_prepare_for_int8_training(self) -> None:
93
        model = LlamaForCausalLM(self._create_test_llama_config())
94
        model = prepare_model_for_int8_training(model)
95
        model = model.to(self.torch_device)
96

97
        for param in model.parameters():
98
            assert not param.requires_grad
99

100
        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
101
        model = get_peft_model(model, config)
102

103
        # For backward compatibility
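        # Older transformers versions do not have `enable_input_require_grads`, so fall back to
        # registering an equivalent forward hook on the input embeddings.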
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:

            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        dummy_output = model.get_input_embeddings()(dummy_input)

        assert dummy_output.requires_grad

    def test_save_pretrained_regression(self) -> None:
        seed = 420
        torch.manual_seed(seed)
        model = LlamaForCausalLM(self._create_test_llama_config())
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        with tempfile.TemporaryDirectory() as tmp_dirname:
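            # safe_serialization=False writes the legacy pickle checkpoint (adapter_model.bin).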
            model.save_pretrained(tmp_dirname, safe_serialization=False)

            torch.manual_seed(seed)
            model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
            assert len(state_dict) == 4

            # check if tensors equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

            # check if `adapter_model.bin` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))

            # check if `adapter_config.json` is present
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))

            # check if `model.safetensors` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))

            # check if `config.json` is not present
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))

    def test_save_pretrained(self) -> None:
        seed = 420
        torch.manual_seed(seed)
        model = LlamaForCausalLM(self._create_test_llama_config())
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            torch.manual_seed(seed)
            model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
            assert len(state_dict) == 4

            # check if tensors equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

192
            # check if `adapter_model.bin` is present
193
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
194

195
            # check if `adapter_config.json` is present
196
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
197

198
            # check if `model.safetensors` is not present
199
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
200

201
            # check if `config.json` is not present
202
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
203

204
    def test_save_pretrained_selected_adapters(self) -> None:
205
        seed = 420
206
        torch.manual_seed(seed)
207
        model = LlamaForCausalLM(self._create_test_llama_config())
208
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
209
        model = get_peft_model(model, config)
210
        model = model.to(self.torch_device)
211

212
        new_adapter_config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
213
        model.add_adapter("new_adapter", new_adapter_config)
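        # With two adapters attached, the save/load round trip below exercises the
        # multi-adapter path.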

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            torch.manual_seed(seed)
            model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            model_from_pretrained.load_adapter(tmp_dirname, "new_adapter")

            # check if the state dicts are equal
            state_dict = get_peft_model_state_dict(model)
            state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)

            # check if same keys
            assert state_dict.keys() == state_dict_from_pretrained.keys()

            # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
            assert len(state_dict) == 4

            # check if tensors equal
            for key in state_dict.keys():
                assert torch.allclose(
                    state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                )

240
            # check if `adapter_model.bin` is present
241
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
242

243
            # check if `adapter_config.json` is present
244
            assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
245

246
            # check if `model.safetensors` is not present
247
            assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
248

249
            # check if `config.json` is not present
250
            assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
251

252
    def test_generate(self) -> None:
253
        model = LlamaForCausalLM(self._create_test_llama_config())
254
        config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
255
        model = get_peft_model(model, config)
256
        model = model.to(self.torch_device)
257

258
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
259
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
260

261
        # check if `generate` works
262
        _ = model.generate(input_ids=input_ids, attention_mask=attention_mask)
263

264
        # check if `generate` works if positional arguments are passed
265
        _ = model.generate(input_ids, attention_mask=attention_mask)
266

267
    def test_sequence_adapter_ops(self) -> None:
268
        """Test sequence of adapter operations."""
269
        # Test input data.
270
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
271
        target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
272
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
273

274
        # Create original llama model.
275
        original = LlamaForCausalLM(self._create_test_llama_config())
276
        original = original.to(self.torch_device)
277
        original_before = original(input_ids=input_ids, attention_mask=attention_mask)
278

279
        # Get AdaptionPrompt model.
280
        adapted = get_peft_model(
281
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
282
        )
283
        adapted = adapted.to(self.torch_device)
284
        default_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
285

286
        # Test zero-init: The logits should be exactly the same.
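        # (the adaption gate is zero-initialized, so a freshly attached adapter is a no-op)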
        assert_close(original_before.logits, default_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on "default" adapter.
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        default_before.loss.backward()
        optimizer.step()

        # Test that the output changed.
        default_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(default_before.logits, default_after.logits)

        with adapted.disable_adapter():
            # Test that the output is the same as the original output.
            default_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
            assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0)

        # Add new adapter 1.
        adapted.add_adapter("adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM"))
        # Test zero-init
        adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on adapter 1.
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        adapter_1_before.loss.backward()
        optimizer.step()

        # Test that adapter 1 output changed.
        adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits)
        assert not torch.allclose(original_before.logits, adapter_1_after.logits)
        assert not torch.allclose(default_after.logits, adapter_1_after.logits)

        with adapted.disable_adapter():
            # Test that the output is the same as the original output.
            adapter_1_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
            assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0)

        # Set adapter back to default.
        adapted.set_adapter("default")

        # Test that the output is the same as the default output after training.
        default_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0)
        assert not torch.allclose(original_before.logits, default_after_set.logits)
        assert not torch.allclose(adapter_1_after.logits, default_after_set.logits)

    def test_add_and_set_while_disabled(self):
        """Test that adding and setting adapters while disabled works as intended."""
        # Test input data.
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        # Create original llama model.
        original = LlamaForCausalLM(self._create_test_llama_config())
        original = original.to(self.torch_device)
        original_before = original(input_ids=input_ids, attention_mask=attention_mask)

        # Get AdaptionPrompt model.
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)

        with adapted.disable_adapter():
            adapted.add_adapter(
                "adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")
            )

        # Test that the output is the same as the original output.
        adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)

        # Single fine-tuning step on adapter 1.
        optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
        optimizer.zero_grad()
        adapter_1_before.loss.backward()
        optimizer.step()

        # Test that adapter 1 output changed.
        adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert not torch.allclose(original_before.logits, adapter_1_after.logits)

        adapted.set_adapter("default")
        with adapted.disable_adapter():
            adapted.set_adapter("adapter 1")

        # Test that adapter 1 is active again.
        adapter_1_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
        assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0)

    def test_use_cache(self) -> None:
        """Test that AdaptionPrompt works when the Llama config sets use_cache=True."""
        torch.manual_seed(0)
        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        original = LlamaForCausalLM(
            LlamaConfig(
                vocab_size=16,
                hidden_size=8,
                intermediate_size=8,
                num_hidden_layers=8,
                num_attention_heads=4,
                use_cache=False,
            )
        ).eval()
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
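        # Baseline generation with the KV cache disabled (use_cache=False in the config above).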
        expected = adapted.generate(input_ids=input_ids, max_length=8)

        # Set use_cache = True and generate output again.
        adapted.base_model.config.use_cache = True
        actual = adapted.generate(input_ids=input_ids, max_length=8)
        assert_close(expected, actual, rtol=0, atol=0)

    def test_bf16_inference(self) -> None:
        """Test that AdaptionPrompt works with a Llama model loaded in bfloat16."""
        if self.torch_device == "mps":
            return pytest.skip("Skipping bf16 test on MPS")

        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        original = LlamaForCausalLM.from_pretrained(
            "trl-internal-testing/tiny-random-LlamaForCausalLM", torch_dtype=torch.bfloat16
        )
        adapted = get_peft_model(
            original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
        )
        adapted = adapted.to(self.torch_device)
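        # Smoke test: generation should run end to end without dtype errors in bf16.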
        _ = adapted.generate(input_ids=input_ids)

    @unittest.expectedFailure
    def test_disable_adapter(self):
        llama_config = self._create_test_llama_config()
        model = LlamaForCausalLM(llama_config).to(self.torch_device)
        dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
        output_before = model(dummy_input).logits

        config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
        model = get_peft_model(model, config).to(self.torch_device)
        output_peft = model(dummy_input).logits
        # TODO currently this fails because scores are zeroed out:
        # https://github.com/huggingface/peft/blob/062d95a09eb5d1de35c0e5e23d4387daba99e2db/src/peft/tuners/adaption_prompt.py#L303
        # This is fine for users but makes it difficult to test if anything happens. In the future, we will have a clean
        # way to control initialization. Until then, this test is expected to fail.
        assert not torch.allclose(output_before, output_peft)

        with model.disable_adapter():
            output_peft_disabled = model(dummy_input).logits
        assert torch.allclose(output_before, output_peft_disabled)