#!/usr/bin/env python3

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from copy import deepcopy

import pytest
from diffusers import StableDiffusionPipeline
from parameterized import parameterized
from torch import nn
from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM

from peft import IA3Config, LoHaConfig, LoraConfig, get_peft_model
from peft.tuners.tuners_utils import (
    _maybe_include_all_linear_layers,
    check_target_module_exists,
    inspect_matched_modules,
)
from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND

from .testing_utils import require_bitsandbytes, require_torch_gpu


# Implements tests for the regex matching logic common to all BaseTuner subclasses,
# tests for correct behaviour with different config kwargs for BaseTuners
# (e.g. `feedforward_modules` for IA3), and tests for the utility function that
# expands the "all linear layers" shorthand.
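
# A minimal usage sketch (illustrative only) of the helper exercised by
# REGEX_TEST_CASES below; `check_target_module_exists` returns a truthy value
# when the module key matches the config:
#
#     config = LoraConfig(target_modules=["q_proj"])
#     check_target_module_exists(config, "transformer.h.0.attn.q_proj")  # truthy
#     check_target_module_exists(config, "transformer.h.0.mlp.fc_in")    # falsy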

REGEX_TEST_CASES = [
    # tuple of
    # 1. key
    # 2. target_modules
    # 3. layers_to_transform
    # 4. layers_pattern
    # 5. expected result
    # some basic examples
    ("", [], None, None, False),
    ("", ["foo"], None, None, False),
    ("foo", [], None, None, False),
    ("foo", ["foo"], None, None, True),
    ("foo", ["bar"], None, None, False),
    ("foo", ["foo", "bar"], None, None, True),
    # with regex
    ("foo", "foo", None, None, True),
    ("foo", ".*oo", None, None, True),
    ("foo", "fo.*", None, None, True),
    ("foo", ".*bar.*", None, None, False),
    ("foobar", ".*oba.*", None, None, True),
    # with layers_to_transform
    ("foo.bar.1.baz", ["baz"], [1], ["bar"], True),
    ("foo.bar.1.baz", ["baz"], [0], ["bar"], False),
    ("foo.bar.1.baz", ["baz"], [2], ["bar"], False),
    ("foo.bar.10.baz", ["baz"], [0], ["bar"], False),
    ("foo.bar.10.baz", ["baz"], [1], ["bar"], False),
    ("foo.bar.1.baz", ["baz"], [0, 1, 2], ["bar"], True),
    ("foo.bar.1.baz", ["baz", "spam"], [1], ["bar"], True),
    ("foo.bar.1.baz", ["baz", "spam"], [0, 1, 2], ["bar"], True),
    # empty layers_to_transform
    ("foo.bar.7.baz", ["baz"], [], ["bar"], True),
    ("foo.bar.7.baz", ["baz"], None, ["bar"], True),
    # empty layers_pattern
    ("foo.whatever.1.baz", ["baz"], [1], [], True),
    ("foo.whatever.1.baz", ["baz"], [0], [], False),
    ("foo.whatever.1.baz", ["baz"], [1], "", True),
    ("foo.whatever.1.baz", ["baz"], [0], "", False),
    ("foo.whatever.1.baz", ["baz"], [1], None, True),
    ("foo.whatever.1.baz", ["baz"], [0], None, False),
    # some realistic examples: transformers model
    ("transformer.h.1.attn.attention.q_proj.foo", ["q_proj"], None, [], False),
    ("transformer.h.1.attn.attention.q_proj", [], None, [], False),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj"], None, [], True),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], None, [], True),
    ("transformer.h.1.attn.attention.resid_dropout", ["q_proj", "v_proj"], None, [], False),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [1], ["h"], True),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0], ["h"], False),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [2], ["h"], False),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0, 1, 2], ["h"], True),
    ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], [0, 1, 2], ["h"], True),
    ("foo.bar.q_proj", ["q_proj"], None, [], True),
    ("foo.bar.1.baz", ["baz"], [1], ["foo"], False),
    # other corner cases. For example, below is a case where layers_pattern
    # is one of the target nn.modules
    ("foo.bar.1.baz", ["baz"], [1], ["baz"], False),
    # here, layers_pattern is 'bar', but only keys that contain '.bar' are valid
    ("bar.1.baz", ["baz"], [1], ["bar"], False),
    ("foo.bar.001.baz", ["baz"], [1], ["bar"], True),
    ("foo.bar.1.spam.2.baz", ["baz"], [1], ["bar"], True),
    ("foo.bar.2.spam.1.baz", ["baz"], [1], ["bar"], False),
    # some realistic examples: module using nn.Sequential
    # for the below test case, key should contain '.blocks' to be valid, because of how layers_pattern is matched
    ("blocks.1.weight", ["weight"], [1], ["blocks"], False),
    ("blocks.1.bias", ["weight"], [1], ["blocks"], False),
    ("mlp.blocks.1.weight", ["weight"], [1], ["blocks"], True),
    ("mlp.blocks.1.bias", ["weight"], [1], ["blocks"], False),
]
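
# Takeaway from the corner cases above: `layers_pattern` must appear as an inner
# segment of the key (the key needs a prefix before the pattern, hence "bar.1.baz"
# fails while "foo.bar.1.baz" matches), and the layer index is taken from the
# segment directly following the pattern.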

MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES = [
    # model_name, model_type, initial_target_modules, expected_target_modules
    # test for a causal Llama model
    (
        "HuggingFaceH4/tiny-random-LlamaForCausalLM",
        "causal",
        INCLUDE_LINEAR_LAYERS_SHORTHAND,
        ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"],
    ),
    # test for a Llama model without the LM head
    (
        "HuggingFaceH4/tiny-random-LlamaForCausalLM",
        "base",
        INCLUDE_LINEAR_LAYERS_SHORTHAND,
        ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"],
    ),
    # test for gpt2 with Conv1D layers
    ("hf-internal-testing/tiny-random-gpt2", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["c_attn", "c_proj", "c_fc"]),
    # test for T5 model
    (
        "hf-internal-testing/tiny-random-t5",
        "seq2seq",
        INCLUDE_LINEAR_LAYERS_SHORTHAND,
        ["k", "q", "v", "o", "wi", "wo"],
    ),
    # test for GPTNeoX. The output module list should exclude the classification head,
    # which is named "embed_out" instead of the usual "lm_head" for GPTNeoX
    (
        "hf-internal-testing/tiny-random-GPTNeoXForCausalLM",
        "causal",
        INCLUDE_LINEAR_LAYERS_SHORTHAND,
        ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
    ),
]
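
# Note: INCLUDE_LINEAR_LAYERS_SHORTHAND is the "all-linear" shorthand string (it
# appears literally in test_maybe_include_all_linear_layers_diffusion below).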

# tests for a few args that should remain unchanged
MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS = [
    # initial_target_modules, expected_target_modules
    (["k_proj"], ["k_proj"]),
    # test with target_modules as None
    (None, None),
    # test with target_modules as a regex expression
    (".*(q_proj|v_proj)$", ".*(q_proj|v_proj)$"),
]

BNB_QUANTIZATIONS = [("4bit",), ("8bit",)]
BNB_TEST_CASES = [(x + y) for x in MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES for y in BNB_QUANTIZATIONS]
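# Each BNB case is a MAYBE_INCLUDE_ALL_LINEAR_LAYERS case with the quantization
# mode appended as a fifth element, e.g. ("hf-internal-testing/tiny-random-gpt2",
# "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["c_attn", "c_proj", "c_fc"], "4bit").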


class PeftCustomKwargsTester(unittest.TestCase):
    r"""
    Test that the PeftModel is instantiated with correct behaviour for custom kwargs. This includes:
    - test if regex matching works correctly
    - test if adapters handle custom kwargs the right way, e.g. IA3 for `feedforward_modules`
    """

    transformers_class_map = {"causal": AutoModelForCausalLM, "seq2seq": AutoModelForSeq2SeqLM, "base": AutoModel}

    @parameterized.expand(REGEX_TEST_CASES)
    def test_regex_matching_valid(self, key, target_modules, layers_to_transform, layers_pattern, expected_result):
        # We use a LoRA Config for testing, but the regex matching function is common for all BaseTuner subclasses.
        # The model_id is only used for config initialization; the key is matched only against the given
        # target_modules, so this can be any model.
        model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"
        config = LoraConfig(
            base_model_name_or_path=model_id,
            target_modules=target_modules,
            layers_pattern=layers_pattern,
            layers_to_transform=layers_to_transform,
        )
        actual_result = bool(check_target_module_exists(config, key))
        assert actual_result == expected_result

    def test_module_matching_lora(self):
        # PEFT models have a module matching method that inspects the matched modules, allowing users to easily
        # debug their configuration. Here we only test a single case, not all possible combinations of configs
        # that could exist. This is okay because the method calls `check_target_module_exists` internally, which
        # has been extensively tested above.
        model_id = "hf-internal-testing/tiny-random-BloomForCausalLM"
        model = AutoModel.from_pretrained(model_id)
        # by default, this model matches query_key_value
        config = LoraConfig()
        peft_model = get_peft_model(model, config)

        output = inspect_matched_modules(peft_model)  # inspects default adapter for peft_model
        matched = output["matched"]
        expected = [
            "h.0.self_attention.query_key_value",
            "h.1.self_attention.query_key_value",
            "h.2.self_attention.query_key_value",
            "h.3.self_attention.query_key_value",
            "h.4.self_attention.query_key_value",
        ]
        assert matched == expected  # module lists should match exactly

        # no overlap with matched modules
        unmatched = output["unmatched"]
        for key in expected:
            assert key not in unmatched

    def test_feedforward_matching_ia3(self):
        model_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        # simple example targeting just one T5 block for testing
        config_kwargs = {
            "target_modules": ".*encoder.*block.0.*(SelfAttention|EncDecAttention|DenseReluDense).(k|q|v|wo|wi)$",
            "feedforward_modules": ["wo", "wi"],
        }
        config = IA3Config(base_model_name_or_path=model_id, **config_kwargs)
        peft_model = get_peft_model(model, config)
        output = inspect_matched_modules(peft_model)  # inspects default adapter for peft_model
        matched = output["matched"]
        expected = [
            "encoder.block.0.layer.0.SelfAttention.q",
            "encoder.block.0.layer.0.SelfAttention.k",
            "encoder.block.0.layer.0.SelfAttention.v",
            "encoder.block.0.layer.1.DenseReluDense.wi",
            "encoder.block.0.layer.1.DenseReluDense.wo",
        ]
        expected_feedforward = [
            "encoder.block.0.layer.1.DenseReluDense.wi",
            "encoder.block.0.layer.1.DenseReluDense.wo",
        ]
        assert matched == expected  # not strictly required given the checks above, but just to be sure
        module_dict = dict(model.named_modules())
        for key in matched:
            module = module_dict[key]
            if key in expected_feedforward:
                assert module.is_feedforward
            else:  # other IA3 modules should not be marked as feedforward
                assert not module.is_feedforward

    @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES)
    def test_maybe_include_all_linear_layers_lora(
        self, model_id, model_type, initial_target_modules, expected_target_modules
    ):
        model = self.transformers_class_map[model_type].from_pretrained(model_id)
        config_cls = LoraConfig
        self._check_match_with_expected_target_modules(
            model_id, model, config_cls, initial_target_modules, expected_target_modules
        )

    @parameterized.expand(BNB_TEST_CASES)
    @require_torch_gpu
    @require_bitsandbytes
    def test_maybe_include_all_linear_layers_lora_bnb(
        self, model_id, model_type, initial_target_modules, expected_target_modules, quantization
    ):
        if quantization == "4bit":
            config_kwargs = {"load_in_4bit": True}
        elif quantization == "8bit":
            config_kwargs = {"load_in_8bit": True}
        model = self.transformers_class_map[model_type].from_pretrained(model_id, device_map="auto", **config_kwargs)
        config_cls = LoraConfig
        self._check_match_with_expected_target_modules(
            model_id, model, config_cls, initial_target_modules, expected_target_modules
        )

    def _check_match_with_expected_target_modules(
        self, model_id, model, config_cls, initial_target_modules, expected_target_modules
    ):
        """
        Helper function for the tests of `_maybe_include_all_linear_layers`
        """
        actual_config = config_cls(base_model_name_or_path=model_id, target_modules=initial_target_modules)
        expected_config = config_cls(base_model_name_or_path=model_id, target_modules=expected_target_modules)
        model_copy = deepcopy(model)
        actual_model = get_peft_model(model, peft_config=actual_config)
        expected_model = get_peft_model(model_copy, peft_config=expected_config)
        expected_model_module_dict = dict(expected_model.named_modules())
        # compare the two models and assert that all layers are of the same type
        for name, actual_module in actual_model.named_modules():
            expected_module = expected_model_module_dict[name]
            assert type(actual_module) == type(expected_module)

    def test_maybe_include_all_linear_layers_ia3_loha(self):
        model_id, initial_target_modules, expected_target_modules = (
            "HuggingFaceH4/tiny-random-LlamaForCausalLM",
            INCLUDE_LINEAR_LAYERS_SHORTHAND,
            ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"],
        )
        model_ia3 = AutoModelForCausalLM.from_pretrained(model_id)
        model_loha = deepcopy(model_ia3)
        config_classes = [IA3Config, LoHaConfig]
        models = [model_ia3, model_loha]
        for config_cls, model in zip(config_classes, models):
            self._check_match_with_expected_target_modules(
                model_id, model, config_cls, initial_target_modules, expected_target_modules
            )

    @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS)
    def test_maybe_include_all_linear_layers_internals(self, initial_target_modules, expected_target_modules):
        model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        config = LoraConfig(base_model_name_or_path=model_id, target_modules=initial_target_modules)
        new_config = _maybe_include_all_linear_layers(config, model)
        if isinstance(expected_target_modules, list):
            # assert that expected and actual target_modules have the same items
            assert set(new_config.target_modules) == set(expected_target_modules)
        else:
            assert new_config.target_modules == expected_target_modules

    def test_maybe_include_all_linear_layers_diffusion(self):
        model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
        model = StableDiffusionPipeline.from_pretrained(model_id)
        config = LoraConfig(base_model_name_or_path=model_id, target_modules="all-linear")
        with pytest.raises(
            ValueError,
            match="Only instances of PreTrainedModel support `target_modules='all-linear'`",
        ):
            model.unet = get_peft_model(model.unet, config)


class MLP(nn.Module):
    def __init__(self, bias=True):
        super().__init__()
        self.lin0 = nn.Linear(10, 20, bias=bias)
        self.relu = nn.ReLU()
        self.drop = nn.Dropout(0.5)
        self.lin1 = nn.Linear(20, 2, bias=bias)
        self.sm = nn.LogSoftmax(dim=-1)
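
    # The tests below only inspect module targeting and never run the model, so no
    # forward is strictly required; this minimal one (an illustrative addition, not
    # part of the original tests) chains the layers in definition order.
    def forward(self, X):
        X = self.lin0(X)
        X = self.relu(X)
        X = self.drop(X)
        X = self.lin1(X)
        X = self.sm(X)
        return X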


class TestTargetedModuleNames(unittest.TestCase):
    """Check that the attribute targeted_module_names is correctly set.

    This checks LoRA and IA³, which should be sufficient; testing all other tuners is not necessary.
    """

    def test_one_targeted_module_regex(self):
        model = MLP()
        model = get_peft_model(model, LoraConfig(target_modules="lin0"))
        assert model.targeted_module_names == ["lin0"]

    def test_two_targeted_module_regex(self):
        model = MLP()
        model = get_peft_model(model, LoraConfig(target_modules="lin.*"))
        assert model.targeted_module_names == ["lin0", "lin1"]

    def test_one_targeted_module_list(self):
        model = MLP()
        model = get_peft_model(model, LoraConfig(target_modules=["lin0"]))
        assert model.targeted_module_names == ["lin0"]

    def test_two_targeted_module_list(self):
        model = MLP()
        model = get_peft_model(model, LoraConfig(target_modules=["lin0", "lin1"]))
        assert model.targeted_module_names == ["lin0", "lin1"]

    def test_ia3_targeted_module_regex(self):
        model = MLP()
        model = get_peft_model(model, IA3Config(target_modules=".*lin.*", feedforward_modules=".*lin.*"))
        assert model.targeted_module_names == ["lin0", "lin1"]

    def test_ia3_targeted_module_list(self):
        model = MLP()
        model = get_peft_model(model, IA3Config(target_modules=["lin0", "lin1"], feedforward_modules=["lin0", "lin1"]))
        assert model.targeted_module_names == ["lin0", "lin1"]

    def test_realistic_example(self):
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BloomForCausalLM")
        config = LoraConfig(task_type="CAUSAL_LM")
        model = get_peft_model(model, config)
        expected = [
            f"transformer.h.{i}.self_attention.query_key_value" for i in range(len(model.base_model.transformer.h))
        ]
        assert model.targeted_module_names == expected
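

# How to run this module on its own (assuming the repository's standard pytest
# layout, where this file lives under tests/):
#
#     python -m pytest tests/test_tuners_utils.py -v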