# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )
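
# The jax/flax-specific imports above are guarded by is_flax_available() so that
# this module can still be imported (and test collection can proceed) on machines
# without Flax installed; the @require_flax decorator below then skips the tests
# themselves in that case.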


class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds a small random DistilBertConfig and matching dummy inputs for the shared Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask
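
    # Illustrative sketch (not part of the test suite; `tester` stands for an
    # instance of this class): the tuple returned above can drive a forward
    # pass directly, which is essentially what the shared mixin tests exercise.
    #
    #     config, input_ids, attention_mask = tester.prepare_config_and_inputs()
    #     model = FlaxDistilBertModel(config)
    #     last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
    #     # last_hidden_state.shape == (batch_size, seq_length, config.dim)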

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))  # minimal dummy input_ids of shape (1, 1)
            self.assertIsNotNone(outputs)
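
    # Note: tests marked @slow are skipped by default; in the standard
    # transformers test setup they only run when RUN_SLOW=1 is set in the
    # environment.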


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
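

# Usage note (a sketch, assuming the standard transformers repository layout):
# because of the relative import of test_modeling_flax_common, this module must
# run as part of the tests package rather than as a standalone script, e.g.:
#
#     python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py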