test_modeling_flax_bert.py 
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        # Random token ids in [0, vocab_size) with the configured batch/sequence shape.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        # A deliberately tiny BertConfig so the shared mixin tests stay fast.
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs

        # Decoder variant: same inputs plus random encoder states and a binary
        # cross-attention mask (ids_tensor with vocab_size=2 yields 0/1 values).
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
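
# FlaxModelTesterMixin (defined in test_modeling_flax_common) consumes the
# tester above: each shared check calls
#   config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# and exercises every class listed in all_model_classes with those inputs.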


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
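
# Running this file (assumes a transformers source checkout with flax
# installed; the relative imports mean it cannot be executed standalone):
#   python -m pytest tests/models/bert/test_modeling_flax_bert.py
# Tests decorated with @slow, such as test_model_from_pretrained above, are
# skipped unless the RUN_SLOW=1 environment variable is set.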
