# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn

from paddlenlp.transformers import ElectraConfig, ElectraModel, ElectraPretrainedModel


class ElectraForBinaryTokenClassification(ElectraPretrainedModel):
    """
    Electra Model with two linear layers on top of the hidden-state outputs,
    designed for token classification tasks with nested labels.

    Args:
        config (:class:`ElectraConfig`):
            An instance of ElectraConfig used to construct the underlying ElectraModel.
            The dropout probability is taken from `config.hidden_dropout_prob`.
        num_classes_oth (int):
            The number of classes predicted by the first head (`classifier_oth`).
        num_classes_sym (int):
            The number of classes predicted by the second head (`classifier_sym`).
    """

    def __init__(self, config: ElectraConfig, num_classes_oth, num_classes_sym):
        super(ElectraForBinaryTokenClassification, self).__init__(config)
        self.num_classes_oth = num_classes_oth
        self.num_classes_sym = num_classes_sym
        self.electra = ElectraModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier_oth = nn.Linear(config.hidden_size, self.num_classes_oth)
        self.classifier_sym = nn.Linear(config.hidden_size, self.num_classes_sym)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, attention_mask=None):
        sequence_output = self.electra(input_ids, token_type_ids, position_ids, attention_mask)
        sequence_output = self.dropout(sequence_output)

        # Two independent token-level heads share the encoder output.
        logits_sym = self.classifier_sym(sequence_output)
        logits_oth = self.classifier_oth(sequence_output)

        return logits_oth, logits_sym


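# --- Hypothetical usage sketch (not part of the original PaddleNLP file). ---
# Shows the expected input/output shapes of the two classification heads; the
# label counts, batch size, and sequence length below are illustrative assumptions.
def _example_binary_token_classification():
    config = ElectraConfig()  # default config; real code would load a pretrained checkpoint
    model = ElectraForBinaryTokenClassification(config, num_classes_oth=5, num_classes_sym=3)
    input_ids = paddle.randint(low=0, high=config.vocab_size, shape=[2, 16], dtype="int64")
    logits_oth, logits_sym = model(input_ids)
    # logits_oth: [2, 16, 5]; logits_sym: [2, 16, 3]
    return logits_oth.shape, logits_sym.shape

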
class MultiHeadAttentionForSPO(nn.Layer):
    """
    Multi-head attention layer that produces one token-pair score map per head
    for the SPO (subject-predicate-object) extraction task.
    """

    def __init__(self, embed_dim, num_heads, scale_value=768):
        super(MultiHeadAttentionForSPO, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        # Scale scores by 1/sqrt(scale_value), as in scaled dot-product attention.
        self.scale_value = scale_value**-0.5
        # Each head uses a full embed_dim-sized projection, so the projections
        # expand to embed_dim * num_heads rather than splitting embed_dim.
        self.q_proj = nn.Linear(embed_dim, embed_dim * num_heads)
        self.k_proj = nn.Linear(embed_dim, embed_dim * num_heads)

    def forward(self, query, key):
        q = self.q_proj(query)
        k = self.k_proj(key)
        q = paddle.reshape(q, shape=[0, 0, self.num_heads, self.embed_dim])
        k = paddle.reshape(k, shape=[0, 0, self.num_heads, self.embed_dim])
        q = paddle.transpose(q, perm=[0, 2, 1, 3])
        k = paddle.transpose(k, perm=[0, 2, 1, 3])
        scores = paddle.matmul(q, k, transpose_y=True)
        scores = paddle.scale(scores, scale=self.scale_value)
        return scores


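# --- Hypothetical shape check for MultiHeadAttentionForSPO (sketch, not in the original file). ---
# The embedding size, head count, batch size, and sequence length are assumed values;
# the layer returns one [seq_len, seq_len] score map per head.
def _example_spo_attention_scores():
    attn = MultiHeadAttentionForSPO(embed_dim=768, num_heads=4)
    query = paddle.randn([2, 16, 768])
    key = paddle.randn([2, 16, 768])
    scores = attn(query, key)
    # scores: [batch, num_heads, seq_len, seq_len] -> [2, 4, 16, 16]
    return scores.shape

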
class ElectraForSPO(ElectraPretrainedModel):
    """
    Electra Model with a linear layer on top of the hidden-state outputs for
    entity recognition, and a multi-head attention layer for relation
    classification.

    Args:
        config (:class:`ElectraConfig`):
            An instance of ElectraConfig used to construct the underlying ElectraModel.
            The number of relation classes is taken from `config.num_labels`, and the
            dropout probability from `config.hidden_dropout_prob`.
    """

    def __init__(self, config: ElectraConfig):
        super(ElectraForSPO, self).__init__(config)
        self.num_classes = config.num_labels
        self.electra = ElectraModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 2)
        self.span_attention = MultiHeadAttentionForSPO(config.hidden_size, config.num_labels)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, attention_mask=None):
        outputs = self.electra(
            input_ids, token_type_ids, position_ids, attention_mask, output_hidden_states=True, return_dict=True
        )
        sequence_outputs = outputs.last_hidden_state
        all_hidden_states = outputs.hidden_states
        sequence_outputs = self.dropout(sequence_outputs)
        ent_logits = self.classifier(sequence_outputs)

        # Fuse the [CLS] representation into the second-to-last hidden states
        # before scoring subject-object token pairs for each relation.
        subject_output = all_hidden_states[-2]
        cls_output = paddle.unsqueeze(sequence_outputs[:, 0, :], axis=1)
        subject_output = subject_output + cls_output

        output_size = self.num_classes + self.electra.config.hidden_size  # noqa:F841
        rel_logits = self.span_attention(sequence_outputs, subject_output)

        return ent_logits, rel_logits
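

# --- Hypothetical end-to-end sketch for ElectraForSPO (not part of the original file). ---
# Random weights and an assumed relation count are used purely to illustrate the
# output shapes; real usage would start from a pretrained checkpoint.
if __name__ == "__main__":
    config = ElectraConfig(num_labels=44)  # 44 relation types is an assumed example value
    model = ElectraForSPO(config)
    input_ids = paddle.randint(low=0, high=config.vocab_size, shape=[2, 16], dtype="int64")
    ent_logits, rel_logits = model(input_ids)
    # ent_logits: [2, 16, 2] per-token entity boundary scores
    # rel_logits: [2, 44, 16, 16] per-relation token-pair scores
    print(ent_logits.shape, rel_logits.shape)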
