allennlp / bias_mitigators_test.py
import torch
from torch import allclose
import pytest
import json

from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, multi_device
from allennlp.fairness.bias_mitigators import (
    LinearBiasMitigator,
    HardBiasMitigator,
    INLPBiasMitigator,
    OSCaRBiasMitigator,
)
from allennlp.fairness.bias_direction import TwoMeansBiasDirection


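# LinearBiasMitigator removes the component of each embedding along a single
# bias direction; here the direction is computed by TwoMeansBiasDirection
# from two small sets of gendered seed words.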
class LinearBiasMitigatorTest(AllenNlpTestCase):
    def setup_method(self):
        super().setup_method()

        # embedding data from VERB demo
        emb_filename = str(self.FIXTURES_ROOT / "fairness" / "bias_embeddings.json")
        with open(emb_filename) as emb_file:
            emb_data = json.load(emb_file)

        seed_embeddings1 = torch.cat(
            [
                torch.Tensor(emb_data["he"]).reshape(1, -1),
                torch.Tensor(emb_data["him"]).reshape(1, -1),
            ]
        )
        seed_embeddings2 = torch.cat(
            [
                torch.Tensor(emb_data["she"]).reshape(1, -1),
                torch.Tensor(emb_data["her"]).reshape(1, -1),
            ]
        )
        tm = TwoMeansBiasDirection()
        self.bias_direction = tm(seed_embeddings1, seed_embeddings2)

        evaluation_embeddings = []
        expected_bias_mitigated_embeddings = []
        for word in ["engineer", "banker", "nurse", "receptionist"]:
            evaluation_embeddings.append(torch.Tensor(emb_data[word]).reshape(1, -1))
            expected_bias_mitigated_embeddings.append(
                torch.Tensor(emb_data["linear_two_means_" + word]).reshape(1, -1)
            )
        self.evaluation_embeddings = torch.cat(evaluation_embeddings).reshape(2, 2, -1)
        self.expected_bias_mitigated_embeddings = torch.cat(
            expected_bias_mitigated_embeddings
        ).reshape(2, 2, -1)

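    # Evaluation embeddings must be at least 2D, the bias direction exactly
    # 1D, and their last dimensions must match; anything else should raise.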
    def test_invalid_dims(self):
        lbm = LinearBiasMitigator()
        with pytest.raises(ConfigurationError):
            lbm(torch.zeros(2), torch.zeros(2))
        with pytest.raises(ConfigurationError):
            lbm(torch.zeros(2), torch.zeros((2, 2)))
        with pytest.raises(ConfigurationError):
            lbm(torch.zeros((2, 3)), torch.zeros(2))

    @multi_device
    def test_lbm_without_grad(self, device: str):
        self.bias_direction = self.bias_direction.to(device)
        self.evaluation_embeddings = self.evaluation_embeddings.to(device)
        self.expected_bias_mitigated_embeddings = self.expected_bias_mitigated_embeddings.to(device)

        lbm = LinearBiasMitigator()
        test_bias_mitigated_embeddings = lbm(self.evaluation_embeddings, self.bias_direction)
        assert allclose(
            self.expected_bias_mitigated_embeddings, test_bias_mitigated_embeddings, atol=1e-6
        )

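    # With requires_grad=True the projection stays differentiable, so
    # backward() populates gradients for the embeddings and the direction.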
    @multi_device
    def test_lbm_with_grad(self, device: str):
        self.bias_direction = self.bias_direction.to(device).requires_grad_()
        self.evaluation_embeddings = self.evaluation_embeddings.to(device).requires_grad_()
        assert self.bias_direction.grad is None
        assert self.evaluation_embeddings.grad is None

        lbm = LinearBiasMitigator(requires_grad=True)
        test_bias_mitigated_embeddings = lbm(self.evaluation_embeddings, self.bias_direction)
        test_bias_mitigated_embeddings.sum().backward()
        assert self.bias_direction.grad is not None
        assert self.evaluation_embeddings.grad is not None


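# HardBiasMitigator performs hard debiasing: it neutralizes the evaluation
# embeddings along the bias direction and equalizes the paired embeddings so
# each pair differs only in that direction. The output stacks the mitigated
# evaluation embeddings first and the equalized pairs after them, which is
# why the expected tensor is reshaped to (4, 2, -1) below while the
# evaluation input is (2, 2, -1).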
class HardBiasMitigatorTest(AllenNlpTestCase):
    def setup_method(self):
        super().setup_method()

        # embedding data from VERB demo
        emb_filename = str(self.FIXTURES_ROOT / "fairness" / "bias_embeddings.json")
        with open(emb_filename) as emb_file:
            emb_data = json.load(emb_file)

        seed_embeddings1 = torch.cat(
            [
                torch.Tensor(emb_data["he"]).reshape(1, -1),
                torch.Tensor(emb_data["man"]).reshape(1, -1),
            ]
        )
        seed_embeddings2 = torch.cat(
            [
                torch.Tensor(emb_data["she"]).reshape(1, -1),
                torch.Tensor(emb_data["woman"]).reshape(1, -1),
            ]
        )
        tm = TwoMeansBiasDirection()
        self.bias_direction = tm(seed_embeddings1, seed_embeddings2)

        self.equalize_embeddings1 = torch.cat(
            [
                torch.Tensor(emb_data["boy"]).reshape(1, -1),
                torch.Tensor(emb_data["brother"]).reshape(1, -1),
            ]
        ).unsqueeze(0)
        self.equalize_embeddings2 = torch.cat(
            [
                torch.Tensor(emb_data["girl"]).reshape(1, -1),
                torch.Tensor(emb_data["sister"]).reshape(1, -1),
            ]
        ).unsqueeze(0)

        evaluation_embeddings = []
        expected_bias_mitigated_embeddings = []
        for word in ["engineer", "banker", "nurse", "receptionist"]:
            evaluation_embeddings.append(torch.Tensor(emb_data[word]).reshape(1, -1))
            expected_bias_mitigated_embeddings.append(
                torch.Tensor(emb_data["hard_two_means_" + word]).reshape(1, -1)
            )
        for word in ["boy", "brother", "girl", "sister"]:
            expected_bias_mitigated_embeddings.append(
                torch.Tensor(emb_data["hard_two_means_" + word]).reshape(1, -1)
            )
        self.evaluation_embeddings = torch.cat(evaluation_embeddings).reshape(2, 2, -1)
        self.expected_bias_mitigated_embeddings = torch.cat(
            expected_bias_mitigated_embeddings
        ).reshape(4, 2, -1)

    def test_invalid_dims(self):
        hbm = HardBiasMitigator()
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros(2), torch.zeros(2), torch.zeros(2), torch.zeros(2))
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros(2), torch.zeros(2), torch.zeros((2, 2)), torch.zeros((3, 2)))
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros(2), torch.zeros(2), torch.zeros((2, 2)), torch.zeros((2, 2)))
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros((3, 3)), torch.zeros(2), torch.zeros((2, 2)), torch.zeros((2, 2)))
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros((3, 2)), torch.zeros((2, 2)), torch.zeros((2, 2)), torch.zeros((2, 2)))
        with pytest.raises(ConfigurationError):
            hbm(torch.zeros((3, 2)), torch.zeros(3), torch.zeros((2, 2)), torch.zeros((2, 2)))

    @multi_device
    def test_hbm_without_grad(self, device: str):
        self.bias_direction = self.bias_direction.to(device)
        self.evaluation_embeddings = self.evaluation_embeddings.to(device)
        self.equalize_embeddings1 = self.equalize_embeddings1.to(device)
        self.equalize_embeddings2 = self.equalize_embeddings2.to(device)
        self.expected_bias_mitigated_embeddings = self.expected_bias_mitigated_embeddings.to(device)

        hbm = HardBiasMitigator()
        test_bias_mitigated_embeddings = hbm(
            self.evaluation_embeddings,
            self.bias_direction,
            self.equalize_embeddings1,
            self.equalize_embeddings2,
        )
        assert allclose(
            self.expected_bias_mitigated_embeddings, test_bias_mitigated_embeddings, atol=1e-6
        )

    @multi_device
    def test_hbm_with_grad(self, device: str):
        self.bias_direction = self.bias_direction.to(device).requires_grad_()
        self.evaluation_embeddings = self.evaluation_embeddings.to(device).requires_grad_()
        self.equalize_embeddings1 = self.equalize_embeddings1.to(device).requires_grad_()
        self.equalize_embeddings2 = self.equalize_embeddings2.to(device).requires_grad_()
        assert self.bias_direction.grad is None
        assert self.evaluation_embeddings.grad is None
        assert self.equalize_embeddings1.grad is None
        assert self.equalize_embeddings2.grad is None

        hbm = HardBiasMitigator(requires_grad=True)
        test_bias_mitigated_embeddings = hbm(
            self.evaluation_embeddings,
            self.bias_direction,
            self.equalize_embeddings1,
            self.equalize_embeddings2,
        )
        test_bias_mitigated_embeddings.sum().backward()
        assert self.bias_direction.grad is not None
        assert self.evaluation_embeddings.grad is not None
        assert self.equalize_embeddings1.grad is not None
        assert self.equalize_embeddings2.grad is not None


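# INLPBiasMitigator applies Iterative Nullspace Projection: it repeatedly fits
# a linear classifier to separate the two seed-word groups and projects the
# embeddings onto the classifier's nullspace. The classifier fitting is not
# differentiable, which is presumably why this class has no gradient test.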
class INLPBiasMitigatorTest(AllenNlpTestCase):
    def setup_method(self):
        super().setup_method()

        # embedding data from VERB demo
        emb_filename = str(self.FIXTURES_ROOT / "fairness" / "bias_embeddings.json")
        with open(emb_filename) as emb_file:
            emb_data = json.load(emb_file)

        seed_embeddings1 = []
        for word in ["man", "he", "his", "boy", "grandpa", "uncle", "jack"]:
            seed_embeddings1.append(torch.Tensor(emb_data[word]).reshape(1, -1))
        self.seed_embeddings1 = torch.cat(seed_embeddings1)

        seed_embeddings2 = []
        for word in ["woman", "she", "her", "girl", "grandma", "aunt", "jill"]:
            seed_embeddings2.append(torch.Tensor(emb_data[word]).reshape(1, -1))
        self.seed_embeddings2 = torch.cat(seed_embeddings2)

        evaluation_embeddings = []
        expected_bias_mitigated_embeddings = []
        for word in ["engineer", "homemaker"]:
            evaluation_embeddings.append(torch.Tensor(emb_data[word]).reshape(1, -1))
            expected_bias_mitigated_embeddings.append(
                torch.Tensor(emb_data["inlp_" + word]).reshape(1, -1)
            )
        self.evaluation_embeddings = torch.cat(evaluation_embeddings)
        self.expected_bias_mitigated_embeddings = torch.cat(expected_bias_mitigated_embeddings)

    def test_invalid_dims(self):
        ibm = INLPBiasMitigator()
        with pytest.raises(ConfigurationError):
            ibm(torch.zeros(2), torch.zeros(2), torch.zeros(2))
        with pytest.raises(ConfigurationError):
            ibm(torch.zeros(2), torch.zeros((2, 2)), torch.zeros((2, 3)))
        with pytest.raises(ConfigurationError):
            ibm(torch.zeros(2), torch.zeros((2, 2)), torch.zeros((2, 2)))
        with pytest.raises(ConfigurationError):
            ibm(torch.zeros((2, 3)), torch.zeros((2, 2)), torch.zeros((2, 2)))

    @multi_device
    def test_inlp(self, device: str):
        self.seed_embeddings1 = self.seed_embeddings1.to(device)
        self.seed_embeddings2 = self.seed_embeddings2.to(device)
        self.evaluation_embeddings = self.evaluation_embeddings.to(device)
        self.expected_bias_mitigated_embeddings = self.expected_bias_mitigated_embeddings.to(device)

        ibm = INLPBiasMitigator()
        test_bias_mitigated_embeddings = ibm(
            self.evaluation_embeddings, self.seed_embeddings1, self.seed_embeddings2
        )
        assert allclose(
            self.expected_bias_mitigated_embeddings, test_bias_mitigated_embeddings, atol=1e-6
        )


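# OSCaRBiasMitigator (Orthogonal Subspace Correction and Rectification)
# rectifies two bias directions by a graded rotation that makes them
# orthogonal, rather than deleting a subspace outright. Both directions
# here are precomputed and loaded from the fixture file.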
class OSCaRBiasMitigatorTest(AllenNlpTestCase):
    def setup_method(self):
        super().setup_method()

        # embedding data from VERB demo
        emb_filename = str(self.FIXTURES_ROOT / "fairness" / "bias_embeddings.json")
        with open(emb_filename) as emb_file:
            emb_data = json.load(emb_file)

        self.bias_direction1 = torch.Tensor(emb_data["oscar_bias1"])
        self.bias_direction2 = torch.Tensor(emb_data["oscar_bias2"])

        evaluation_embeddings = []
        expected_bias_mitigated_embeddings = []
        for word in ["programmer", "grandpa", "grandma"]:
            evaluation_embeddings.append(torch.Tensor(emb_data[word]).reshape(1, -1))
            expected_bias_mitigated_embeddings.append(
                torch.Tensor(emb_data["oscar_" + word]).reshape(1, -1)
            )
        self.evaluation_embeddings = torch.cat(evaluation_embeddings)
        self.expected_bias_mitigated_embeddings = torch.cat(expected_bias_mitigated_embeddings)

    def test_invalid_dims(self):
        obm = OSCaRBiasMitigator()
        with pytest.raises(ConfigurationError):
            obm(torch.zeros(2), torch.zeros(2), torch.zeros(2))
        with pytest.raises(ConfigurationError):
            obm(torch.zeros(2), torch.zeros((2, 2)), torch.zeros((2, 3)))
        with pytest.raises(ConfigurationError):
            obm(torch.zeros((2, 3)), torch.zeros(2), torch.zeros(2))
        with pytest.raises(ConfigurationError):
            obm(torch.zeros((2, 1)), torch.zeros(1), torch.zeros(1))

    @multi_device
    def test_oscar_without_grad(self, device: str):
        self.bias_direction1 = self.bias_direction1.to(device)
        self.bias_direction2 = self.bias_direction2.to(device)
        self.evaluation_embeddings = self.evaluation_embeddings.to(device)
        self.expected_bias_mitigated_embeddings = self.expected_bias_mitigated_embeddings.to(device)

        obm = OSCaRBiasMitigator()
        test_bias_mitigated_embeddings = obm(
            self.evaluation_embeddings, self.bias_direction1, self.bias_direction2
        )
        assert allclose(
            self.expected_bias_mitigated_embeddings, test_bias_mitigated_embeddings, atol=1e-6
        )

    @multi_device
    def test_oscar_with_grad(self, device: str):
        self.bias_direction1 = self.bias_direction1.to(device).requires_grad_()
        self.bias_direction2 = self.bias_direction2.to(device).requires_grad_()
        self.evaluation_embeddings = self.evaluation_embeddings.to(device).requires_grad_()
        assert self.bias_direction1.grad is None
        assert self.bias_direction2.grad is None
        assert self.evaluation_embeddings.grad is None

        obm = OSCaRBiasMitigator(requires_grad=True)
        test_bias_mitigated_embeddings = obm(
            self.evaluation_embeddings, self.bias_direction1, self.bias_direction2
        )
        test_bias_mitigated_embeddings.sum().backward()
        assert self.bias_direction1.grad is not None
        assert self.bias_direction2.grad is not None
        assert self.evaluation_embeddings.grad is not None