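# Tests for the Caffe2 BooleanMask and SequenceMask operators. Each test
# builds the operator, compares its output against a plain NumPy reference,
# cross-checks devices, and (where applicable) verifies gradients numerically.
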
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np


class TestBooleanMaskOp(serial.SerializedTestCase):
    @given(x=hu.tensor1d(min_len=1,
                         max_len=100,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        # The expected gradient w.r.t. `data` simply mirrors the mask:
        # 1 where an element passes through, 0 where it is dropped.
        expected_gradient = np.copy(mask).astype(int)
        self.assertDeviceChecks(dc, op, [x, mask], [0])
        self.assertGradientChecks(gc, op, [x, mask], 0, [0])

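    # BooleanMask gathers the entries of `data` where `mask` is True; the
    # NumPy reference used below is simply fancy indexing, x[mask].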
    @given(x=hu.tensor1d(min_len=1,
                         max_len=5,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    @settings(deadline=10000)
    def test_boolean_mask(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask],)

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @given(x=hu.tensor1d(min_len=1,
                         max_len=5,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_boolean_mask_indices(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 ["masked_data", "masked_indices"])
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask], np.where(mask)[0])

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @staticmethod
    def _dtype_conversion(x, dtype, gc, dc):
        """SequenceMask only supports fp16 with CUDA/ROCm."""
        if dtype == np.float16:
            assume(core.IsGPUDeviceType(gc.device_type))
            dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
            x = x.astype(dtype)
        return x, dc

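    # The "sequence" mode exercised below masks positions past lengths[i]
    # along the masked axis. Illustrative example (values assumed, not drawn
    # from the test):
    #     data row = [1., 2., 3.], lengths[i] = 2
    #     masked   = [1., 2., fill_val]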
    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_with_lengths(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask",
                                 ["data", "lengths"],
                                 ["masked_data"],
                                 mode="sequence",
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        lengths = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, lengths):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j >= lengths[i]:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])

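    # The "window" mode keeps positions within `radius` of centers[i] along
    # the masked axis. Illustrative example (values assumed) with radius = 2:
    #     data row = [1., 2., 3., 4., 5., 6.], centers[i] = 1
    #     masked   = [1., 2., 3., 4., fill_val, fill_val]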
    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_with_window(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        radius = 2
        op = core.CreateOperator("SequenceMask",
                                 ["data", "centers"],
                                 ["masked_data"],
                                 mode="window",
                                 radius=radius,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        centers = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, centers):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j > centers[i] + radius or j < centers[i] - radius:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, centers], ref)
        self.assertDeviceChecks(dc, op, [x, centers], [0])

        # The gradient check with np.float16 is flaky, so it is effectively
        # disabled for now via a high threshold (to reproduce, set the
        # threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc, op, [x, centers], 0, [0],
                                  threshold=threshold)

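    # The triangle modes mask relative to the main diagonal of the flattened
    # [leading_dim, elem_dim] view: 'upper' fills j > i, 'lower' fills j < i,
    # and the '*diag' variants additionally fill the diagonal j == i.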
    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           mode=st.sampled_from(['upper', 'lower', 'upperdiag', 'lowerdiag']),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_triangle(self, x, mode, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask",
                                 ["data"],
                                 ["masked_data"],
                                 mode=mode,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim

        if mode == 'upper':
            def compare(i, j):
                return j > i
        elif mode == 'lower':
            def compare(i, j):
                return j < i
        elif mode == 'upperdiag':
            def compare(i, j):
                return j >= i
        elif mode == 'lowerdiag':
            def compare(i, j):
                return j <= i

        def ref(x):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if compare(i, j):
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x], ref)
        self.assertDeviceChecks(dc, op, [x], [0])

        # The gradient check with np.float16 is flaky, so it is effectively
        # disabled for now via a high threshold (to reproduce, set the
        # threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        stepsize = 0.1 if dtype == np.float16 else 0.05
        self.assertGradientChecks(gc, op, [x], 0, [0],
                                  threshold=threshold, stepsize=stepsize)

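    # The batching tests below check the `batch` argument by flattening the
    # input to a [before, between, after] view, where
    #     before  = prod(shape[:batch + 1])     # independent batch slices
    #     between = prod(shape[batch + 1:axis])
    #     after   = prod(shape[axis:])          # the masked extent
    # and applying the same 2-D mask to every [between, after] slice.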
    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_lengths(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        # Choose _different_ batch and axis dimensions, with axis != 0
        # and batch <= axis.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask",
                                 ["data", "lengths"],
                                 ["masked_data"],
                                 mode='sequence',
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        before = int(np.prod(x.shape[:batch + 1]))
        between = int(np.prod(x.shape[batch + 1:axis]))
        after = int(np.prod(x.shape[axis:]))

        lengths = np.random.randint(0, after, [between])\
            .astype(np.int32)

        def ref(z, l):
            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if j >= l[i]:
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])

        # The gradient check with np.float16 is flaky, so it is effectively
        # disabled for now via a high threshold (to reproduce, set the
        # threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc, op, [x, lengths], 0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=4,
                       max_dim=4,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_window(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        radius = 1
        # Choose _different_ batch and axis dimensions, with axis != 0
        # and batch <= axis.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask",
                                 ["data", "centers"],
                                 ["masked_data"],
                                 mode='window',
                                 radius=radius,
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        before = int(np.prod(x.shape[:batch + 1]))
        between = int(np.prod(x.shape[batch + 1:axis]))
        after = int(np.prod(x.shape[axis:]))

        centers = np.random.randint(0, after, [between])\
            .astype(np.int32)

        def ref(z, c):
            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if j > c[i] + radius or j < c[i] - radius:
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x, centers], ref)
        self.assertDeviceChecks(dc, op, [x, centers], [0])

        # The gradient check with np.float16 is flaky, so it is effectively
        # disabled for now via a high threshold (to reproduce, set the
        # threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc, op, [x, centers], 0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=3,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           mode=st.sampled_from(['upper', 'lower', 'upperdiag', 'lowerdiag']),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_triangle(self, x, mode, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        # Choose _different_ batch and axis dimensions, with axis != 0
        # and batch <= axis.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask",
                                 ["data"],
                                 ["masked_data"],
                                 mode=mode,
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        if mode == 'upper':
            def compare(i, j):
                return j > i
        elif mode == 'lower':
            def compare(i, j):
                return j < i
        elif mode == 'upperdiag':
            def compare(i, j):
                return j >= i
        elif mode == 'lowerdiag':
            def compare(i, j):
                return j <= i

        def ref(z):
            before = int(np.prod(z.shape[:batch + 1]))
            between = int(np.prod(z.shape[batch + 1:axis]))
            after = int(np.prod(z.shape[axis:]))

            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if compare(i, j):
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x], ref)
        self.assertDeviceChecks(dc, op, [x], [0])

        # The gradient check with np.float16 is flaky, so it is effectively
        # disabled for now via a high threshold (to reproduce, set the
        # threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        stepsize = 0.1 if dtype == np.float16 else 0.05
        self.assertGradientChecks(gc, op, [x], 0, [0],
                                  threshold=threshold, stepsize=stepsize)

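    # With repeat_from_axis, the mask computed at `axis` is repeated across
    # the trailing dimensions; the reference below flattens the input to
    # [leading_dim, elem_dim, -1] and fills entire trailing slices
    # (ref[i, j, :]) at once.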
    @given(x=hu.tensor(min_dim=3,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_repeated(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask",
                                 ["data", "lengths"],
                                 ["masked_data"],
                                 mode="sequence",
                                 axis=len(x.shape) - 2,
                                 repeat_from_axis=-1,
                                 fill_val=fill_val)

        elem_dim = x.shape[-2]
        leading_dim = 1
        for dim in x.shape[:-2]:
            leading_dim *= dim
        lengths = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, lengths):
            ref = np.reshape(x, [leading_dim, elem_dim, -1])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j >= lengths[i]:
                        ref[i, j, :] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])