pytorch

Форк
0
/
utility_ops_test.py 
477 строк · 14.7 Кб
1

2

3

4

5

6
from caffe2.python import core, workspace
7
from hypothesis import assume, given, settings
8
from caffe2.proto import caffe2_pb2
9
import caffe2.python.hypothesis_test_util as hu
10
import caffe2.python.serialized_test.serialized_test_util as serial
11
import hypothesis.strategies as st
12
import numpy as np
13
import random
14

15

16
class TestUtilityOps(serial.SerializedTestCase):
17

18
    @given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_slice(self, X, args, gc, dc):
        """Check the Slice op against a numpy slicing reference.

        A single random dimension is sliced; `args` chooses whether the
        starts/ends are passed as operator arguments or as input tensors.
        """
        X = X.astype(dtype=np.float32)
        dim = random.randint(0, X.ndim - 1)
        slice_start = random.randint(0, X.shape[dim] - 1)
        slice_end = random.randint(slice_start, X.shape[dim] - 1)
        # -1 in `ends` means "to the end of the dimension" for Slice.
        starts = np.array([0] * X.ndim).astype(np.int32)
        ends = np.array([-1] * X.ndim).astype(np.int32)
        starts[dim] = slice_start
        ends[dim] = slice_end

        if args:
            op = core.CreateOperator(
                "Slice", ["X"], ["Y"], starts=starts, ends=ends, device_option=gc
            )

            def slice_ref(X):
                slc = [slice(None)] * X.ndim
                slc[dim] = slice(slice_start, slice_end)
                # Index with a tuple: indexing an ndarray with a plain list
                # of slices is deprecated and removed in newer NumPy.
                return [X[tuple(slc)]]
            inputs = [X]
        else:
            op = core.CreateOperator(
                "Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
            )

            def slice_ref(x, starts, ends):
                slc = [slice(None)] * x.ndim
                slc[dim] = slice(slice_start, slice_end)
                return [x[tuple(slc)]]
            inputs = [X, starts, ends]

        self.assertReferenceChecks(gc, op, inputs, slice_ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            outputs_to_check=0,
            outputs_with_grads=[0],
        )
60

61
    @given(ndims=st.integers(min_value=1, max_value=10), **hu.gcs)
    @settings(deadline=10000)
    def test_resize_like(self, ndims, gc, dc):
        """ResizeLike should reshape X to Y's shape; compare with numpy reshape."""
        flat = np.zeros((ndims * 2, ))
        shaped = np.zeros((ndims, 2))

        op = core.CreateOperator(
            "ResizeLike", ["X", "Y"], ["Z"],
        )

        def resize_like(X, Y):
            # Element count is identical; only the shape is borrowed from Y.
            return [X.reshape(Y.shape)]

        self.assertDeviceChecks(dc, op, [flat, shaped], [0])
        self.assertReferenceChecks(
            gc, op, [flat, shaped], resize_like,
            ensure_outputs_are_inferred=True,
        )
76

77
    @given(dtype=st.sampled_from([np.float32, np.int32]),
           ndims=st.integers(min_value=1, max_value=5),
           seed=st.integers(min_value=0, max_value=65536),
           null_axes=st.booleans(),
           engine=st.sampled_from(['CUDNN', None]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
        """Check Transpose against np.transpose, with either explicit random
        axes or the default (reversed) axes when `null_axes` is set."""
        if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):
            # cudnn 5.1 does not support int.
            assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)

        # Seed before any random draw so dims, X and axes are all
        # reproducible from the hypothesis-provided seed. Previously the
        # seed was applied only in the non-null-axes branch, after the
        # input data had already been generated.
        np.random.seed(int(seed))

        dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
        X = (np.random.rand(*dims) * 16).astype(dtype)

        if null_axes:
            axes = None
            op = core.CreateOperator(
                "Transpose",
                ["input"], ["output"],
                engine=engine)
        else:
            axes = [int(v) for v in list(np.random.permutation(X.ndim))]
            op = core.CreateOperator(
                "Transpose",
                ["input"], ["output"],
                axes=axes,
                engine=engine)

        def transpose_ref(x, axes):
            # np.transpose with axes=None reverses the dimensions, matching
            # the operator's default behavior.
            return (np.transpose(x, axes),)

        self.assertReferenceChecks(gc, op, [X, axes],
                                   transpose_ref)
112

113
    @given(m=st.integers(5, 10), n=st.integers(5, 10),
           o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_nan_check(self, m, n, o, nans, gc, dc):
        """NanCheck must pass clean tensors through unchanged and raise a
        RuntimeError when the input (or its gradient) contains a NaN."""
        other = np.array([1, 2, 3]).astype(np.float32)
        X = np.random.rand(m, n, o).astype(np.float32)
        if nans:
            # Poison exactly one randomly-chosen element.
            poison = tuple(np.random.randint(0, hi) for hi in (m, n, o))
            X[poison] = float('NaN')

        def nan_reference(X, Y):
            return [X] if not np.isnan(X).any() else [np.array([])]

        op = core.CreateOperator(
            "NanCheck",
            ["X", "other"],
            ["Y"]
        )

        try:
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=[X, other],
                reference=nan_reference,
            )
            if nans:
                self.assertTrue(False, "Did not fail when presented with NaN!")
        except RuntimeError:
            self.assertTrue(nans, "No NaNs but failed")

        try:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=[X],
                outputs_to_check=0,
                outputs_with_grads=[0],
            )
            if nans:
                self.assertTrue(False, "Did not fail when gradient had NaN!")
        except RuntimeError:
            pass
164

165
    @serial.given(n=st.integers(4, 5), m=st.integers(6, 7),
           d=st.integers(2, 3), **hu.gcs)
    def test_elementwise_max(self, n, m, d, gc, dc):
        """Elementwise Max over three tensors vs. chained np.maximum."""
        tensors = [np.random.rand(n, m, d).astype(np.float32)
                   for _ in range(3)]

        def max_op(X, Y, Z):
            return [np.maximum(np.maximum(X, Y), Z)]

        op = core.CreateOperator(
            "Max",
            ["X", "Y", "Z"],
            ["mx"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=tensors,
            reference=max_op,
        )
        self.assertDeviceChecks(dc, op, tensors, [0])
189

190
    @given(n=st.integers(4, 5), m=st.integers(6, 7),
           d=st.integers(2, 3), **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_max_grad(self, n, m, d, gc, dc):
        """MaxGradient routes the output gradient to every input that
        equals the elementwise maximum."""
        shape = (n, m, d)
        go = np.random.rand(*shape).astype(np.float32)
        X = np.random.rand(*shape).astype(np.float32)
        Y = np.random.rand(*shape).astype(np.float32)
        Z = np.random.rand(*shape).astype(np.float32)
        mx = np.maximum(np.maximum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]

        def max_grad_op(mx, go, X, Y, Z):
            # Each input receives gradient wherever it attained the max.
            return [go * (mx == a) for a in (X, Y, Z)]

        op = core.CreateOperator(
            "MaxGradient",
            ["mx", "go", "X", "Y", "Z"],
            ["gX", "gY", "gZ"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=max_grad_op,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
220

221
    @serial.given(n=st.integers(4, 5), m=st.integers(6, 7),
           d=st.integers(2, 3), **hu.gcs)
    def test_elementwise_min(self, n, m, d, gc, dc):
        """Elementwise Min over three tensors vs. chained np.minimum."""
        tensors = [np.random.rand(n, m, d).astype(np.float32)
                   for _ in range(3)]

        def min_op(X, Y, Z):
            return [np.minimum(np.minimum(X, Y), Z)]

        op = core.CreateOperator(
            "Min",
            ["X", "Y", "Z"],
            ["mx"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=tensors,
            reference=min_op,
        )
        self.assertDeviceChecks(dc, op, tensors, [0])
245

246
    @given(n=st.integers(4, 5), m=st.integers(6, 7),
           d=st.integers(2, 3), **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_min_grad(self, n, m, d, gc, dc):
        """MinGradient routes the output gradient to every input that
        equals the elementwise minimum."""
        shape = (n, m, d)
        go = np.random.rand(*shape).astype(np.float32)
        X = np.random.rand(*shape).astype(np.float32)
        Y = np.random.rand(*shape).astype(np.float32)
        Z = np.random.rand(*shape).astype(np.float32)
        mx = np.minimum(np.minimum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]

        def min_grad_op(mx, go, X, Y, Z):
            # Each input receives gradient wherever it attained the min.
            return [go * (mx == a) for a in (X, Y, Z)]

        op = core.CreateOperator(
            "MinGradient",
            ["mx", "go", "X", "Y", "Z"],
            ["gX", "gY", "gZ"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=min_grad_op,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
276

277
    @given(
        n=st.integers(1, 8), m=st.integers(1, 10), d=st.integers(1, 4),
        in_place=st.booleans(), engine=st.sampled_from(["", "CUDNN"]),
        seed=st.integers(min_value=0, max_value=65535),
        dtype=st.sampled_from([np.int32, np.int64, np.float32]),
        **hu.gcs)
    @settings(deadline=10000)
    def test_sum(
            self, n, m, d, in_place, engine, seed, dtype, gc, dc):
        """Sum of m tensors of shape (n, d), optionally writing the result
        in-place over the first input, checked against a numpy reference."""
        np.random.seed(seed)
        input_names = ['X' + str(i) for i in range(m)]
        input_vars = [np.random.rand(n, d).astype(dtype) for _ in range(m)]
        # NOTE: the original code also stored each array via
        # `vars()[X_name] = var`, which is a no-op inside a function
        # (writes to a function's vars() do not create locals) — removed.

        def sum_op_ref(*args):
            res = np.zeros((n, d))
            for i in range(m):
                res = res + args[i]
            return (res, )

        op = core.CreateOperator(
            "Sum",
            input_names,
            [input_names[0]] if in_place else ['Y'],
            engine=engine,
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=input_vars,
            reference=sum_op_ref,
        )
        self.assertDeviceChecks(dc, op, input_vars, [0])
316

317
    @given(
        inputs=hu.lengths_tensor().flatmap(
            lambda pair: st.tuples(
                st.just(pair[0]),
                st.just(pair[1]),
                hu.dims(max_value=len(pair[1])),
            )
        ).flatmap(
            lambda tup: st.tuples(
                st.just(tup[0]),
                st.just(tup[1]),
                hu.arrays(
                    tup[2], dtype=np.int32,
                    elements=st.integers(
                        min_value=0, max_value=len(tup[1]) - 1)),
            )
        ),
        **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_lengths_gather(self, inputs, gc, dc):
        """LengthsGather picks whole length-delimited segments by index."""
        items, lengths, indices = inputs

        def lengths_gather_op(items, lengths, indices):
            # Segment i occupies items[ends[i] - lengths[i] : ends[i]].
            ends = np.cumsum(lengths)
            segments = [items[ends[i] - lengths[i]:ends[i]] for i in indices]
            return [np.concatenate(segments)]

        op = core.CreateOperator(
            "LengthsGather",
            ["items", "lengths", "indices"],
            ["output"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[items, lengths, indices],
            reference=lengths_gather_op,
        )
358

359
    @given(
        inputs=hu.lengths_tensor(),
        **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_lengths_to_ranges(self, inputs, gc, dc):
        """LengthsToRanges turns a lengths vector into [offset, length]
        pairs; also verifies the operator's shape/type inference."""
        _, lengths = inputs

        def lengths_to_ranges_op(lengths):
            # Offsets are the exclusive prefix sums of the lengths.
            offsets = np.cumsum(np.append([0], lengths))
            return [[[start, size] for start, size in zip(offsets, lengths)]]

        op = core.CreateOperator(
            "LengthsToRanges",
            ["lengths"],
            ["output"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[lengths],
            reference=lengths_to_ranges_op,
        )

        # Test shape inference logic
        net = core.Net("test_shape_inference")

        workspace.FeedBlob("lengths", lengths)
        output = net.LengthsToRanges(
            ["lengths"],
            ["output"]
        )
        (shapes, types) = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        self.assertEqual(shapes[output], list(workspace.blobs[output].shape))
        self.assertEqual(shapes[output], list(lengths.shape) + [2])
        self.assertEqual(types[output], core.DataType.INT32)
398

399
    @given(**hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_size_op(self, gc, dc):
        """Size returns the total number of elements of its input tensor."""
        data = np.asarray([[1, 2], [3, 4]], dtype=np.float32)

        op = core.CreateOperator("Size", ["X"], ["output"])

        def size_op(tensor):
            # Total element count == product of the dims.
            return [np.prod(tensor.shape)]

        self.assertReferenceChecks(
            device_option=gc, op=op, inputs=[data], reference=size_op,
        )
419

420
    def test_alias_op(self):
        """ Don't use hypothesis because there are only 2 cases to check"""
        # Alias must expose the same data under a new blob name, for both
        # an empty and a non-empty tensor.
        for size in (0, 5):
            source = np.arange(size).astype(np.float32)
            workspace.FeedBlob('X', source)

            workspace.RunOperatorOnce(
                core.CreateOperator("Alias", ["X"], ["Y"])
            )
            np.testing.assert_array_equal(source, workspace.FetchBlob('Y'))
434

435
    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_range(self, gc, dc):
        """Range mirrors np.arange for the 1/2/3-argument forms and must
        reject a step size of zero."""
        names = [
            ('stop_',),
            ('start_', 'stop_'),
            ('start_', 'stop_', 'step_'),
        ]

        def arange_ref(*x):
            return [np.arange(*x)]

        # Most random values aren't great here, so use a fixed set instead of
        # hypothesis.
        fixed_cases = [
            (10,),
            (np.float32(10.0),),
            (0,),
            (0, 0),
            (10., 5.0, -1.),
            (2, 10000),
            (2, 10000, 20000),
            (2, 10000, -1),
        ]
        for case in fixed_cases:
            inputs = [np.array(v) for v in case]
            op = core.CreateOperator(
                "Range",
                names[len(inputs) - 1],
                ["Y"]
            )

            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                reference=arange_ref,
            )
            self.assertDeviceChecks(dc, op, inputs, [0])

        # A zero step is invalid and must raise.
        inputs = (np.array(0), np.array(10), np.array(0))
        op = core.CreateOperator(
            "Range",
            names[len(inputs) - 1],
            ["Y"]
        )
        with self.assertRaisesRegex(RuntimeError, 'Step size cannot be 0'):
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                reference=arange_ref,
            )
483

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.