pytorch

Форк
0
/
reduce_ops_test.py 
444 строки · 16.9 Кб
1

2

3

4

5

6
from caffe2.python import core, workspace
7
from hypothesis import given, settings
8

9
import caffe2.python.hypothesis_test_util as hu
10
import caffe2.python.serialized_test.serialized_test_util as serial
11
import hypothesis.strategies as st
12
import numpy as np
13
import itertools as it
14

15

16
class TestReduceOps(serial.SerializedTestCase):
    """Tests for the axis-list Reduce* operators (ReduceMin/Max/Sum/Mean/L1/L2).

    Each test builds a numpy reference reduction and checks the Caffe2
    operator against it (reference check, cross-device check, and numeric
    gradient check), for both the reduce-all path (no ``axes`` attribute)
    and every combination of ``num_axes`` explicit axes.
    """

    def run_reduce_op_test_impl(
            self, op_name, X, axes, keepdims, ref_func, gc, dc, allow_broadcast_fastpath):
        # Run one (op_name, axes) configuration.
        #   axes: iterable of axis indices, or None to reduce over all axes.
        #   ref_func: numpy-style reduction called as f(X, axis=..., keepdims=...).
        # Only pass the attribute when the fastpath is requested, so the
        # operator's default (attribute absent) code path is also exercised.
        extra_args = dict(allow_broadcast_fastpath=True) if allow_broadcast_fastpath else {}
        if axes is None:
            # No "axes" attribute: the operator reduces over every axis.
            op = core.CreateOperator(
                op_name,
                ["X"],
                ["Y"],
                keepdims=keepdims,
                **extra_args,
            )
        else:
            op = core.CreateOperator(
                op_name,
                ["X"],
                ["Y"],
                axes=axes,
                keepdims=keepdims,
                **extra_args,
            )

        def ref(X):
            # numpy reference; axes must be converted to a tuple for numpy's
            # axis argument (it may arrive as a range or tuple of ints).
            return [ref_func(
                X, axis=None if axes is None else tuple(axes),
                keepdims=keepdims)]

        # The serialized-output check is disabled for the fastpath variant —
        # NOTE(review): presumably fastpath runs are not part of the recorded
        # serialized-test corpus; confirm against serialized_test_util.
        with self.set_disable_serialized_check(allow_broadcast_fastpath):
            self.assertReferenceChecks(gc, op, [X], ref)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])

    def run_reduce_op_test(
            self, op_name, X, keepdims, num_axes, ref_func, gc, dc, allow_broadcast_fastpath=False):
        # Always cover the axes=None (reduce-all) path first.
        self.run_reduce_op_test_impl(
            op_name, X, None, keepdims, ref_func, gc, dc, allow_broadcast_fastpath)

        num_dims = len(X.shape)
        if num_dims < num_axes:
            # Fewer dims than requested axes: reduce over all existing dims.
            self.run_reduce_op_test_impl(
                op_name, X, range(num_dims), keepdims, ref_func, gc, dc, allow_broadcast_fastpath)
        else:
            # Otherwise test every combination of num_axes distinct axes.
            for axes in it.combinations(range(num_dims), num_axes):
                self.run_reduce_op_test_impl(
                    op_name, X, axes, keepdims, ref_func, gc, dc, allow_broadcast_fastpath)

    @serial.given(
        X=hu.tensor(max_dim=3, dtype=np.float32),
        keepdims=st.booleans(),
        allow_broadcast_fastpath=st.booleans(),
        num_axes=st.integers(1, 3), **hu.gcs)
    def test_reduce_min(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        # Replace X with a shuffled arange of the same shape so every element
        # is distinct — presumably to keep the min location unique for the
        # gradient check (ties make the gradient ill-defined); TODO confirm.
        X_dims = X.shape
        X_size = X.size
        X = np.arange(X_size, dtype=np.float32)
        np.random.shuffle(X)
        X = X.reshape(X_dims)
        self.run_reduce_op_test(
            "ReduceMin", X, keepdims, num_axes, np.min, gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)

    @serial.given(
        X=hu.tensor(max_dim=3, dtype=np.float32),
        keepdims=st.booleans(),
        allow_broadcast_fastpath=st.booleans(),
        num_axes=st.integers(1, 3), **hu.gcs)
    def test_reduce_max(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        # Same distinct-values trick as test_reduce_min, for ReduceMax.
        X_dims = X.shape
        X_size = X.size
        X = np.arange(X_size, dtype=np.float32)
        np.random.shuffle(X)
        X = X.reshape(X_dims)
        self.run_reduce_op_test(
            "ReduceMax", X, keepdims, num_axes, np.max, gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)

    @given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
           t=st.integers(0, 5), keepdims=st.booleans(),
           allow_broadcast_fastpath=st.booleans(),
           num_axes=st.integers(1, 3), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_sum(self, n, m, k, t, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        # Dimensions may be 0, so empty tensors are covered too.
        X = np.random.randn(n, m, k, t).astype(np.float32)
        self.run_reduce_op_test(
            "ReduceSum", X, keepdims, num_axes, np.sum, gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)

    @serial.given(X=hu.tensor(dtype=np.float32), keepdims=st.booleans(),
                  allow_broadcast_fastpath=st.booleans(),
                  num_axes=st.integers(1, 4), **hu.gcs)
    def test_reduce_mean(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        self.run_reduce_op_test(
            "ReduceMean", X, keepdims, num_axes, np.mean, gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)

    @given(n=st.integers(1, 3), m=st.integers(1, 3), k=st.integers(1, 3),
           keepdims=st.booleans(), allow_broadcast_fastpath=st.booleans(),
           num_axes=st.integers(1, 3), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_reduce_l1(self, n, m, k, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        # Offset by 0.5 so no element is exactly 0 — the gradient of |x| is
        # undefined at 0, which would break the numeric gradient check.
        X = np.arange(n * m * k, dtype=np.float32) - 0.5
        np.random.shuffle(X)
        X = X.reshape((m, n, k))
        self.run_reduce_op_test(
            "ReduceL1", X, keepdims, num_axes, getNorm(1), gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)

    @serial.given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),
                  keepdims=st.booleans(), allow_broadcast_fastpath=st.booleans(),
                  num_axes=st.integers(1, 3), **hu.gcs_cpu_only)
    def test_reduce_l2(self, n, m, k, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
        X = np.random.randn(n, m, k).astype(np.float32)
        self.run_reduce_op_test(
            "ReduceL2", X, keepdims, num_axes, getNorm(2), gc, dc,
            allow_broadcast_fastpath=allow_broadcast_fastpath)
131

132

133
def getNorm(p):
    """Return a numpy reference reducer computing the Lp norm.

    The returned callable has the signature ``norm(X, axis, keepdims)`` and
    mirrors numpy's reduction conventions. Only p == 1 and p == 2 are
    supported; any other value raises RuntimeError.
    """
    def l1_norm(X, axis, keepdims):
        # Sum of absolute values along the requested axes.
        return np.abs(X).sum(axis=axis, keepdims=keepdims)

    def l2_norm(X, axis, keepdims):
        # Square root of the sum of squares along the requested axes.
        return np.sqrt((X * X).sum(axis=axis, keepdims=keepdims))

    dispatch = {1: l1_norm, 2: l2_norm}
    try:
        return dispatch[p]
    except KeyError:
        raise RuntimeError("Only L1 and L2 norms supported")
143

144

145
class TestReduceFrontReductions(serial.SerializedTestCase):
    """Tests for ReduceFront*/ReduceBack* (Sum/Mean/Max) operators.

    These operators reduce the first (Front) or last (Back) ``num_reduce_dim``
    dimensions of the input, optionally limited per-column/row by a
    ``lengths`` input. Helpers below check the forward output against a
    numpy reference, verify gradients (numerically, or CPU-vs-CUDA for max),
    and verify the gradient op accepts either X or its shape as input.
    """

    def grad_variant_input_test(self, grad_op_name, X, ref, num_reduce_dim):
        # The gradient op accepts either the original input X or just its
        # shape tensor as the second input; both variants must produce the
        # same dX. Runs the ops directly through the workspace.
        workspace.ResetWorkspace()

        Y = np.array(ref(X)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        shape = np.array(X.shape).astype(np.int64)

        workspace.FeedBlob("X", X)
        workspace.FeedBlob("dY", dY)
        workspace.FeedBlob("shape", shape)

        grad_op = core.CreateOperator(
            grad_op_name, ["dY", "X"], ["dX"], num_reduce_dim=num_reduce_dim)

        grad_op1 = core.CreateOperator(
            grad_op_name, ["dY", "shape"], ["dX1"],
            num_reduce_dim=num_reduce_dim)

        workspace.RunOperatorOnce(grad_op)
        workspace.RunOperatorOnce(grad_op1)

        dX = workspace.FetchBlob("dX")
        dX1 = workspace.FetchBlob("dX1")
        np.testing.assert_array_equal(dX, dX1)

    def max_op_test(
            self, op_name, num_reduce_dim, gc, dc, in_data, in_names, ref_max):
        # Forward reference check for a max-reduction op, then a
        # cross-device consistency check of its gradient op.

        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dim
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=in_data,
            reference=ref_max,
        )

        # Skip gradient check because it is too unreliable with max.
        # Just check CPU and CUDA have same results
        Y = np.array(ref_max(*in_data)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        if len(in_data) == 2:
            # Second input is the optional "lengths" blob; the gradient op
            # takes it after Y.
            grad_in_names = ["dY", in_names[0], "Y", in_names[1]]
            grad_in_data = [dY, in_data[0], Y, in_data[1]]
        else:
            grad_in_names = ["dY", in_names[0], "Y"]
            grad_in_data = [dY, in_data[0], Y]

        grad_op = core.CreateOperator(
            op_name + "Gradient",
            grad_in_names,
            ["dX"],
            num_reduce_dim=num_reduce_dim
        )
        self.assertDeviceChecks(dc, grad_op, grad_in_data, [0])

    def reduce_op_test(self, op_name, op_ref, in_data, in_names,
                       num_reduce_dims, device):
        # Forward reference check plus a numeric gradient check (loosened
        # stepsize/threshold for float32 reductions).
        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dims
        )

        self.assertReferenceChecks(
            device_option=device,
            op=op,
            inputs=in_data,
            reference=op_ref
        )

        self.assertGradientChecks(
            device, op, in_data, 0, [0], stepsize=1e-2, threshold=1e-2)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_sum(self, num_reduce_dim, gc, dc):
        X = np.random.rand(7, 4, 3, 5).astype(np.float32)

        def ref_sum(X):
            # Reduce the first num_reduce_dim axes.
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]

        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)

    @given(num_reduce_dim=st.integers(0, 4), seed=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):
        # First dimension is 0: the op must handle an empty batch.
        np.random.seed(seed)
        X = np.random.rand(0, 4, 3, 5).astype(np.float32)

        def ref_sum(X):
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]

        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)

        # test the second iteration
        # Re-run the same net with a non-empty batch, then the empty one, to
        # make sure the op copes with the batch size changing between runs.
        not_empty_X = np.random.rand(2, 4, 3, 5).astype(np.float32)
        net = core.Net('test')
        with core.DeviceScope(gc):
            net.ReduceFrontSum(
                ['X'], ['output'],
                num_reduce_dim=num_reduce_dim
            )
            workspace.CreateNet(net)

            workspace.FeedBlob('X', not_empty_X)
            workspace.RunNet(workspace.GetNetName(net))
            output = workspace.FetchBlob('output')
            np.testing.assert_allclose(
                output, ref_sum(not_empty_X)[0], atol=1e-3)

            workspace.FeedBlob('X', X)
            workspace.RunNet(workspace.GetNetName(net))
            output = workspace.FetchBlob('output')
            np.testing.assert_allclose(output, ref_sum(X)[0], atol=1e-3)

    @given(**hu.gcs)
    @settings(deadline=None)
    def test_reduce_front_sum_with_length(self, dc, gc):
        # With a "lengths" input, only the first lengths[i] entries of each
        # flattened front-column are reduced.
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size  # size of the reduced (front) extent
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_sum(X, lengths):
            # Flatten to (front extent, batch) and sum each column's prefix.
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_mean(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_mean(X):
            return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]

        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontMeanGradient", X, ref_mean, num_reduce_dim)

    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_mean_with_length(self, dc, gc):
        # Same prefix-by-lengths scheme as test_reduce_front_sum_with_length,
        # but averaging each column prefix.
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_mean(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_max(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_frontmax(X):
            return [np.max(X, axis=(tuple(range(num_reduce_dim))))]

        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X], ["X"], ref_frontmax)

    @given(**hu.gcs)
    def test_reduce_front_max_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_max(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)

    @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_back_max(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_backmax(X):
            # Reduce the last num_reduce_dim axes of the rank-4 input.
            return [np.max(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X], ["X"], ref_backmax)

    @given(**hu.gcs)
    def test_reduce_back_max_with_length(self, gc, dc):
        # Back variant: lengths limit a suffix row, so flatten to
        # (batch, back extent) and reduce each row's prefix.
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_max(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)

    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_sum(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_sum(X):
            return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackSumGradient", X, ref_sum, num_reduce_dim)

    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_sum_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_sum(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_mean(self, num_reduce_dim, dc, gc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_mean(X):
            return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackMeanGradient", X, ref_mean, num_reduce_dim)

    @given(**hu.gcs)
    @settings(deadline=None)
    def test_reduce_back_mean_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_mean(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
450

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.