# Tests for Caffe2 utility operators (Slice, Transpose, Sum, Range, ...),
# each checked against a plain-NumPy reference implementation.

import random

import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestUtilityOps(serial.SerializedTestCase):

    @given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_slice(self, X, args, gc, dc):
        """Check the Slice op against NumPy slicing along one random dim.

        `args` chooses whether the slice bounds are passed as operator
        arguments (starts/ends) or as extra input tensors.

        Fix: the reference closures indexed with a *list* of slices
        (``X[slc]``), deprecated since NumPy 1.15 and an error in modern
        NumPy; multidimensional indices must be tuples.
        """
        X = X.astype(dtype=np.float32)
        # NOTE(review): `random` is unseeded here, so the chosen dim/bounds
        # are not reproducible across hypothesis re-runs — confirm intended.
        dim = random.randint(0, X.ndim - 1)
        slice_start = random.randint(0, X.shape[dim] - 1)
        slice_end = random.randint(slice_start, X.shape[dim] - 1)
        starts = np.array([0] * X.ndim).astype(np.int32)
        ends = np.array([-1] * X.ndim).astype(np.int32)
        starts[dim] = slice_start
        ends[dim] = slice_end

        if args:
            op = core.CreateOperator(
                "Slice", ["X"], ["Y"], starts=starts, ends=ends, device_option=gc
            )

            def slice_ref(X):
                slc = [slice(None)] * X.ndim
                slc[dim] = slice(slice_start, slice_end)
                return [X[tuple(slc)]]
            inputs = [X]
        else:
            op = core.CreateOperator(
                "Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
            )

            def slice_ref(x, starts, ends):
                slc = [slice(None)] * x.ndim
                slc[dim] = slice(slice_start, slice_end)
                return [x[tuple(slc)]]
            inputs = [X, starts, ends]

        self.assertReferenceChecks(gc, op, inputs, slice_ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            outputs_to_check=0,
            outputs_with_grads=[0],
        )
61@given(ndims=st.integers(min_value=1, max_value=10), **hu.gcs)62@settings(deadline=10000)63def test_resize_like(self, ndims, gc, dc):64X = np.zeros((ndims * 2, ))65Y = np.zeros((ndims, 2))66
67op = core.CreateOperator(68"ResizeLike", ["X", "Y"], ["Z"],69)70
71def resize_like(X, Y):72return [X.reshape(Y.shape)]73
74self.assertDeviceChecks(dc, op, [X, Y], [0])75self.assertReferenceChecks(gc, op, [X, Y], resize_like, ensure_outputs_are_inferred=True)76
77@given(dtype=st.sampled_from([np.float32, np.int32]),78ndims=st.integers(min_value=1, max_value=5),79seed=st.integers(min_value=0, max_value=65536),80null_axes=st.booleans(),81engine=st.sampled_from(['CUDNN', None]),82**hu.gcs)83@settings(deadline=10000)84def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):85if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):86# cudnn 5.1 does not support int.87assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)88
89dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)90X = (np.random.rand(*dims) * 16).astype(dtype)91
92if null_axes:93axes = None94op = core.CreateOperator(95"Transpose",96["input"], ["output"],97engine=engine)98else:99np.random.seed(int(seed))100axes = [int(v) for v in list(np.random.permutation(X.ndim))]101op = core.CreateOperator(102"Transpose",103["input"], ["output"],104axes=axes,105engine=engine)106
107def transpose_ref(x, axes):108return (np.transpose(x, axes),)109
110self.assertReferenceChecks(gc, op, [X, axes],111transpose_ref)112
    @given(m=st.integers(5, 10), n=st.integers(5, 10),
           o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_nan_check(self, m, n, o, nans, gc, dc):
        # Purpose: NanCheck must pass a clean tensor through unchanged and
        # raise when the input contains a NaN. `nans` decides whether a
        # single NaN is planted at a random position in X.
        other = np.array([1, 2, 3]).astype(np.float32)
        X = np.random.rand(m, n, o).astype(np.float32)
        if nans:
            x_nan = np.random.randint(0, m)
            y_nan = np.random.randint(0, n)
            z_nan = np.random.randint(0, o)
            X[x_nan, y_nan, z_nan] = float('NaN')

        def nan_reference(X, Y):
            # Reference output: X itself when clean; the (unused-on-raise)
            # placeholder when X has NaNs — the op raises in that case.
            if not np.isnan(X).any():
                return [X]
            else:
                return [np.array([])]

        op = core.CreateOperator(
            "NanCheck",
            ["X", "other"],
            ["Y"]
        )

        # Forward pass: must succeed exactly when X is NaN-free.
        # assertTrue(False, ...) raises AssertionError, which is NOT caught
        # by the `except RuntimeError` below, so the failure propagates.
        try:
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=[X, other],
                reference=nan_reference,
            )
            if nans:
                self.assertTrue(False, "Did not fail when presented with NaN!")
        except RuntimeError:
            self.assertTrue(nans, "No NaNs but failed")

        # Gradient pass: only X is fed here.
        # NOTE(review): the op declares two inputs but inputs=[X] — confirm
        # the gradient checker tolerates the missing "other" blob.
        try:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=[X],
                outputs_to_check=0,
                outputs_with_grads=[0],
            )
            if nans:
                self.assertTrue(False, "Did not fail when gradient had NaN!")
        except RuntimeError:
            pass
165@serial.given(n=st.integers(4, 5), m=st.integers(6, 7),166d=st.integers(2, 3), **hu.gcs)167def test_elementwise_max(self, n, m, d, gc, dc):168X = np.random.rand(n, m, d).astype(np.float32)169Y = np.random.rand(n, m, d).astype(np.float32)170Z = np.random.rand(n, m, d).astype(np.float32)171inputs = [X, Y, Z]172
173def max_op(X, Y, Z):174return [np.maximum(np.maximum(X, Y), Z)]175
176op = core.CreateOperator(177"Max",178["X", "Y", "Z"],179["mx"]180)181
182self.assertReferenceChecks(183device_option=gc,184op=op,185inputs=inputs,186reference=max_op,187)188self.assertDeviceChecks(dc, op, inputs, [0])189
190@given(n=st.integers(4, 5), m=st.integers(6, 7),191d=st.integers(2, 3), **hu.gcs)192@settings(deadline=10000)193def test_elementwise_max_grad(self, n, m, d, gc, dc):194go = np.random.rand(n, m, d).astype(np.float32)195X = np.random.rand(n, m, d).astype(np.float32)196Y = np.random.rand(n, m, d).astype(np.float32)197Z = np.random.rand(n, m, d).astype(np.float32)198mx = np.maximum(np.maximum(X, Y), Z)199inputs = [mx, go, X, Y, Z]200
201def max_grad_op(mx, go, X, Y, Z):202def mx_grad(a):203return go * (mx == a)204
205return [mx_grad(a) for a in [X, Y, Z]]206
207op = core.CreateOperator(208"MaxGradient",209["mx", "go", "X", "Y", "Z"],210["gX", "gY", "gZ"]211)212
213self.assertReferenceChecks(214device_option=gc,215op=op,216inputs=inputs,217reference=max_grad_op,218)219self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])220
221@serial.given(n=st.integers(4, 5), m=st.integers(6, 7),222d=st.integers(2, 3), **hu.gcs)223def test_elementwise_min(self, n, m, d, gc, dc):224X = np.random.rand(n, m, d).astype(np.float32)225Y = np.random.rand(n, m, d).astype(np.float32)226Z = np.random.rand(n, m, d).astype(np.float32)227inputs = [X, Y, Z]228
229def min_op(X, Y, Z):230return [np.minimum(np.minimum(X, Y), Z)]231
232op = core.CreateOperator(233"Min",234["X", "Y", "Z"],235["mx"]236)237
238self.assertReferenceChecks(239device_option=gc,240op=op,241inputs=inputs,242reference=min_op,243)244self.assertDeviceChecks(dc, op, inputs, [0])245
246@given(n=st.integers(4, 5), m=st.integers(6, 7),247d=st.integers(2, 3), **hu.gcs)248@settings(deadline=10000)249def test_elementwise_min_grad(self, n, m, d, gc, dc):250go = np.random.rand(n, m, d).astype(np.float32)251X = np.random.rand(n, m, d).astype(np.float32)252Y = np.random.rand(n, m, d).astype(np.float32)253Z = np.random.rand(n, m, d).astype(np.float32)254mx = np.minimum(np.minimum(X, Y), Z)255inputs = [mx, go, X, Y, Z]256
257def min_grad_op(mx, go, X, Y, Z):258def mx_grad(a):259return go * (mx == a)260
261return [mx_grad(a) for a in [X, Y, Z]]262
263op = core.CreateOperator(264"MinGradient",265["mx", "go", "X", "Y", "Z"],266["gX", "gY", "gZ"]267)268
269self.assertReferenceChecks(270device_option=gc,271op=op,272inputs=inputs,273reference=min_grad_op,274)275self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])276
277@given(278n=st.integers(1, 8), m=st.integers(1, 10), d=st.integers(1, 4),279in_place=st.booleans(), engine=st.sampled_from(["", "CUDNN"]),280seed=st.integers(min_value=0, max_value=65535),281dtype=st.sampled_from([np.int32, np.int64, np.float32]),282**hu.gcs)283@settings(deadline=10000)284def test_sum(285self, n, m, d, in_place, engine, seed, dtype, gc, dc):286input_names = []287input_vars = []288np.random.seed(seed)289for i in range(m):290X_name = 'X' + str(i)291input_names.extend([X_name])292var = np.random.rand(n, d).astype(dtype)293vars()[X_name] = var294input_vars.append(var)295
296def sum_op_ref(*args):297res = np.zeros((n, d))298for i in range(m):299res = res + args[i]300return (res, )301
302op = core.CreateOperator(303"Sum",304input_names,305[input_names[0]] if in_place else ['Y'],306engine=engine,307)308
309self.assertReferenceChecks(310device_option=gc,311op=op,312inputs=input_vars,313reference=sum_op_ref,314)315self.assertDeviceChecks(dc, op, input_vars, [0])316
317@given(318inputs=hu.lengths_tensor().flatmap(319lambda pair: st.tuples(320st.just(pair[0]),321st.just(pair[1]),322hu.dims(max_value=len(pair[1])),323)324).flatmap(325lambda tup: st.tuples(326st.just(tup[0]),327st.just(tup[1]),328hu.arrays(329tup[2], dtype=np.int32,330elements=st.integers(331min_value=0, max_value=len(tup[1]) - 1)),332)333),334**hu.gcs_cpu_only)335@settings(deadline=10000)336def test_lengths_gather(self, inputs, gc, dc):337items = inputs[0]338lengths = inputs[1]339indices = inputs[2]340
341def lengths_gather_op(items, lengths, indices):342ends = np.cumsum(lengths)343return [np.concatenate(344list(items[ends[i] - lengths[i]:ends[i]] for i in indices))]345
346op = core.CreateOperator(347"LengthsGather",348["items", "lengths", "indices"],349["output"]350)351
352self.assertReferenceChecks(353device_option=gc,354op=op,355inputs=[items, lengths, indices],356reference=lengths_gather_op,357)358
359@given(360inputs=hu.lengths_tensor(),361**hu.gcs_cpu_only)362@settings(deadline=10000)363def test_lengths_to_ranges(self, inputs, gc, dc):364_, lengths = inputs365
366def lengths_to_ranges_op(lengths):367return [368[[x, y] for x, y in zip(np.cumsum(np.append([0], lengths)),369lengths)]370]371
372op = core.CreateOperator(373"LengthsToRanges",374["lengths"],375["output"]376)377
378self.assertReferenceChecks(379device_option=gc,380op=op,381inputs=[lengths],382reference=lengths_to_ranges_op,383)384
385# Test shape inference logic386net = core.Net("test_shape_inference")387
388workspace.FeedBlob("lengths", lengths)389output = net.LengthsToRanges(390["lengths"],391["output"]392)393(shapes, types) = workspace.InferShapesAndTypes([net])394workspace.RunNetOnce(net)395self.assertEqual(shapes[output], list(workspace.blobs[output].shape))396self.assertEqual(shapes[output], list(lengths.shape) + [2])397self.assertEqual(types[output], core.DataType.INT32)398
399@given(**hu.gcs)400@settings(deadline=None, max_examples=50)401def test_size_op(self, gc, dc):402X = np.array([[1, 2], [3, 4]]).astype(np.float32)403
404def size_op(tensor):405return [np.prod(tensor.shape)]406
407op = core.CreateOperator(408"Size",409["X"],410["output"]411)412
413self.assertReferenceChecks(414device_option=gc,415op=op,416inputs=[X],417reference=size_op,418)419
420def test_alias_op(self):421""" Don't use hypothesis because there are only 2 cases to check"""422for size in [0, 5]:423X = np.arange(size).astype(np.float32)424workspace.FeedBlob('X', X)425
426op = core.CreateOperator(427"Alias",428["X"],429["Y"]430)431workspace.RunOperatorOnce(op)432Y = workspace.FetchBlob('Y')433np.testing.assert_array_equal(X, Y)434
435@given(**hu.gcs)436@settings(deadline=10000)437def test_range(self, gc, dc):438names = [439('stop_',),440('start_', 'stop_'),441('start_', 'stop_', 'step_'),442]443# Most random values aren't great here, so use a fixed set instead of444# hypothesis.445for inputs in (446(10,),447(np.float32(10.0),),448(0,),449(0, 0),450(10., 5.0, -1.),451(2, 10000),452(2, 10000, 20000),453(2, 10000, -1),454):455inputs = [np.array(v) for v in inputs]456op = core.CreateOperator(457"Range",458names[len(inputs) - 1],459["Y"]460)461
462self.assertReferenceChecks(463device_option=gc,464op=op,465inputs=inputs,466reference=lambda *x: [np.arange(*x)],467)468self.assertDeviceChecks(dc, op, inputs, [0])469
470inputs = (np.array(0), np.array(10), np.array(0))471op = core.CreateOperator(472"Range",473names[len(inputs) - 1],474["Y"]475)476with self.assertRaisesRegex(RuntimeError, 'Step size cannot be 0'):477self.assertReferenceChecks(478device_option=gc,479op=op,480inputs=inputs,481reference=lambda *x: [np.arange(*x)],482)483