pytorch
76 lines · 2.9 KB
1#!/usr/bin/env python3
2
3import caffe2.python.hypothesis_test_util as hu
4import hypothesis.strategies as st
5import numpy as np
6import numpy.testing as npt
7from caffe2.python import core, workspace
8from hypothesis import given
9
10
class TestUnsafeCoalesceOp(hu.HypothesisTestCase):
    """Tests for the UnsafeCoalesce operator.

    UnsafeCoalesce packs several input blobs into one contiguous
    shared-memory blob and re-points each input blob into that shared
    storage, so writes to the coalesced blob are visible through the
    original blob names.
    """

    @given(
        n=st.integers(1, 5),
        shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
        **hu.gcs
    )
    def test_unsafe_coalesce_op(self, n, shape, dc, gc):
        workspace.ResetWorkspace()
        inputs = [
            (100 * np.random.random(shape)).astype(np.float32)
            for _ in range(n)
        ]
        input_names = ["x_{}".format(i) for i in range(n)]

        op = core.CreateOperator(
            "UnsafeCoalesce",
            input_names,
            input_names + ["shared_memory_blob"],
            device_option=gc,
        )

        def coalesce_reference(*args):
            # Inputs pass through unchanged; the extra output is a flat
            # concatenation of all of them.
            self.assertEqual(len(args), n)
            flat = np.concatenate([a.flatten() for a in args])
            return list(args) + [flat]

        self.assertReferenceChecks(gc, op, inputs, coalesce_reference)

    @given(
        n=st.integers(1, 5),
        shape=st.lists(st.integers(1, 5), min_size=1, max_size=3),
        seed=st.integers(0, 65535),
        **hu.gcs
    )
    def test_unsafe_coalesce_op_blob_sharing(self, n, shape, seed, dc, gc):
        workspace.ResetWorkspace()
        # Seeding makes failures reproducible when debugging.
        np.random.seed(seed)
        inputs = [np.random.random(shape).astype(np.float32) for _ in range(n)]
        input_names = ["x_{}".format(i) for i in range(n)]

        op = core.CreateOperator(
            "UnsafeCoalesce",
            input_names,
            input_names + ["shared_memory_blob"],
            device_option=gc,
        )
        for name, value in zip(input_names, inputs):
            workspace.FeedBlob(name, value, device_option=gc)

        workspace.RunOperatorOnce(op)

        # The coalesced blob must equal the flat concatenation of inputs.
        coalesced = workspace.blobs["shared_memory_blob"]
        expected = np.concatenate([x.flatten() for x in inputs])
        npt.assert_almost_equal(coalesced, expected, decimal=4)

        # np.random draws from [0, 1), so -2 lies outside every input's range.
        coalesced.fill(-2.0)
        # The fetched array is a copy, so the workspace blob is untouched yet.
        self.assertTrue(
            (coalesced != workspace.blobs["shared_memory_blob"]).all()
        )
        workspace.FeedBlob("shared_memory_blob", coalesced, device_option=gc)

        # Every input blob keeps its shape but now reads back as -2,
        # proving it aliases the shared storage.
        for name, value in zip(input_names, inputs):
            self.assertEqual(value.shape, workspace.blobs[name].shape)
            self.assertTrue((value != workspace.blobs[name]).all())
            self.assertTrue((workspace.blobs[name] == -2).all())

        # Re-running the operator is fine as long as blob shapes are stable.
        workspace.RunOperatorOnce(op)
77