pytorch
197 lines · 7.1 KB
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################




from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest

28
class TestUpSample(serial.SerializedTestCase):
    """Tests for the UpsampleBilinear op and its gradient.

    Each test builds a NumPy reference implementation of 2-D bilinear
    upsampling (NCHW layout) and checks the operator against it, both when
    the scales are passed as op arguments and when they arrive as a second
    "scales" input blob.
    """

    @given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
           width_scale=st.floats(1.0, 4.0) | st.just(2.0),
           height=st.integers(4, 32),
           width=st.integers(4, 32),
           num_channels=st.integers(1, 4),
           batch_size=st.integers(1, 4),
           seed=st.integers(0, 65535),
           **hu.gcs)
    @settings(max_examples=50, deadline=None)
    def test_upsample(self, height_scale, width_scale, height, width,
                      num_channels, batch_size, seed,
                      gc, dc):
        """Forward check: reference output, device parity, and gradients."""
        np.random.seed(seed)

        X = np.random.rand(
            batch_size, num_channels, height, width).astype(np.float32)
        scales = np.array([height_scale, width_scale]).astype(np.float32)

        # Exercise both ways of supplying the scales: as op arguments and
        # as an extra input tensor.
        ops = [
            (
                core.CreateOperator(
                    "UpsampleBilinear",
                    ["X"],
                    ["Y"],
                    width_scale=width_scale,
                    height_scale=height_scale,
                ),
                [X],
            ),
            (
                core.CreateOperator(
                    "UpsampleBilinear",
                    ["X", "scales"],
                    ["Y"],
                ),
                [X, scales],
            ),
        ]

        for op, inputs in ops:
            def ref(X, scales=None):
                # Output size is the truncated product, matching the op's
                # int conversion.
                output_height = np.int32(height * height_scale)
                output_width = np.int32(width * width_scale)

                # FIX: was np.random.rand(...) — every element is overwritten
                # by the loops below, so the random fill only wasted the
                # seeded RNG stream; np.empty makes the intent explicit.
                Y = np.empty(
                    (batch_size, num_channels, output_height, output_width),
                    dtype=np.float32)

                # Ratio between input and output grid steps; 0 when the
                # output has a single row/column (degenerate case).
                rheight = ((height - 1) / (output_height - 1)
                           if output_height > 1
                           else float(0))
                rwidth = ((width - 1) / (output_width - 1)
                          if output_width > 1
                          else float(0))

                for i in range(output_height):
                    h1r = rheight * i
                    h1 = int(h1r)
                    # h1p selects the next row unless we are on the last one.
                    h1p = 1 if h1 < height - 1 else 0
                    h1lambda = h1r - h1
                    h0lambda = float(1) - h1lambda
                    for j in range(output_width):
                        w1r = rwidth * j
                        w1 = int(w1r)
                        w1p = 1 if w1 < width - 1 else 0
                        w1lambda = w1r - w1
                        w0lambda = float(1) - w1lambda
                        # Bilinear blend of the four neighboring pixels.
                        Y[:, :, i, j] = (h0lambda * (
                            w0lambda * X[:, :, h1, w1] +
                            w1lambda * X[:, :, h1, w1 + w1p]) +
                            h1lambda * (w0lambda * X[:, :, h1 + h1p, w1] +
                                        w1lambda * X[:, :, h1 + h1p, w1 + w1p]))

                return Y,

            self.assertReferenceChecks(gc, op, inputs, ref)
            self.assertDeviceChecks(dc, op, inputs, [0])
            self.assertGradientChecks(gc, op, inputs, 0, [0], stepsize=0.1,
                                      threshold=1e-2)

    @given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
           width_scale=st.floats(1.0, 4.0) | st.just(2.0),
           height=st.integers(4, 32),
           width=st.integers(4, 32),
           num_channels=st.integers(1, 4),
           batch_size=st.integers(1, 4),
           seed=st.integers(0, 65535),
           **hu.gcs)
    @settings(deadline=10000)
    def test_upsample_grad(self, height_scale, width_scale, height, width,
                           num_channels, batch_size, seed, gc, dc):
        """Backward check: UpsampleBilinearGradient against a NumPy reference."""
        np.random.seed(seed)

        output_height = np.int32(height * height_scale)
        output_width = np.int32(width * width_scale)
        X = np.random.rand(batch_size,
                           num_channels,
                           height,
                           width).astype(np.float32)
        dY = np.random.rand(batch_size,
                            num_channels,
                            output_height,
                            output_width).astype(np.float32)
        scales = np.array([height_scale, width_scale]).astype(np.float32)

        ops = [
            (
                core.CreateOperator(
                    "UpsampleBilinearGradient",
                    ["dY", "X"],
                    ["dX"],
                    width_scale=width_scale,
                    height_scale=height_scale,
                ),
                [dY, X],
            ),
            (
                core.CreateOperator(
                    "UpsampleBilinearGradient",
                    ["dY", "X", "scales"],
                    ["dX"],
                ),
                [dY, X, scales],
            ),
        ]

        for op, inputs in ops:
            def ref(dY, X, scales=None):
                # Gradient accumulates, so it must start from zeros.
                dX = np.zeros_like(X)

                rheight = ((height - 1) / (output_height - 1)
                           if output_height > 1
                           else float(0))
                rwidth = ((width - 1) / (output_width - 1)
                          if output_width > 1
                          else float(0))

                for i in range(output_height):
                    h1r = rheight * i
                    h1 = int(h1r)
                    h1p = 1 if h1 < height - 1 else 0
                    h1lambda = h1r - h1
                    h0lambda = float(1) - h1lambda
                    for j in range(output_width):
                        w1r = rwidth * j
                        w1 = int(w1r)
                        w1p = 1 if w1 < width - 1 else 0
                        w1lambda = w1r - w1
                        w0lambda = float(1) - w1lambda
                        # Scatter each upstream gradient back to the four
                        # source pixels with the same bilinear weights used
                        # in the forward pass.
                        dX[:, :, h1, w1] += (
                            h0lambda * w0lambda * dY[:, :, i, j])
                        dX[:, :, h1, w1 + w1p] += (
                            h0lambda * w1lambda * dY[:, :, i, j])
                        dX[:, :, h1 + h1p, w1] += (
                            h1lambda * w0lambda * dY[:, :, i, j])
                        dX[:, :, h1 + h1p, w1 + w1p] += (
                            h1lambda * w1lambda * dY[:, :, i, j])

                return dX,

            self.assertDeviceChecks(dc, op, inputs, [0])
            self.assertReferenceChecks(gc, op, inputs, ref)
195
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()