pytorch

Форк
0
/
test_layernorm_nnpi_fp16.py 
240 строк · 8.6 Кб
import datetime

import numpy as np

import caffe2.python.fakelowp.init_shared_libs  # noqa
import caffe2.python.serialized_test.serialized_test_util as serial
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, settings
from hypothesis import strategies as st

# Configure the Glow/NNPI fake-FP16 lowering globally before any net runs.
_GLOW_FLAGS = [
    "caffe2",
    "--glow_global_fp16=1",
    "--glow_global_fused_scale_offset_fp16=1",
    "--glow_global_force_sls_fp16_accum=1",
]
core.GlobalInit(_GLOW_FLAGS)

# NOTE(review): unused in this file — presumably consumed elsewhere or kept
# for parity with sibling tests; confirm before removing.
GLOW_LOWERED_BATCHNORM = False
# Test the lowered LayerNorm op
class LayerNorm(serial.SerializedTestCase):
    """Compare the Glow-lowered LayerNorm ops against the fake-FP16 NNPI
    reference operators.

    ``test_layernorm`` checks plain LayerNorm against
    ``LayerNormFakeFP16NNPI``; ``test_fused_ln_quantize`` checks
    LayerNorm + Int8Quantize against ``LayerNormInt8QuantizeFakeNNPI``.
    """

    def _onnxify_and_check(self, pred_net, shape_hints):
        """Lower `pred_net` through onnxifi and assert that exactly one
        Onnxifi op was produced.  Returns the lowered net."""
        pred_net_onnxified = onnxifi_caffe2_net(
            pred_net,
            shape_hints,
            debug=True,
            adjust_batch=True,
            use_onnx=False
        )
        num_onnxified_ops = sum(
            1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
        np.testing.assert_equal(num_onnxified_ops, 1)
        return pred_net_onnxified

    @given(seed=st.integers(0, 65535),
           batch_size=st.integers(min_value=1, max_value=50),
           size=st.integers(min_value=2, max_value=128),
           epsilon=st.floats(min_value=1e-4, max_value=1e-3),
           elementwise_affine=st.booleans())
    @settings(deadline=datetime.timedelta(seconds=10))
    def test_layernorm(self, seed, batch_size, size, epsilon, elementwise_affine):
        """Lowered LayerNorm output must match LayerNormFakeFP16NNPI."""
        np.random.seed(seed)
        # Reset the workspace
        workspace.ResetWorkspace()
        axis = 1

        dims = np.array([batch_size, size])
        X = np.random.uniform(size=dims).astype(np.float32) - 0.5
        gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
        beta = np.random.randn(*X.shape[axis:]).astype(np.float32)

        # Net to be lowered to Glow.
        pred_net = caffe2_pb2.NetDef()
        pred_net.name = "pred"
        pred_net.external_input.extend(["X", "gamma", "beta"])
        pred_net.external_output.extend(["Y", "mean", "rstd"])
        pred_net.op.add().CopyFrom(
            core.CreateOperator(
                "LayerNorm",
                ["X", "gamma", "beta"] if elementwise_affine else ["X"],
                ["Y", "mean", "rstd"],
                axis=axis,
                epsilon=epsilon,
                elementwise_affine=elementwise_affine
            )
        )

        # Reference net using the fake-FP16 NNPI emulation op.
        pred_net_ref = caffe2_pb2.NetDef()
        pred_net_ref.name = "pred_ref"
        pred_net_ref.external_input.extend(["X", "gamma", "beta"])
        pred_net_ref.external_output.extend(["Y", "mean", "rstd"])
        pred_net_ref.op.add().CopyFrom(
            core.CreateOperator(
                "LayerNormFakeFP16NNPI",
                ["X", "gamma", "beta"] if elementwise_affine else ["X"],
                ["Y", "mean", "rstd"],
                axis=axis,
                epsilon=epsilon,
                elementwise_affine=elementwise_affine
            )
        )

        # Fixed typo: was `shape_hits`.
        shape_hints = {"X": X.shape, "gamma": gamma.shape, "beta": beta.shape}
        pred_net_onnxified = self._onnxify_and_check(pred_net, shape_hints)

        workspace.FeedBlob("X", X)
        workspace.FeedBlob("gamma", gamma)
        workspace.FeedBlob("beta", beta)

        workspace.CreateNet(pred_net_ref)
        workspace.CreateNet(pred_net_onnxified)

        workspace.RunNet(pred_net_ref.name)
        Y_c2 = workspace.FetchBlob("Y")

        # The lowered net was built with adjust_batch=True, so feed X with a
        # leading batch dimension of 1.
        dims1 = np.array([1, *dims])
        X_glow = X.reshape(dims1)
        workspace.FeedBlob("X", X_glow)

        workspace.RunNet(pred_net_onnxified.name)
        Y_glow = workspace.FetchBlob("Y")

        if not np.allclose(Y_glow, Y_c2):
            diff_Y = np.abs(Y_glow - Y_c2)
            print_test_debug_info(
                "layernorm",
                {
                    "seed": seed,
                    "size": size,
                    "batch_size": batch_size,
                    "epsilon": epsilon,
                    "gamma": gamma,
                    "beta": beta,
                    "elementwise_affine": elementwise_affine,
                    "X": X,
                    "Y_glow": Y_glow,
                    "Y_c2": Y_c2,
                    "diff_Y": diff_Y,
                }
            )
            # Was `assert(0)`: stripped under -O and gives no message.
            self.fail("Glow LayerNorm output does not match the C2 reference")

    def _get_scale_zp(self, tensor):
        """Compute int8 quantization parameters (scale, zero_point) covering
        `tensor`'s value range, rounding the scale through fp16 to mimic the
        device's precision."""
        tensor_max = np.max(tensor)
        # Anchor the range at 0 so zero is always representable.
        tensor_min = min(0, np.min(tensor))
        scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
        if scale < 1e-6:
            scale = np.float32(1e-6)
        zero_point = 0 - tensor_min / scale
        zero_point = int(round(np.clip(zero_point, 0, 255.0)))
        return (scale, zero_point)

    def _layernorm_transform(self, X):
        """Reference (numpy) layer normalization of 2-D `X` along axis 1,
        used only to derive realistic quantization parameters."""
        mean = np.mean(X, axis=1)
        mean_exp = np.outer(mean, np.ones(X.shape[1]))
        std = np.std(X, axis=1)
        std_exp = np.outer(std, np.ones(X.shape[1]))
        Y = (X - mean_exp) / std_exp
        return Y

    @given(seed=st.integers(0, 65535),
           batch_size=st.integers(min_value=1, max_value=50),
           size=st.integers(min_value=2, max_value=128),
           epsilon=st.floats(min_value=1e-4, max_value=1e-3),
           elementwise_affine=st.booleans())
    @settings(deadline=datetime.timedelta(seconds=10))
    # re-enable when T74553975 gets fixed
    def test_fused_ln_quantize(self, seed, batch_size, size, epsilon, elementwise_affine):
        """Lowered LayerNorm+Int8Quantize must match
        LayerNormInt8QuantizeFakeNNPI (data, scale and zero point)."""
        np.random.seed(seed)

        # Reset the workspace
        workspace.ResetWorkspace()
        axis = 1

        dims = np.array([batch_size, size])
        X = np.random.uniform(size=dims).astype(np.float32) - 0.5
        gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
        beta = np.random.randn(*X.shape[axis:]).astype(np.float32)

        # Derive quantization params from a numpy reference of the output.
        Y = self._layernorm_transform(X)
        scale, zp = self._get_scale_zp(Y)

        # Net to be lowered: LayerNorm followed by Int8Quantize.
        pred_net = caffe2_pb2.NetDef()
        pred_net.name = "pred"
        pred_net.external_input.extend(["X", "gamma", "beta"])
        pred_net.external_output.extend(["Y_q"])
        pred_net.op.add().CopyFrom(
            core.CreateOperator(
                "LayerNorm",
                ["X", "gamma", "beta"] if elementwise_affine else ["X"],
                ["Y", "mean", "rstd"],
                axis=axis,
                epsilon=epsilon,
                elementwise_affine=elementwise_affine
            )
        )
        pred_net.op.add().CopyFrom(
            core.CreateOperator(
                "Int8Quantize", ["Y"], ["Y_q"], Y_scale=scale, Y_zero_point=zp
            )
        )

        print(pred_net)
        # Reference net using the fused fake-NNPI op.
        pred_net_ref = caffe2_pb2.NetDef()
        pred_net_ref.name = "pred_ref"
        pred_net_ref.external_input.extend(["X", "gamma", "beta"])
        pred_net_ref.external_output.extend(["Y_q"])
        pred_net_ref.op.add().CopyFrom(
            core.CreateOperator(
                "LayerNormInt8QuantizeFakeNNPI",
                ["X", "gamma", "beta"] if elementwise_affine else ["X"],
                ["Y_q", "mean", "rstd"],
                axis=axis,
                epsilon=epsilon,
                elementwise_affine=elementwise_affine,
                Y_scale=scale, Y_zero_point=zp
            )
        )
        # Fixed typo: was `shape_hits`.
        shape_hints = {"X": X.shape, "gamma": gamma.shape, "beta": beta.shape}
        pred_net_onnxified = self._onnxify_and_check(pred_net, shape_hints)

        workspace.FeedBlob("X", X)
        workspace.FeedBlob("gamma", gamma)
        workspace.FeedBlob("beta", beta)

        workspace.CreateNet(pred_net_ref)
        workspace.CreateNet(pred_net_onnxified)

        workspace.RunNet(pred_net_ref.name)
        Y_c2 = workspace.FetchInt8Blob("Y_q")

        workspace.RunNet(pred_net_onnxified.name)
        Y_glow = workspace.FetchInt8Blob("Y_q")

        # Int8 blobs must agree in raw data AND quantization params.
        if not np.allclose(Y_glow.data, Y_c2.data) or \
           Y_glow.scale != Y_c2.scale or Y_glow.zero_point != Y_c2.zero_point:
            diff_Y = np.abs(Y_glow.data.astype(np.float32) - Y_c2.data.astype(np.float32))
            print_test_debug_info(
                "layernorm",
                {
                    "seed": seed,
                    "size": size,
                    "batch_size": batch_size,
                    "epsilon": epsilon,
                    "gamma": gamma,
                    "beta": beta,
                    "elementwise_affine": elementwise_affine,
                    "X": X,
                    "Y_glow": Y_glow,
                    "Y_c2": Y_c2,
                    "diff_Y": diff_Y,
                }
            )
            # Was `assert(0)`: stripped under -O and gives no message.
            self.fail("Glow fused LayerNorm+Int8Quantize does not match reference")
Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.