// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
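
// Normalize applies in-place L2 normalization to a CHW blob followed by a
// learned scale: depending on across_spatial/across_channel, the squared sum
// is taken over the whole blob, over each channel plane, or across channels
// at each spatial position.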

#include "normalize.h"

namespace ncnn {

Normalize::Normalize()
{
    one_blob_only = true;
    support_inplace = true;
}
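
// param ids read below:
//   0 = across_spatial, 1 = channel_shared, 2 = eps, 3 = scale_data_size,
//   4 = across_channel, 9 = eps_mode (0 = caffe/mxnet, 1 = pytorch, 2 = tensorflow)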

int Normalize::load_param(const ParamDict& pd)
{
    across_spatial = pd.get(0, 0);
    across_channel = pd.get(4, 1);
    channel_shared = pd.get(1, 0);
    eps = pd.get(2, 0.0001f);
    eps_mode = pd.get(9, 0);
    scale_data_size = pd.get(3, 0);

    return 0;
}

int Normalize::load_model(const ModelBin& mb)
{
    scale_data = mb.load(scale_data_size, 1);
    if (scale_data.empty())
        return -100;

    return 0;
}

int Normalize::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;
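
    // each branch computes a = 1 / sqrt(ssum) with framework-specific eps handling:
    //   eps_mode 0 (caffe/mxnet):  a = 1 / sqrt(ssum + eps)
    //   eps_mode 1 (pytorch):      a = 1 / max(sqrt(ssum), eps)
    //   eps_mode 2 (tensorflow):   a = 1 / sqrt(max(ssum, eps))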

    if (across_spatial && across_channel)
    {
        // square
        Mat square_sum_blob;
        square_sum_blob.create(channels, elemsize, opt.workspace_allocator);
        if (square_sum_blob.empty())
            return -100;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const float* ptr = bottom_top_blob.channel(q);

            float ssum = 0.f;
            for (int i = 0; i < size; i++)
            {
                ssum += ptr[i] * ptr[i];
            }

            square_sum_blob[q] = ssum;
        }

        float ssum = 0.f;
        for (int q = 0; q < channels; q++)
        {
            ssum += square_sum_blob[q];
        }

        float a;
        if (eps_mode == 0) // caffe/mxnet
        {
            a = 1.f / sqrtf(ssum + eps);
        }
        else if (eps_mode == 1) // pytorch
        {
            a = 1.f / std::max(sqrtf(ssum), eps);
        }
        else //if (eps_mode == 2) // tensorflow
        {
            a = 1.f / sqrtf(std::max(ssum, eps));
        }

        if (channel_shared)
        {
            float scale = a * scale_data[0];

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * scale;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                float scale = a * scale_data[q];

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * scale;
                }
            }
        }

        return 0;
    }
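
    // normalize each channel plane by its own L2 norm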
    if (across_spatial && !across_channel)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);

            float ssum = 0.f;
            for (int i = 0; i < size; i++)
            {
                ssum += ptr[i] * ptr[i];
            }

            float a;
            if (eps_mode == 0) // caffe/mxnet
            {
                a = 1.f / sqrtf(ssum + eps);
            }
            else if (eps_mode == 1) // pytorch
            {
                a = 1.f / std::max(sqrtf(ssum), eps);
            }
            else //if (eps_mode == 2) // tensorflow
            {
                a = 1.f / sqrtf(std::max(ssum, eps));
            }

            float scale = a * (channel_shared ? scale_data[0] : scale_data[q]);

            for (int i = 0; i < size; i++)
            {
                ptr[i] = ptr[i] * scale;
            }
        }

        return 0;
    }
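
    // normalize across channels independently at each spatial position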
    if (!across_spatial && across_channel)
    {
        // square sum, 1 / sqrt(ssum)
        Mat square_sum_blob;
        square_sum_blob.create(size, elemsize, opt.workspace_allocator);
        if (square_sum_blob.empty())
            return -100;

        if (channel_shared)
        {
            float scale = scale_data[0];

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < size; i++)
            {
                float ssum = 0.f;
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_top_blob.channel(q);
                    ssum += ptr[i] * ptr[i];
                }

                float a;
                if (eps_mode == 0) // caffe/mxnet
                {
                    a = 1.f / sqrtf(ssum + eps);
                }
                else if (eps_mode == 1) // pytorch
                {
                    a = 1.f / std::max(sqrtf(ssum), eps);
                }
                else //if (eps_mode == 2) // tensorflow
                {
                    a = 1.f / sqrtf(std::max(ssum, eps));
                }

                square_sum_blob[i] = a * scale;
            }

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * square_sum_blob[i];
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < size; i++)
            {
                float ssum = 0.f;
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_top_blob.channel(q);
                    ssum += ptr[i] * ptr[i];
                }

                float a;
                if (eps_mode == 0) // caffe/mxnet
                {
                    a = 1.f / sqrtf(ssum + eps);
                }
                else if (eps_mode == 1) // pytorch
                {
                    a = 1.f / std::max(sqrtf(ssum), eps);
                }
                else //if (eps_mode == 2) // tensorflow
                {
                    a = 1.f / sqrtf(std::max(ssum, eps));
                }

                square_sum_blob[i] = a;
            }

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                float scale = scale_data[q];

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * square_sum_blob[i] * scale;
                }
            }
        }

        return 0;
    }

    return 0;
}

} // namespace ncnn
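
// usage sketch (hypothetical layer/blob names, not from this file): a
// Normalize layer line in an ncnn .param file that keeps 256 per-channel
// scales, normalizes across channels only, and uses pytorch-style eps:
//
//   Normalize norm1 1 1 conv7 norm1 0=0 1=0 2=0.0001 3=256 4=1 9=1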