// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "normalize.h"

#include <algorithm> // std::max
#include <math.h>    // sqrtf

namespace ncnn {

Normalize::Normalize()
{
    // one input blob; the output is written in place over the input
    one_blob_only = true;
    support_inplace = true;
}
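
// ParamDict ids: 0=across_spatial, 1=channel_shared, 2=eps,
// 3=scale_data_size, 4=across_channel, 9=eps_mode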
int Normalize::load_param(const ParamDict& pd)
{
    across_spatial = pd.get(0, 0);
    across_channel = pd.get(4, 1);
    channel_shared = pd.get(1, 0);
    eps = pd.get(2, 0.0001f);
    eps_mode = pd.get(9, 0);
    scale_data_size = pd.get(3, 0);

    return 0;
}

int Normalize::load_model(const ModelBin& mb)
{
    // one float per channel, or a single float when channel_shared
    scale_data = mb.load(scale_data_size, 1);
    if (scale_data.empty())
        return -100;

    return 0;
}
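
// forward_inplace scales each element by a = 1/norm, where the L2 norm is taken
// over the axes selected by across_spatial/across_channel, then multiplies by
// the learned scale. eps enters the norm according to eps_mode:
//   0 (caffe/mxnet):  1 / sqrt(ssum + eps)
//   1 (pytorch):      1 / max(sqrt(ssum), eps)
//   2 (tensorflow):   1 / sqrt(max(ssum, eps))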
int Normalize::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;
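
    // across_spatial && across_channel: a single L2 norm over the whole blob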
    if (across_spatial && across_channel)
    {
        // per-channel sum of squares
        Mat square_sum_blob;
        square_sum_blob.create(channels, elemsize, opt.workspace_allocator);
        if (square_sum_blob.empty())
            return -100;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const float* ptr = bottom_top_blob.channel(q);

            float ssum = 0.f;
            for (int i = 0; i < size; i++)
            {
                ssum += ptr[i] * ptr[i];
            }

            square_sum_blob[q] = ssum;
        }

        // reduce the per-channel sums to one scalar
        float ssum = 0.f;
        for (int q = 0; q < channels; q++)
        {
            ssum += square_sum_blob[q];
        }

        float a;
        if (eps_mode == 0) // caffe/mxnet
        {
            a = 1.f / sqrtf(ssum + eps);
        }
        else if (eps_mode == 1) // pytorch
        {
            a = 1.f / std::max(sqrtf(ssum), eps);
        }
        else //if (eps_mode == 2) // tensorflow
        {
            a = 1.f / sqrtf(std::max(ssum, eps));
        }

        if (channel_shared)
        {
            float scale = a * scale_data[0];

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * scale;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                float scale = a * scale_data[q];

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * scale;
                }
            }
        }

        return 0;
    }
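
    // across_spatial && !across_channel: per-channel L2 norm over each spatial plane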
    if (across_spatial && !across_channel)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);

            // sum of squares over this channel's spatial plane
            float ssum = 0.f;
            for (int i = 0; i < size; i++)
            {
                ssum += ptr[i] * ptr[i];
            }

            float a;
            if (eps_mode == 0) // caffe/mxnet
            {
                a = 1.f / sqrtf(ssum + eps);
            }
            else if (eps_mode == 1) // pytorch
            {
                a = 1.f / std::max(sqrtf(ssum), eps);
            }
            else //if (eps_mode == 2) // tensorflow
            {
                a = 1.f / sqrtf(std::max(ssum, eps));
            }

            float scale = a * (channel_shared ? scale_data[0] : scale_data[q]);

            for (int i = 0; i < size; i++)
            {
                ptr[i] = ptr[i] * scale;
            }
        }

        return 0;
    }
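
    // !across_spatial && across_channel: per-position L2 norm across channels
    // (the classic SSD-style Normalize / L2Norm layer)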
    if (!across_spatial && across_channel)
    {
        // square sum, then 1 / sqrt(ssum) per spatial position
        Mat square_sum_blob;
        square_sum_blob.create(size, elemsize, opt.workspace_allocator);
        if (square_sum_blob.empty())
            return -100;

        if (channel_shared)
        {
            float scale = scale_data[0];

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < size; i++)
            {
                float ssum = 0.f;
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_top_blob.channel(q);
                    ssum += ptr[i] * ptr[i];
                }

                float a;
                if (eps_mode == 0) // caffe/mxnet
                {
                    a = 1.f / sqrtf(ssum + eps);
                }
                else if (eps_mode == 1) // pytorch
                {
                    a = 1.f / std::max(sqrtf(ssum), eps);
                }
                else //if (eps_mode == 2) // tensorflow
                {
                    a = 1.f / sqrtf(std::max(ssum, eps));
                }

                // fuse the shared scale into the per-position factor
                square_sum_blob[i] = a * scale;
            }

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * square_sum_blob[i];
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < size; i++)
            {
                float ssum = 0.f;
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_top_blob.channel(q);
                    ssum += ptr[i] * ptr[i];
                }

                float a;
                if (eps_mode == 0) // caffe/mxnet
                {
                    a = 1.f / sqrtf(ssum + eps);
                }
                else if (eps_mode == 1) // pytorch
                {
                    a = 1.f / std::max(sqrtf(ssum), eps);
                }
                else //if (eps_mode == 2) // tensorflow
                {
                    a = 1.f / sqrtf(std::max(ssum, eps));
                }

                square_sum_blob[i] = a;
            }

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                float scale = scale_data[q];

                for (int i = 0; i < size; i++)
                {
                    ptr[i] = ptr[i] * square_sum_blob[i] * scale;
                }
            }
        }

        return 0;
    }
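
    // !across_spatial && !across_channel: no axis selected, blob left unchanged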
    return 0;
}

} // namespace ncnn
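
// Usage sketch (not part of the original file): how this layer might be driven
// standalone, assuming ncnn's generic layer API (create_layer, ParamDict,
// ModelBinFromMatArray); the parameter values below are illustrative only.
//
//   ncnn::Layer* op = ncnn::create_layer("Normalize");
//   ncnn::ParamDict pd;
//   pd.set(0, 1);       // across_spatial
//   pd.set(4, 0);       // across_channel
//   pd.set(1, 1);       // channel_shared
//   pd.set(2, 0.0001f); // eps
//   pd.set(3, 1);       // scale_data_size
//   op->load_param(pd);
//
//   ncnn::Mat scales(1);
//   scales.fill(1.f);
//   ncnn::ModelBinFromMatArray mb(&scales);
//   op->load_model(mb);
//
//   ncnn::Option opt;
//   op->create_pipeline(opt);
//   op->forward_inplace(blob, opt); // blob is a CHW float ncnn::Mat
//   op->destroy_pipeline(opt);
//   delete op;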