ncnn

Форк
0
/
pooling1d.cpp 
322 строки · 9.3 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#include "pooling1d.h"
16

17
#include "layer_type.h"
18

19
#include <float.h>
20

21
namespace ncnn {
22

23
Pooling1D::Pooling1D()
{
    // Pooling produces an output blob whose width differs from the input,
    // so it can never run in-place; it consumes exactly one input blob.
    support_inplace = false;
    one_blob_only = true;
}
28

29
int Pooling1D::load_param(const ParamDict& pd)
30
{
31
    pooling_type = pd.get(0, 0);
32
    kernel_w = pd.get(1, 0);
33
    stride_w = pd.get(2, 1);
34
    pad_left = pd.get(3, 0);
35
    pad_right = pd.get(14, pad_left);
36
    global_pooling = pd.get(4, 0);
37
    pad_mode = pd.get(5, 0);
38
    avgpool_count_include_pad = pd.get(6, 0);
39
    adaptive_pooling = pd.get(7, 0);
40
    out_w = pd.get(8, 0);
41

42
    return 0;
43
}
44

45
// Forward pass for 1D pooling over a (w, h) blob: w is the pooled axis and
// each of the h rows is pooled independently.
//
// Dispatches, in order: global pooling, adaptive pooling, then the regular
// padded/strided sliding-window path.
// Returns 0 on success, -100 on output allocation failure.
int Pooling1D::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;

    //     NCNN_LOGE("Pooling1D     input %d x %d  pad = %d %d  ksize=%d  stride=%d", w, h, pad_left, pad_right, kernel_w, stride_w);
    if (global_pooling)
    {
        // Global pooling reduces each row to a single scalar -> output shape (h).
        top_blob.create(h, elemsize, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        if (pooling_type == PoolMethod_MAX)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* ptr = bottom_blob.row(q);

                // Seed with the first element, then scan the whole row.
                float max = ptr[0];
                for (int i = 0; i < w; i++)
                {
                    max = std::max(max, ptr[i]);
                }

                top_blob[q] = max;
            }
        }
        else if (pooling_type == PoolMethod_AVE)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* ptr = bottom_blob.row(q);

                float sum = 0.f;
                for (int i = 0; i < w; i++)
                {
                    sum += ptr[i];
                }

                top_blob[q] = sum / w;
            }
        }

        return 0;
    }

    if (adaptive_pooling)
    {
        // Adaptive pooling: output width is fixed at out_w; output element j
        // covers the input span [floor(w*j/out_w), ceil(w*(j+1)/out_w)).
        top_blob.create(out_w, h, elemsize, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        if (pooling_type == PoolMethod_MAX)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* inptr = bottom_blob.row(q);
                float* outptr = top_blob.row(q);

                for (int j = 0; j < out_w; j++)
                {
                    // floor div
                    const int iw0 = w * j / out_w;
                    // ceil div
                    const int iw1 = (w * (j + 1) + out_w - 1) / out_w;

                    float max = inptr[iw0];
                    for (int iw = iw0; iw < iw1; iw++)
                    {
                        max = std::max(max, inptr[iw]);
                    }

                    outptr[j] = max;
                }
            }
        }
        else if (pooling_type == PoolMethod_AVE)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* inptr = bottom_blob.row(q);
                float* outptr = top_blob.row(q);

                for (int j = 0; j < out_w; j++)
                {
                    // floor div
                    const int iw0 = w * j / out_w;
                    // ceil div
                    const int iw1 = (w * (j + 1) + out_w - 1) / out_w;
                    // wk = actual window size for this output element (may
                    // vary by one when w is not a multiple of out_w).
                    const int wk = iw1 - iw0;

                    float sum = 0;
                    for (int iw = iw0; iw < iw1; iw++)
                    {
                        sum += inptr[iw];
                    }

                    outptr[j] = sum / wk;
                }
            }
        }

        return 0;
    }

    // Regular path: pad first according to pad_mode, then slide the kernel.
    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    // Re-read dimensions: w now includes any padding that was applied.
    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int outw = (w - kernel_w) / stride_w + 1;

    top_blob.create(outw, h, elemsize, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    if (pooling_type == PoolMethod_MAX)
    {
        // Padding used -FLT_MAX (see make_padding), so padded elements never win.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < h; q++)
        {
            const float* ptr = bottom_blob_bordered.row(q);
            float* outptr = top_blob.row(q);

            for (int j = 0; j < outw; j++)
            {
                const float* sptr = ptr + j * stride_w;

                float max = sptr[0];

                for (int k = 0; k < kernel_w; k++)
                {
                    float val = sptr[k];
                    max = std::max(max, val);
                }

                outptr[j] = max;
            }
        }
    }
    else if (pooling_type == PoolMethod_AVE)
    {
        if (avgpool_count_include_pad == 0)
        {
            // Exclude-pad averaging: divide only by the number of real
            // (non-padding) elements under the window.
            int wtailpad = 0;

            if (pad_mode == 0) // full padding
            {
                // Extra tail padding that make_padding added so the last
                // window fits; it must also be excluded from the divisor.
                wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
            }

            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* ptr = bottom_blob_bordered.row(q);
                float* outptr = top_blob.row(q);

                for (int j = 0; j < outw; j++)
                {
                    int sx0 = j * stride_w;

                    float sum = 0;
                    int area = 0;

                    for (int kj = 0; kj < kernel_w; kj++)
                    {
                        int sx = sx0 + kj;

                        // Skip leading padding columns.
                        if (sx < pad_left)
                            continue;

                        // Past the last real column: the rest of the window
                        // is right/tail padding, stop accumulating.
                        if (sx >= w - pad_right - wtailpad)
                            break;

                        float val = ptr[sx];
                        sum += val;
                        area += 1;
                    }

                    // NOTE(review): assumes every window overlaps at least one
                    // real element (area > 0); a window entirely inside the
                    // padding would divide by zero — confirm pad/kernel limits.
                    outptr[j] = sum / area;
                }
            }
        }
        else // if (avgpool_count_include_pad == 1)
        {
            // Include-pad averaging: padded zeros count, so the divisor is
            // always the full kernel width.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < h; q++)
            {
                const float* ptr = bottom_blob_bordered.row(q);
                float* outptr = top_blob.row(q);

                for (int j = 0; j < outw; j++)
                {
                    const float* sptr = ptr + j * stride_w;

                    float sum = 0;

                    for (int k = 0; k < kernel_w; k++)
                    {
                        float val = sptr[k];
                        sum += val;
                    }

                    outptr[j] = sum / kernel_w;
                }
            }
        }
    }

    return 0;
}
264

265
void Pooling1D::make_padding(const Mat& bottom_blob, Mat& bottom_blob_bordered, const Option& opt) const
266
{
267
    int w = bottom_blob.w;
268

269
    bottom_blob_bordered = bottom_blob;
270

271
    float pad_value = 0.f;
272
    if (pooling_type == PoolMethod_MAX)
273
    {
274
        pad_value = bottom_blob.elemsize == 1 ? -128.f : -FLT_MAX;
275
    }
276
    else if (pooling_type == PoolMethod_AVE)
277
    {
278
        pad_value = 0.f;
279
    }
280

281
    int wtailpad = 0;
282

283
    if (pad_mode == 0) // full padding
284
    {
285
        int wtail = (w + pad_left + pad_right - kernel_w) % stride_w;
286

287
        if (wtail != 0)
288
            wtailpad = stride_w - wtail;
289

290
        Option opt_b = opt;
291
        opt_b.blob_allocator = opt.workspace_allocator;
292
        copy_make_border(bottom_blob, bottom_blob_bordered, 0, 0, pad_left, pad_right + wtailpad, BORDER_CONSTANT, pad_value, opt_b);
293
    }
294
    else if (pad_mode == 1) // valid padding
295
    {
296
        Option opt_b = opt;
297
        opt_b.blob_allocator = opt.workspace_allocator;
298
        copy_make_border(bottom_blob, bottom_blob_bordered, 0, 0, pad_left, pad_right, BORDER_CONSTANT, pad_value, opt_b);
299
    }
300
    else if (pad_mode == 2) // tensorflow padding=SAME or onnx padding=SAME_UPPER
301
    {
302
        int wpad = kernel_w + (w - 1) / stride_w * stride_w - w;
303
        if (wpad > 0)
304
        {
305
            Option opt_b = opt;
306
            opt_b.blob_allocator = opt.workspace_allocator;
307
            copy_make_border(bottom_blob, bottom_blob_bordered, 0, 0, wpad / 2, wpad - wpad / 2, BORDER_CONSTANT, pad_value, opt_b);
308
        }
309
    }
310
    else if (pad_mode == 3) // onnx padding=SAME_LOWER
311
    {
312
        int wpad = kernel_w + (w - 1) / stride_w * stride_w - w;
313
        if (wpad > 0)
314
        {
315
            Option opt_b = opt;
316
            opt_b.blob_allocator = opt.workspace_allocator;
317
            copy_make_border(bottom_blob, bottom_blob_bordered, 0, 0, wpad - wpad / 2, wpad / 2, BORDER_CONSTANT, pad_value, opt_b);
318
        }
319
    }
320
}
321

322
} // namespace ncnn
323

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.