// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "deconvolutiondepthwise_mips.h"

#include "layer_type.h"

#if __mips_msa
#include <msa.h>
#endif // __mips_msa

#include "mips_activation.h"
#include "mips_usability.h"

namespace ncnn {

DeconvolutionDepthWise_mips::DeconvolutionDepthWise_mips()
{
#if __mips_msa
    support_packing = true;
#endif // __mips_msa
}

int DeconvolutionDepthWise_mips::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;

    const int maxk = kernel_w * kernel_h;
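    // weight_data is laid out as maxk * (channels / group) * (num_output / group) * group,
    // so the input channel count can be recovered from the flattened weight size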
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;

    // depth-wise
    if (channels == group && group == num_output)
    {
        int elempack = 1;
#if __mips_msa
        if (opt.use_packing_layout)
        {
            elempack = channels % 4 == 0 ? 4 : 1;
        }
#endif

        Mat weight_data_transposed(weight_data.w);
        {
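            // reverse the kernel taps so that forward() can evaluate the
            // deconvolution as a plain gather over output positions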
            float* pt = weight_data_transposed;
            const float* p = weight_data;

            for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
            {
                for (int k = 0; k < maxk; k++)
                {
                    pt[maxk - 1 - k] = p[k];
                }

                p += maxk;
                pt += maxk;
            }
        }

#if __mips_msa
        // pack4
        if (elempack == 4)
        {
            Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
            convert_packing(weight_data_r2, weight_data_tm, 4, opt);
        }
#endif // __mips_msa

        if (elempack == 1)
        {
            weight_data_tm = weight_data_transposed;
        }

        if (opt.lightmode)
            weight_data.release();

        return 0;
    }

    // group deconvolution
    create_group_ops(opt);

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int DeconvolutionDepthWise_mips::create_group_ops(const Option& opt)
{
    // create Deconvolution op for each group
    const int maxk = kernel_w * kernel_h;
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;

    for (int i = 0; i < (int)group_ops.size(); i++)
        delete group_ops[i];

    group_ops.clear();

    const int channels_g = channels / group;
    const int num_output_g = num_output / group;

    group_ops.resize(group);

    for (int g = 0; g < group; g++)
    {
        Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
        Mat bias_data_g;
        if (bias_term)
            bias_data_g = bias_data.range(num_output_g * g, num_output_g);

        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);

        // set param
        ncnn::ParamDict pd;
        pd.set(0, num_output_g); // num_output
        pd.set(1, kernel_w);
        pd.set(11, kernel_h);
        pd.set(2, dilation_w);
        pd.set(12, dilation_h);
        pd.set(3, stride_w);
        pd.set(13, stride_h);
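        // padding is deliberately left at zero: forward() lets each group produce the
        // full bordered output and trims it afterwards with cut_padding()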
        pd.set(4, 0);  // pad_w
        pd.set(14, 0); // pad_h
        pd.set(18, output_pad_right);
        pd.set(19, output_pad_bottom);
        pd.set(5, bias_term);
        pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
        pd.set(9, activation_type);
        pd.set(10, activation_params);

        op->load_param(pd);

        // set weights
        if (bias_term)
        {
            ncnn::Mat weights[2];
            weights[0] = weight_data_g;
            weights[1] = bias_data_g;

            op->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[1];
            weights[0] = weight_data_g;

            op->load_model(ModelBinFromMatArray(weights));
        }

        op->create_pipeline(opt);

        group_ops[g] = op;
    }

    return 0;
}

int DeconvolutionDepthWise_mips::destroy_pipeline(const Option& opt)
{
    for (int i = 0; i < (int)group_ops.size(); i++)
    {
        group_ops[i]->destroy_pipeline(opt);
        delete group_ops[i];
    }
    group_ops.clear();

    return 0;
}

int DeconvolutionDepthWise_mips::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // convolve with NxN kernel
    // value = value + bias

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
    int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
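    // full (uncropped) output extent of the transposed convolution;
    // pad_left/right/top/bottom is removed at the end by cut_padding()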
    int out_elempack = 1;
#if __mips_msa
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif
    size_t out_elemsize = elemsize / elempack * out_elempack;

    Mat top_blob_bordered;
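    // with padding or a fixed output size requested, accumulate into a temporary
    // bordered blob from the workspace allocator; otherwise write straight into top_blob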
    if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
    {
        top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
    }
    else
    {
        top_blob_bordered = top_blob;
        top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    }
    if (top_blob_bordered.empty())
        return -100;

    const int maxk = kernel_w * kernel_h;

    // depth-wise
    if (channels * elempack == group && group == num_output)
    {
#if __mips_msa
        if (elempack == 4)
        {
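            // gather formulation: output (i, j) accumulates input (sy, sx) through
            // kernel tap (y, x) when i + y * dilation_h - (kernel_extent_h - 1) is a
            // non-negative multiple of stride_h (and likewise for j / x / stride_w);
            // the kernel taps were already reversed in create_pipeline()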
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int g = 0; g < channels; g++)
                {
                    float* outptr = top_blob_bordered.channel(g);
                    const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
                    const Mat m = bottom_blob.channel(g);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            v4f32 _sum = (v4f32)__msa_fill_w(0);

                            if (bias_term)
                            {
                                _sum = (v4f32)__msa_ld_w((const float*)bias_data + g * 4, 0);
                            }

                            for (int y = 0; y < kernel_h; y++)
                            {
                                int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                                if (sys < 0 || sys % stride_h != 0)
                                    continue;

                                int sy = sys / stride_h;
                                if (sy >= h)
                                    continue;

                                for (int x = 0; x < kernel_w; x++)
                                {
                                    int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                                    if (sxs < 0 || sxs % stride_w != 0)
                                        continue;

                                    int sx = sxs / stride_w;
                                    if (sx >= w)
                                        continue;

                                    const float* sptr = m.row(sy) + sx * 4;

                                    int k = y * kernel_w + x;

                                    v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                                    v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0);
                                    _sum = __msa_fmadd_w(_sum, _val, _w);
                                }
                            }

                            _sum = activation_ps(_sum, activation_type, activation_params);

                            __msa_st_w((v4i32)_sum, outptr + j * 4, 0);
                        }

                        outptr += outw * 4;
                    }
                }
            }
        }
#endif // __mips_msa

        if (elempack == 1)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int g = 0; g < channels; g++)
            {
                float* outptr = top_blob_bordered.channel(g);
                const float* kptr = (const float*)weight_data_tm + maxk * g;
                const Mat m = bottom_blob.channel(g);

                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        float sum = 0.f;

                        if (bias_term)
                        {
                            sum = bias_data[g];
                        }

                        for (int y = 0; y < kernel_h; y++)
                        {
                            int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                            if (sys < 0 || sys % stride_h != 0)
                                continue;

                            int sy = sys / stride_h;
                            if (sy >= h)
                                continue;

                            const float* sptr = m.row(sy);

                            for (int x = 0; x < kernel_w; x++)
                            {
                                int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                                if (sxs < 0 || sxs % stride_w != 0)
                                    continue;

                                int sx = sxs / stride_w;
                                if (sx >= w)
                                    continue;

                                float val = sptr[sx];

                                int k = y * kernel_w + x;

                                float w = kptr[k];

                                sum += val * w;
                            }
                        }

                        sum = activation_ss(sum, activation_type, activation_params);

                        outptr[j] = sum;
                    }

                    outptr += outw;
                }
            }
        }
    }
    else
    {
        // group deconvolution
        const int channels_g = channels * elempack / group;
        const int num_output_g = num_output / group;

        int g_elempack = 1;
        int out_g_elempack = 1;
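        // each per-group sub-op only sees channels_g inputs and num_output_g outputs,
        // so the packing factor must divide those counts; fall back to pack1 otherwise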
#if __mips_msa
        if (opt.use_packing_layout)
        {
            g_elempack = channels_g % 4 == 0 ? 4 : 1;
            out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
        }
#endif

        // unpacking
        Mat bottom_blob_unpacked = bottom_blob;
        if (elempack > g_elempack)
        {
            Option opt_p = opt;
            opt_p.blob_allocator = opt.workspace_allocator;
            convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_p);
        }

        Mat top_blob_bordered_unpacked = top_blob_bordered;
        if (out_g_elempack < out_elempack)
        {
            top_blob_bordered_unpacked.create(outw, outh, num_output, out_elemsize / out_elempack, 1, opt.workspace_allocator);
            if (top_blob_bordered_unpacked.empty())
                return -100;
        }

        for (int g = 0; g < group; g++)
        {
            const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
            Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);

            const ncnn::Layer* op = group_ops[g];

            Option opt_g = opt;
            opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;

            // forward
            op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
        }

        // packing
        if (out_g_elempack < out_elempack)
        {
            convert_packing(top_blob_bordered_unpacked, top_blob_bordered, 4, opt);
        }
        else
        {
            top_blob_bordered = top_blob_bordered_unpacked;
        }
    }

    cut_padding(top_blob_bordered, top_blob, opt);
    if (top_blob.empty())
        return -100;

    return 0;
}

int DeconvolutionDepthWise_mips::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& _weight_data = bottom_blobs[1];
    Mat& top_blob = top_blobs[0];

    const int _num_input = bottom_blob.c * bottom_blob.elempack;
    const int _kernel_w = _weight_data.w;
    const int _kernel_h = _weight_data.h;
    const int _num_output = _weight_data.d * group;
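    // dynamic weight blob layout: w = kernel_w, h = kernel_h, d = num_output / group;
    // the optional bias arrives as bottom_blobs[2] when bias_term is set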

    Mat weight_data_flattened;
    flatten(_weight_data, weight_data_flattened, opt);
    if (weight_data_flattened.empty())
        return -100;

    // weight_data_flattened as pack1
    weight_data_flattened.w *= weight_data_flattened.elempack;
    weight_data_flattened.elemsize /= weight_data_flattened.elempack;
    weight_data_flattened.elempack = 1;

    // transpose group-inch/group-outch/group-kh-kw to group-outch/group-inch/group-kh-kw
    Mat weight_data_transposed;
    {
        weight_data_transposed.create(_kernel_w * _kernel_h * _num_output * _num_input / group, 4u, opt.workspace_allocator);
        if (weight_data_transposed.empty())
            return -100;

        const int outch_g = _num_output / group;
        const int inch_g = _num_input / group;
        const int maxk = _kernel_h * _kernel_w;

        for (int g = 0; g < group; g++)
        {
            // reorder weight from inch-outch to outch-inch
            float* wg2 = (float*)weight_data_transposed + g * outch_g * inch_g * maxk;
            const float* wg = (const float*)weight_data_flattened + g * inch_g * outch_g * maxk;
            for (int i = 0; i < outch_g; i++)
            {
                for (int j = 0; j < inch_g; j++)
                {
                    for (int k = 0; k < maxk; k++)
                    {
                        wg2[(i * inch_g + j) * maxk + k] = wg[(j * outch_g + i) * maxk + k];
                    }
                }
            }
        }
    }

    Mat bias_data_flattened;
    if (bias_term)
    {
        const Mat& _bias_data = bottom_blobs[2];
        flatten(_bias_data, bias_data_flattened, opt);
        if (bias_data_flattened.empty())
            return -100;

        // bias_data_flattened as pack1
        bias_data_flattened.w *= bias_data_flattened.elempack;
        bias_data_flattened.elemsize /= bias_data_flattened.elempack;
        bias_data_flattened.elempack = 1;
    }

    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::DeconvolutionDepthWise);

    ncnn::ParamDict pd;
    pd.set(0, _num_output);
    pd.set(1, _kernel_w);
    pd.set(11, _kernel_h);
    pd.set(2, dilation_w);
    pd.set(12, dilation_h);
    pd.set(3, stride_w);
    pd.set(13, stride_h);
    pd.set(4, pad_left);
    pd.set(15, pad_right);
    pd.set(14, pad_top);
    pd.set(16, pad_bottom);
    pd.set(18, output_pad_right);
    pd.set(19, output_pad_bottom);
    pd.set(20, output_w);
    pd.set(21, output_h);
    pd.set(5, bias_term);
    pd.set(6, weight_data_transposed.w);
    pd.set(7, group);
    pd.set(9, activation_type);
    pd.set(10, activation_params);

    op->load_param(pd);

    ncnn::Mat weights[2];
    weights[0] = weight_data_transposed;
    weights[1] = bias_data_flattened;

    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);

    op->forward(bottom_blob, top_blob, opt);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

} // namespace ncnn
