// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "deconvolutiondepthwise_mips.h"

#if __mips_msa
#include <msa.h>
#endif // __mips_msa

#include "layer_type.h"

#include "mips_activation.h"
#include "mips_usability.h"
DeconvolutionDepthWise_mips::DeconvolutionDepthWise_mips()
{
#if __mips_msa
    // MSA registers hold 4 floats, so the pack4 layout is only usable
    // when the MSA instruction set is available at compile time.
    support_packing = true;
#endif // __mips_msa
}
int DeconvolutionDepthWise_mips::create_pipeline(const Option& opt)
40
const int maxk = kernel_w * kernel_h;
41
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
44
if (channels == group && group == num_output)
48
if (opt.use_packing_layout)
50
elempack = channels % 4 == 0 ? 4 : 1;
54
Mat weight_data_transposed(weight_data.w);
56
float* pt = weight_data_transposed;
57
const float* p = weight_data;
59
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
61
for (int k = 0; k < maxk; k++)
63
pt[maxk - 1 - k] = p[k];
75
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
76
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
82
weight_data_tm = weight_data_transposed;
86
weight_data.release();
92
create_group_ops(opt);
95
weight_data.release();
100
int DeconvolutionDepthWise_mips::create_group_ops(const Option& opt)
102
// create Deconvolution op for each group
103
const int maxk = kernel_w * kernel_h;
104
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
106
for (int i = 0; i < (int)group_ops.size(); i++)
111
const int channels_g = channels / group;
112
const int num_output_g = num_output / group;
114
group_ops.resize(group);
116
for (int g = 0; g < group; g++)
118
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
121
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
123
ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
127
pd.set(0, num_output_g); // num_output
129
pd.set(11, kernel_h);
130
pd.set(2, dilation_w);
131
pd.set(12, dilation_h);
133
pd.set(13, stride_h);
134
pd.set(4, 0); // pad_w
135
pd.set(14, 0); // pad_h
136
pd.set(18, output_pad_right);
137
pd.set(19, output_pad_bottom);
138
pd.set(5, bias_term);
139
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
140
pd.set(9, activation_type);
141
pd.set(10, activation_params);
148
ncnn::Mat weights[2];
149
weights[0] = weight_data_g;
150
weights[1] = bias_data_g;
152
op->load_model(ModelBinFromMatArray(weights));
156
ncnn::Mat weights[1];
157
weights[0] = weight_data_g;
159
op->load_model(ModelBinFromMatArray(weights));
162
op->create_pipeline(opt);
170
int DeconvolutionDepthWise_mips::destroy_pipeline(const Option& opt)
172
for (int i = 0; i < (int)group_ops.size(); i++)
174
group_ops[i]->destroy_pipeline(opt);
182
int DeconvolutionDepthWise_mips::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
184
// convolv with NxN kernel
185
// value = value + bias
187
int w = bottom_blob.w;
188
int h = bottom_blob.h;
189
int channels = bottom_blob.c;
190
size_t elemsize = bottom_blob.elemsize;
191
int elempack = bottom_blob.elempack;
193
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
194
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
196
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
197
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
198
int out_elempack = 1;
200
if (opt.use_packing_layout)
202
out_elempack = num_output % 4 == 0 ? 4 : 1;
205
size_t out_elemsize = elemsize / elempack * out_elempack;
207
Mat top_blob_bordered;
208
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
210
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
214
top_blob_bordered = top_blob;
215
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
217
if (top_blob_bordered.empty())
220
const int maxk = kernel_w * kernel_h;
223
if (channels * elempack == group && group == num_output)
229
#pragma omp parallel for num_threads(opt.num_threads)
230
for (int g = 0; g < channels; g++)
232
float* outptr = top_blob_bordered.channel(g);
233
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
234
const Mat m = bottom_blob.channel(g);
236
for (int i = 0; i < outh; i++)
238
for (int j = 0; j < outw; j++)
240
v4f32 _sum = (v4f32)__msa_fill_w(0);
244
_sum = (v4f32)__msa_ld_w((const float*)bias_data + g * 4, 0);
247
for (int y = 0; y < kernel_h; y++)
249
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
250
if (sys < 0 || sys % stride_h != 0)
253
int sy = sys / stride_h;
257
for (int x = 0; x < kernel_w; x++)
259
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
260
if (sxs < 0 || sxs % stride_w != 0)
263
int sx = sxs / stride_w;
267
const float* sptr = m.row(sy) + sx * 4;
269
int k = y * kernel_w + x;
271
v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
272
v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0);
273
_sum = __msa_fmadd_w(_sum, _val, _w);
277
_sum = activation_ps(_sum, activation_type, activation_params);
279
__msa_st_w((v4i32)_sum, outptr + j * 4, 0);
291
#pragma omp parallel for num_threads(opt.num_threads)
292
for (int g = 0; g < channels; g++)
294
float* outptr = top_blob_bordered.channel(g);
295
const float* kptr = (const float*)weight_data_tm + maxk * g;
296
const Mat m = bottom_blob.channel(g);
298
for (int i = 0; i < outh; i++)
300
for (int j = 0; j < outw; j++)
309
for (int y = 0; y < kernel_h; y++)
311
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
312
if (sys < 0 || sys % stride_h != 0)
315
int sy = sys / stride_h;
319
const float* sptr = m.row(sy);
321
for (int x = 0; x < kernel_w; x++)
323
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
324
if (sxs < 0 || sxs % stride_w != 0)
327
int sx = sxs / stride_w;
331
float val = sptr[sx];
333
int k = y * kernel_w + x;
341
sum = activation_ss(sum, activation_type, activation_params);
353
// group deconvolution
354
const int channels_g = channels * elempack / group;
355
const int num_output_g = num_output / group;
358
int out_g_elempack = 1;
360
if (opt.use_packing_layout)
362
g_elempack = channels_g % 4 == 0 ? 4 : 1;
363
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
368
Mat bottom_blob_unpacked = bottom_blob;
369
if (elempack > g_elempack)
372
opt_p.blob_allocator = opt.workspace_allocator;
373
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_p);
376
Mat top_blob_bordered_unpacked = top_blob_bordered;
377
if (out_g_elempack < out_elempack)
379
top_blob_bordered_unpacked.create(outw, outh, num_output, out_elemsize / out_elempack, 1, opt.workspace_allocator);
380
if (top_blob_bordered_unpacked.empty())
384
for (int g = 0; g < group; g++)
386
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
387
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
389
const ncnn::Layer* op = group_ops[g];
392
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
395
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
399
if (out_g_elempack < out_elempack)
401
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, 4, opt);
405
top_blob_bordered = top_blob_bordered_unpacked;
409
cut_padding(top_blob_bordered, top_blob, opt);
410
if (top_blob.empty())
416
int DeconvolutionDepthWise_mips::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
418
const Mat& bottom_blob = bottom_blobs[0];
419
const Mat& _weight_data = bottom_blobs[1];
420
Mat& top_blob = top_blobs[0];
422
const int _num_input = bottom_blob.c * bottom_blob.elempack;
423
const int _kernel_w = _weight_data.w;
424
const int _kernel_h = _weight_data.h;
425
const int _num_output = _weight_data.d * group;
427
Mat weight_data_flattened;
428
flatten(_weight_data, weight_data_flattened, opt);
429
if (weight_data_flattened.empty())
432
// weight_data_flattened as pack1
433
weight_data_flattened.w *= weight_data_flattened.elempack;
434
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
435
weight_data_flattened.elempack = 1;
437
// transpose group-inch/group-outch/group-kh-kw to group-outch/group-inch/group-kh-kw
438
Mat weight_data_transposed;
440
weight_data_transposed.create(_kernel_w * _kernel_h * _num_output * _num_input / group, 4u, opt.workspace_allocator);
441
if (weight_data_transposed.empty())
444
const int outch_g = _num_output / group;
445
const int inch_g = _num_input / group;
446
const int maxk = _kernel_h * _kernel_w;
448
for (int g = 0; g < group; g++)
450
// reorder weight from inch-outch to outch-inch
451
float* wg2 = (float*)weight_data_transposed + g * outch_g * inch_g * maxk;
452
const float* wg = (const float*)weight_data_flattened + g * inch_g * outch_g * maxk;
453
for (int i = 0; i < outch_g; i++)
455
for (int j = 0; j < inch_g; j++)
457
for (int k = 0; k < maxk; k++)
459
wg2[(i * inch_g + j) * maxk + k] = wg[(j * outch_g + i) * maxk + k];
466
Mat bias_data_flattened;
469
const Mat& _bias_data = bottom_blobs[2];
470
flatten(_bias_data, bias_data_flattened, opt);
471
if (bias_data_flattened.empty())
474
// bias_data_flattened as pack1
475
bias_data_flattened.w *= bias_data_flattened.elempack;
476
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
477
bias_data_flattened.elempack = 1;
480
ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::DeconvolutionDepthWise);
483
pd.set(0, _num_output);
484
pd.set(1, _kernel_w);
485
pd.set(11, _kernel_h);
486
pd.set(2, dilation_w);
487
pd.set(12, dilation_h);
489
pd.set(13, stride_h);
491
pd.set(15, pad_right);
493
pd.set(16, pad_bottom);
494
pd.set(18, output_pad_right);
495
pd.set(19, output_pad_bottom);
496
pd.set(20, output_w);
497
pd.set(21, output_h);
498
pd.set(5, bias_term);
499
pd.set(6, weight_data_transposed.w);
501
pd.set(9, activation_type);
502
pd.set(10, activation_params);
506
ncnn::Mat weights[2];
507
weights[0] = weight_data_transposed;
508
weights[1] = bias_data_flattened;
510
op->load_model(ncnn::ModelBinFromMatArray(weights));
512
op->create_pipeline(opt);
514
op->forward(bottom_blob, top_blob, opt);
516
op->destroy_pipeline(opt);