// yala is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 yala <zhaojunchao@loongson.cn>;<junchao82@qq.com>. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "convolution1d_loongarch.h"
19
#endif // __loongarch_sx
21
#include "loongarch_activation.h"
22
#include "loongarch_usability.h"
26
Convolution1D_loongarch::Convolution1D_loongarch()
{
#if __loongarch_sx
    support_packing = true;
#endif // __loongarch_sx
}

int Convolution1D_loongarch::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;

    const int num_input = weight_data_size / kernel_w / num_output;

    int elempack = 1;
    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        elempack = num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif // __loongarch_sx
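
    // pack input/output channels in groups of 4 only when they divide evenly,
    // so the LSX kernels below can process 4 lanes per instruction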

    // src = kw-inch-outch
    // dst = pb-pa-kw-inch/pa-outch/pb
    {
        Mat weight_data_r2 = weight_data.reshape(kernel_w, num_input, num_output);

        weight_data_packed.create(kernel_w, num_input / elempack, num_output / out_elempack, (size_t)4u * elempack * out_elempack, elempack * out_elempack);
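
        // packed layout sketch: with elempack = 4 and out_elempack = 4 each kernel
        // tap k stores a 4x4 block of 16 consecutive floats, written by the i/j
        // loops below as (in0,out0) (in0,out1) (in0,out2) (in0,out3) (in1,out0) ...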

        for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
        {
            float* g00 = weight_data_packed.channel(q / out_elempack);

            for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
            {
                for (int k = 0; k < kernel_w; k++)
                {
                    for (int i = 0; i < elempack; i++)
                    {
                        for (int j = 0; j < out_elempack; j++)
                        {
                            const float* k00 = weight_data_r2.channel(q + j).row(p + i);

                            g00[0] = k00[k];

                            g00++;
                        }
                    }
                }
            }
        }
    }

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int Convolution1D_loongarch::destroy_pipeline(const Option& /*opt*/)
{
    return 0;
}

int Convolution1D_loongarch::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif // __loongarch_sx
    size_t out_elemsize = elemsize / elempack * out_elempack;

    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;
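
    // dispatch on input/output packing: the 4x4, 1x4 and 4x1 branches use LSX
    // vector kernels, the 1x1 branch is the plain scalar fallback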

#if __loongarch_sx
    if (elempack == 4 && out_elempack == 4)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outh; p++)
        {
            float* outptr = top_blob.row(p);

            for (int j = 0; j < outw; j++)
            {
                __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                if (bias_term)
                {
                    _sum = (__m128)__lsx_vld((const float*)bias_data + p * 4, 0);
                }

                const float* kptr = weight_data_packed.channel(p);

                for (int q = 0; q < h; q++)
                {
                    const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;

                    for (int k = 0; k < kernel_w; k++)
                    {
                        __m128 _val0 = __lsx_vreplfr2vr_s(sptr[0]);
                        __m128 _val1 = __lsx_vreplfr2vr_s(sptr[1]);
                        __m128 _val2 = __lsx_vreplfr2vr_s(sptr[2]);
                        __m128 _val3 = __lsx_vreplfr2vr_s(sptr[3]);

                        __m128 _w0 = (__m128)__lsx_vld(kptr, 0);
                        __m128 _w1 = (__m128)__lsx_vld(kptr + 4, 0);
                        __m128 _w2 = (__m128)__lsx_vld(kptr + 8, 0);
                        __m128 _w3 = (__m128)__lsx_vld(kptr + 12, 0);

                        _sum = __lsx_vfmadd_s(_w0, _val0, _sum);
                        _sum = __lsx_vfmadd_s(_w1, _val1, _sum);
                        _sum = __lsx_vfmadd_s(_w2, _val2, _sum);
                        _sum = __lsx_vfmadd_s(_w3, _val3, _sum);

                        sptr += dilation_w * 4;
                        kptr += 16; // 4 input lanes x 4 output channels consumed per tap
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __lsx_vst(_sum, outptr, 0);
                outptr += 4;
            }
        }
    }
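
    // elempack 1 -> out_elempack 4: each scalar input sample is broadcast and
    // multiplied against 4 packed output channels per kernel tap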
    if (elempack == 1 && out_elempack == 4)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outh; p++)
        {
            float* outptr = top_blob.row(p);

            for (int j = 0; j < outw; j++)
            {
                __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                if (bias_term)
                {
                    _sum = (__m128)__lsx_vld((const float*)bias_data + p * 4, 0);
                }

                const float* kptr = weight_data_packed.channel(p);

                for (int q = 0; q < h; q++)
                {
                    const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;

                    for (int k = 0; k < kernel_w; k++)
                    {
                        __m128 _val = __lsx_vreplfr2vr_s(sptr[0]);
                        __m128 _w = (__m128)__lsx_vld(kptr, 0);
                        _sum = __lsx_vfmadd_s(_w, _val, _sum);

                        sptr += dilation_w;
                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __lsx_vst(_sum, outptr, 0);
                outptr += 4;
            }
        }
    }
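
    // elempack 4 -> out_elempack 1: accumulate the 4 input lanes in a vector
    // register, then reduce it horizontally to a single output value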
    if (elempack == 4 && out_elempack == 1)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outh; p++)
        {
            float* outptr = top_blob.row(p);

            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_term)
                {
                    sum = bias_data[p];
                }

                __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                const float* kptr = weight_data_packed.channel(p);

                for (int q = 0; q < h; q++)
                {
                    const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;

                    for (int k = 0; k < kernel_w; k++)
                    {
                        __m128 _val = (__m128)__lsx_vld(sptr, 0);
                        __m128 _w = (__m128)__lsx_vld(kptr, 0);
                        _sum = __lsx_vfmadd_s(_w, _val, _sum);

                        sptr += dilation_w * 4;
                        kptr += 4;
                    }
                }

                sum += __lsx_reduce_fadd_s(_sum);

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }
        }
    }
#endif // __loongarch_sx
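
    // scalar reference path, used when packing is disabled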
    if (elempack == 1 && out_elempack == 1)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outh; p++)
        {
            float* outptr = top_blob.row(p);

            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_term)
                {
                    sum = bias_data[p];
                }

                const float* kptr = weight_data_packed.channel(p);

                for (int q = 0; q < h; q++)
                {
                    const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;

                    for (int k = 0; k < kernel_w; k++)
                    {
                        float val = sptr[0];
                        float wt = kptr[0];
                        sum += val * wt;

                        sptr += dilation_w;
                        kptr += 1;
                    }
                }

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }
        }
    }

    return 0;
}

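// dynamic weight path: wrap the runtime weight/bias blobs into a plain
// Convolution1D layer and let it do the forward pass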
int Convolution1D_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& _weight_data = bottom_blobs[1];
    Mat& top_blob = top_blobs[0];

    const int _kernel_w = _weight_data.w;
    const int _num_output = _weight_data.c * _weight_data.elempack;

    Mat weight_data_flattened;
    flatten(_weight_data, weight_data_flattened, opt);
    if (weight_data_flattened.empty())
        return -100;

    // weight_data_flattened as pack1
    weight_data_flattened.w *= weight_data_flattened.elempack;
    weight_data_flattened.elemsize /= weight_data_flattened.elempack;
    weight_data_flattened.elempack = 1;

    Mat bias_data_flattened;
    if (bias_term)
    {
        const Mat& _bias_data = bottom_blobs[2];
        flatten(_bias_data, bias_data_flattened, opt);
        if (bias_data_flattened.empty())
            return -100;

        // bias_data_flattened as pack1
        bias_data_flattened.w *= bias_data_flattened.elempack;
        bias_data_flattened.elemsize /= bias_data_flattened.elempack;
        bias_data_flattened.elempack = 1;
    }

    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);

    ncnn::ParamDict pd;
    pd.set(0, _num_output);
    pd.set(1, _kernel_w);
    pd.set(2, dilation_w);
    pd.set(3, stride_w);
    pd.set(4, pad_left);
    pd.set(15, pad_right);
    pd.set(18, pad_value);
    pd.set(5, bias_term);
    pd.set(6, weight_data_flattened.w);
    pd.set(9, activation_type);
    pd.set(10, activation_params);

    op->load_param(pd);

    ncnn::Mat weights[2];
    weights[0] = weight_data_flattened;
    weights[1] = bias_data_flattened;

    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);

    op->forward(bottom_blob, top_blob, opt);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

} // namespace ncnn