// yala is pleased to support the open source community by making ncnn available.
//
//
// Copyright (C) 2022 yala <zhaojunchao@loongson.cn>;<junchao82@qq.com>. All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "convolution1d_loongarch.h"

#if __loongarch_sx
#include <lsxintrin.h>
#endif // __loongarch_sx

#include "loongarch_activation.h"
#include "loongarch_usability.h"

namespace ncnn {

Convolution1D_loongarch::Convolution1D_loongarch()
{
#if __loongarch_sx
    support_packing = true;
#endif // __loongarch_sx
}

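// create_pipeline() repacks the raw kw-inch-outch weights ahead of time so the
// SIMD kernels in forward() can read them as contiguous interleaved blocks.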
int Convolution1D_loongarch::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;

    const int num_input = weight_data_size / kernel_w / num_output;

    int elempack = 1;
    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        elempack = num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif

    // src = kw-inch-outch
    // dst = pb-pa-kw-inch/pa-outch/pb
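    // e.g. with elempack = 4 and out_elempack = 4, each kernel tap stores a
    // 4x4 block (4 input lanes x 4 output channels = 16 consecutive floats),
    // which is exactly the 16-float stride the pack4 kernel below consumes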
    {
        Mat weight_data_r2 = weight_data.reshape(kernel_w, num_input, num_output);

        weight_data_packed.create(kernel_w, num_input / elempack, num_output / out_elempack, (size_t)4u * elempack * out_elempack, elempack * out_elempack);

        for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
        {
            float* g00 = weight_data_packed.channel(q / out_elempack);

            for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
            {
                for (int k = 0; k < kernel_w; k++)
                {
                    for (int i = 0; i < elempack; i++)
                    {
                        for (int j = 0; j < out_elempack; j++)
                        {
                            const float* k00 = weight_data_r2.channel(q + j).row(p + i);

                            g00[0] = k00[k];

                            g00++;
                        }
                    }
                }
            }
        }
    }

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int Convolution1D_loongarch::destroy_pipeline(const Option& /*opt*/)
{
    return 0;
}

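// forward() dispatches on the packing of the input blob and the chosen output
// packing: three LSX paths below cover 4->4, 1->4 and 4->1, with a scalar
// fallback for 1->1.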
int Convolution1D_loongarch::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif
    size_t out_elemsize = elemsize / elempack * out_elempack;

    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

#if __loongarch_sx
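    // pack4 -> pack4: each of the 4 input lanes is broadcast and multiplied by
    // its row of the interleaved 4x4 weight block, accumulating 4 output
    // channels at once per kernel tap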
    if (elempack == 4 && out_elempack == 4)
    {
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int p = 0; p < outh; p++)
            {
                float* outptr = top_blob.row(p);

                for (int j = 0; j < outw; j++)
                {
                    __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                    if (bias_term)
                    {
                        _sum = (__m128)__lsx_vld((const float*)bias_data + p * 4, 0);
                    }

                    const float* kptr = weight_data_packed.channel(p);

                    for (int q = 0; q < h; q++)
                    {
                        const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;

                        for (int k = 0; k < kernel_w; k++)
                        {
                            __m128 _val0 = __lsx_vreplfr2vr_s(sptr[0]);
                            __m128 _val1 = __lsx_vreplfr2vr_s(sptr[1]);
                            __m128 _val2 = __lsx_vreplfr2vr_s(sptr[2]);
                            __m128 _val3 = __lsx_vreplfr2vr_s(sptr[3]);

                            __m128 _w0 = (__m128)__lsx_vld(kptr, 0);
                            __m128 _w1 = (__m128)__lsx_vld(kptr + 4, 0);
                            __m128 _w2 = (__m128)__lsx_vld(kptr + 8, 0);
                            __m128 _w3 = (__m128)__lsx_vld(kptr + 12, 0);

                            _sum = __lsx_vfmadd_s(_w0, _val0, _sum);
                            _sum = __lsx_vfmadd_s(_w1, _val1, _sum);
                            _sum = __lsx_vfmadd_s(_w2, _val2, _sum);
                            _sum = __lsx_vfmadd_s(_w3, _val3, _sum);

                            sptr += dilation_w * 4;
                            kptr += 16;
                        }
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params);

                    __lsx_vst(_sum, outptr, 0);
                    outptr += 4;
                }
            }
        }
    }

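    // pack1 -> pack4: a single input value is broadcast and multiplied by 4
    // packed weights, producing 4 output channels per step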
    if (elempack == 1 && out_elempack == 4)
    {
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int p = 0; p < outh; p++)
            {
                float* outptr = top_blob.row(p);

                for (int j = 0; j < outw; j++)
                {
                    __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                    if (bias_term)
                    {
                        _sum = (__m128)__lsx_vld((const float*)bias_data + p * 4, 0);
                    }

                    const float* kptr = weight_data_packed.channel(p);

                    for (int q = 0; q < h; q++)
                    {
                        const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;

                        for (int k = 0; k < kernel_w; k++)
                        {
                            __m128 _val = __lsx_vreplfr2vr_s(sptr[0]);
                            __m128 _w = (__m128)__lsx_vld(kptr, 0);
                            _sum = __lsx_vfmadd_s(_w, _val, _sum);

                            sptr += dilation_w;
                            kptr += 4;
                        }
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params);

                    __lsx_vst(_sum, outptr, 0);
                    outptr += 4;
                }
            }
        }
    }

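    // pack4 -> pack1: 4 input channels are multiplied lane-wise against 4
    // weights; the vector accumulator is reduced to a scalar with
    // __lsx_reduce_fadd_s at the end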
    if (elempack == 4 && out_elempack == 1)
    {
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int p = 0; p < outh; p++)
            {
                float* outptr = top_blob.row(p);

                for (int j = 0; j < outw; j++)
                {
                    float sum = 0.f;

                    if (bias_term)
                    {
                        sum = bias_data[p];
                    }

                    __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                    const float* kptr = weight_data_packed.channel(p);

                    for (int q = 0; q < h; q++)
                    {
                        const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;

                        for (int k = 0; k < kernel_w; k++)
                        {
                            __m128 _val = (__m128)__lsx_vld(sptr, 0);
                            __m128 _w = (__m128)__lsx_vld(kptr, 0);
                            _sum = __lsx_vfmadd_s(_w, _val, _sum);

                            sptr += dilation_w * 4;
                            kptr += 4;
                        }
                    }

                    sum += __lsx_reduce_fadd_s(_sum);

                    sum = activation_ss(sum, activation_type, activation_params);

                    outptr[j] = sum;
                }
            }
        }
    }
#endif // __loongarch_sx

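    // pack1 -> pack1: plain scalar path, also the only path compiled when LSX
    // is not available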
    if (elempack == 1 && out_elempack == 1)
    {
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int p = 0; p < outh; p++)
            {
                float* outptr = top_blob.row(p);

                for (int j = 0; j < outw; j++)
                {
                    float sum = 0.f;

                    if (bias_term)
                    {
                        sum = bias_data[p];
                    }

                    const float* kptr = weight_data_packed.channel(p);

                    for (int q = 0; q < h; q++)
                    {
                        const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;

                        for (int k = 0; k < kernel_w; k++)
                        {
                            float val = sptr[0];
                            float wt = kptr[0];
                            sum += val * wt;

                            sptr += dilation_w;
                            kptr += 1;
                        }
                    }

                    sum = activation_ss(sum, activation_type, activation_params);

                    outptr[j] = sum;
                }
            }
        }
    }

    return 0;
}

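// Dynamic-weight variant: the weights (and optional bias) arrive as extra
// bottom blobs, so a temporary Convolution1D layer is constructed per call,
// loaded with the flattened weights, run once, and destroyed.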
int Convolution1D_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& _weight_data = bottom_blobs[1];
    Mat& top_blob = top_blobs[0];

    const int _kernel_w = _weight_data.w;
    const int _num_output = _weight_data.c * _weight_data.elempack;

    Mat weight_data_flattened;
    flatten(_weight_data, weight_data_flattened, opt);
    if (weight_data_flattened.empty())
        return -100;

    // weight_data_flattened as pack1
    weight_data_flattened.w *= weight_data_flattened.elempack;
    weight_data_flattened.elemsize /= weight_data_flattened.elempack;
    weight_data_flattened.elempack = 1;

    Mat bias_data_flattened;
    if (bias_term)
    {
        const Mat& _bias_data = bottom_blobs[2];
        flatten(_bias_data, bias_data_flattened, opt);
        if (bias_data_flattened.empty())
            return -100;

        // bias_data_flattened as pack1
        bias_data_flattened.w *= bias_data_flattened.elempack;
        bias_data_flattened.elemsize /= bias_data_flattened.elempack;
        bias_data_flattened.elempack = 1;
    }

    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);

    ncnn::ParamDict pd;
    pd.set(0, _num_output);
    pd.set(1, _kernel_w);
    pd.set(2, dilation_w);
    pd.set(3, stride_w);
    pd.set(4, pad_left);
    pd.set(15, pad_right);
    pd.set(18, pad_value);
    pd.set(5, bias_term);
    pd.set(6, weight_data_flattened.w);
    pd.set(9, activation_type);
    pd.set(10, activation_params);

    op->load_param(pd);

    ncnn::Mat weights[2];
    weights[0] = weight_data_flattened;
    weights[1] = bias_data_flattened;

    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);

    op->forward(bottom_blob, top_blob, opt);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

} // namespace ncnn