// yala is pleased to support the open source community by making ncnn available.
//
//
// Copyright (C) 2022 yala <zhaojunchao@loongson.cn>;<junchao82@qq.com>. All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "convolutiondepthwise_loongarch.h"

#include "layer_type.h"

#if __loongarch_sx
#include <lsxintrin.h>
#endif // __loongarch_sx

#include "loongarch_activation.h"
#include "loongarch_usability.h"

namespace ncnn {

#include "convolutiondepthwise_3x3.h"

#if __loongarch_sx
#include "convolutiondepthwise_3x3_pack4.h"
#include "convolutiondepthwise_5x5_pack4.h"
#endif // __loongarch_sx

ConvolutionDepthWise_loongarch::ConvolutionDepthWise_loongarch()
{
#if __loongarch_sx
    support_packing = true;
#endif // __loongarch_sx

    activation = 0;
}

int ConvolutionDepthWise_loongarch::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;

    activation = create_activation_layer(activation_type, activation_params, opt);

#if NCNN_INT8
    if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
    {
        return create_pipeline_int8_loongarch(opt);
    }
#endif

    const int maxk = kernel_w * kernel_h;
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
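    // weight_data is a flat [maxk x channels_g x num_output_g x group] array, so
    // dividing the per-group size by the kernel area and the per-group output
    // count recovers the total input channel count. e.g. group = 8, num_output = 8,
    // 3x3 kernel, weight_data_size = 72: (72 / 8) / 9 / (8 / 8) * 8 = 8 channels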

    // depth-wise
    if (channels == group && group == num_output)
    {
        int elempack = 1;
#if __loongarch_sx
        if (opt.use_packing_layout)
        {
            elempack = channels % 4 == 0 ? 4 : 1;
        }
#endif

#if __loongarch_sx
        // pack4
        if (elempack == 4)
        {
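            // interleave the [maxk x group] weights in blocks of 4 channels so a
            // single __lsx_vld fetches the weights of 4 channels for one kernel tap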
            Mat weight_data_r2 = weight_data.reshape(maxk, group);
            convert_packing(weight_data_r2, weight_data_tm, 4, opt);
        }
#endif // __loongarch_sx

        if (elempack == 1)
        {
            weight_data_tm = weight_data;
        }

        if (opt.lightmode)
            weight_data.release();

        return 0;
    }

    // group convolution
    create_group_ops(opt);

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int ConvolutionDepthWise_loongarch::create_group_ops(const Option& opt)
{
    // create Convolution op for each group
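    // each group is lowered to an independent Convolution layer. pad is forced to
    // zero below because forward() already applies make_padding to the whole input
    // before slicing it into per-group channel ranges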
    const int maxk = kernel_w * kernel_h;
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;

    for (int i = 0; i < (int)group_ops.size(); i++)
        delete group_ops[i];

    group_ops.clear();

    const int channels_g = channels / group;
    const int num_output_g = num_output / group;

    group_ops.resize(group);

    for (int g = 0; g < group; g++)
    {
        Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
        Mat bias_data_g;
        if (bias_term)
            bias_data_g = bias_data.range(num_output_g * g, num_output_g);

        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);

        // set param
        ncnn::ParamDict pd;
        pd.set(0, num_output_g); // num_output
        pd.set(1, kernel_w);
        pd.set(11, kernel_h);
        pd.set(2, dilation_w);
        pd.set(12, dilation_h);
        pd.set(3, stride_w);
        pd.set(13, stride_h);
        pd.set(4, 0);  // pad_w
        pd.set(14, 0); // pad_h
        pd.set(5, bias_term);
        pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
        pd.set(8, int8_scale_term);
        pd.set(9, activation_type);
        pd.set(10, activation_params);

        op->load_param(pd);

        // set weights
        if (bias_term)
        {
            ncnn::Mat weights[5];
            weights[0] = weight_data_g;
            weights[1] = bias_data_g;

#if NCNN_INT8
            if (int8_scale_term)
            {
                Mat weight_data_int8_scales_g(num_output_g);
                weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
                weights[2] = weight_data_int8_scales_g;
                weights[3] = bottom_blob_int8_scales.range(g, 1);
            }
            if (int8_scale_term > 100)
            {
                weights[4] = top_blob_int8_scales.range(g, 1);
            }
#endif

            op->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[4];
            weights[0] = weight_data_g;

#if NCNN_INT8
            if (int8_scale_term)
            {
                Mat weight_data_int8_scales_g(num_output_g);
                weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
                weights[1] = weight_data_int8_scales_g;
                weights[2] = bottom_blob_int8_scales.range(g, 1);
            }
            if (int8_scale_term > 100)
            {
                weights[3] = top_blob_int8_scales.range(g, 1);
            }
#endif

            op->load_model(ModelBinFromMatArray(weights));
        }

        op->create_pipeline(opt);

        group_ops[g] = op;
    }

    return 0;
}

int ConvolutionDepthWise_loongarch::destroy_pipeline(const Option& opt)
{
    if (activation)
    {
        activation->destroy_pipeline(opt);
        delete activation;
        activation = 0;
    }

    for (int i = 0; i < (int)group_ops.size(); i++)
    {
        group_ops[i]->destroy_pipeline(opt);
        delete group_ops[i];
    }
    group_ops.clear();

    return 0;
}

int ConvolutionDepthWise_loongarch::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
    if (opt.use_int8_inference && int8_scale_term)
    {
        return forward_int8_loongarch(bottom_blob, top_blob, opt);
    }
#endif

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int outw = (w - kernel_extent_w) / stride_w + 1;
    int outh = (h - kernel_extent_h) / stride_h + 1;
    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif
    size_t out_elemsize = elemsize / elempack * out_elempack;

    top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    // depth-wise
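    // one filter per input channel: the bordered blob carries channels * elempack
    // real channels, and depth-wise requires channels == group == num_output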
    if (channels * elempack == group && group == num_output)
    {
#if __loongarch_sx
        if (elempack == 4)
        {
            if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            {
                convdw3x3s1_pack4_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
            {
                convdw3x3s2_pack4_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            {
                convdw5x5s1_pack4_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
            {
                convdw5x5s2_pack4_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else
            {
                const int maxk = kernel_w * kernel_h;

                // kernel offsets
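                // space_ofs[k] is the element offset of kernel tap k within the
                // bordered input row window, with dilation applied. e.g. a 3x3
                // kernel with dilation 1 on w = 10 gives gap = 10 - 3 = 7 and
                // offsets {0, 1, 2, 10, 11, 12, 20, 21, 22}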
                std::vector<int> _space_ofs(maxk);
                int* space_ofs = &_space_ofs[0];
                {
                    int p1 = 0;
                    int p2 = 0;
                    int gap = w * dilation_h - kernel_w * dilation_w;
                    for (int i = 0; i < kernel_h; i++)
                    {
                        for (int j = 0; j < kernel_w; j++)
                        {
                            space_ofs[p1] = p2;
                            p1++;
                            p2 += dilation_w;
                        }
                        p2 += gap;
                    }
                }

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int g = 0; g < channels; g++)
                {
                    float* outptr = top_blob.channel(g);
                    const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
                    const Mat m = bottom_blob_bordered.channel(g);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            __m128 _sum = (__m128)__lsx_vreplgr2vr_w(0);

                            if (bias_term)
                            {
                                _sum = (__m128)__lsx_vld((const float*)bias_data + g * 4, 0);
                            }

                            const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                            for (int k = 0; k < maxk; k++)
                            {
                                __m128 _val = (__m128)__lsx_vld(sptr + space_ofs[k] * 4, 0);
                                __m128 _w = (__m128)__lsx_vld(kptr + k * 4, 0);
                                _sum = __lsx_vfmadd_s(_w, _val, _sum);
                            }

                            _sum = activation_ps(_sum, activation_type, activation_params);

                            __lsx_vst(_sum, outptr + j * 4, 0);
                        }

                        outptr += outw * 4;
                    }
                }
            }
        }
#endif // __loongarch_sx

        if (elempack == 1)
        {
            if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            {
                convdw3x3s1_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
            {
                convdw3x3s2_lsx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);

                if (activation)
                {
                    activation->forward_inplace(top_blob, opt);
                }
            }
            else
            {
                const int maxk = kernel_w * kernel_h;

                // kernel offsets
                std::vector<int> _space_ofs(maxk);
                int* space_ofs = &_space_ofs[0];
                {
                    int p1 = 0;
                    int p2 = 0;
                    int gap = w * dilation_h - kernel_w * dilation_w;
                    for (int i = 0; i < kernel_h; i++)
                    {
                        for (int j = 0; j < kernel_w; j++)
                        {
                            space_ofs[p1] = p2;
                            p1++;
                            p2 += dilation_w;
                        }
                        p2 += gap;
                    }
                }

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int g = 0; g < group; g++)
                {
                    float* outptr = top_blob.channel(g);
                    const float* kptr = (const float*)weight_data_tm + maxk * g;
                    const Mat m = bottom_blob_bordered.channel(g);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            float sum = 0.f;

                            if (bias_term)
                                sum = bias_data[g];

                            const float* sptr = m.row(i * stride_h) + j * stride_w;

                            for (int k = 0; k < maxk; k++)
                            {
                                float val = (float)sptr[space_ofs[k]];
                                float w = (float)kptr[k];
                                sum += val * w;
                            }

                            sum = activation_ss(sum, activation_type, activation_params);

                            outptr[j] = sum;
                        }

                        outptr += outw;
                    }
                }
            }
        }

        return 0;
    }

    // group convolution
    const int channels_g = channels * elempack / group;
    const int num_output_g = num_output / group;

    int g_elempack = 1;
    int out_g_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        g_elempack = channels_g % 4 == 0 ? 4 : 1;
        out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
    }
#endif

    // unpacking
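    // when the input is packed wider than the per-group slice allows (e.g. pack4
    // input but channels_g not a multiple of 4), repack to pack1 first so
    // channel_range() can cut the blob exactly at group boundaries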
    Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
    if (elempack > g_elempack)
    {
        Option opt_p = opt;
        opt_p.blob_allocator = opt.workspace_allocator;
        convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, 1, opt_p);
    }

    Mat top_blob_unpacked = top_blob;
    if (out_g_elempack < out_elempack)
    {
        top_blob_unpacked.create(outw, outh, num_output, out_elemsize / out_elempack, 1, opt.workspace_allocator);
        if (top_blob_unpacked.empty())
            return -100;
    }

    for (int g = 0; g < group; g++)
    {
        const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
        Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);

        const ncnn::Layer* op = group_ops[g];

        Option opt_g = opt;
        opt_g.blob_allocator = top_blob_unpacked.allocator;

        // forward
        op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
    }

    // packing
    if (out_g_elempack < out_elempack)
    {
        convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
    }
    else
    {
        top_blob = top_blob_unpacked;
    }

    return 0;
}

int ConvolutionDepthWise_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
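    // dynamic weight path: the kernel (and optional bias) arrive as extra bottom
    // blobs, so build a temporary ConvolutionDepthWise layer, run it once and
    // destroy it again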
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& _weight_data = bottom_blobs[1];
    Mat& top_blob = top_blobs[0];

    const int _kernel_w = _weight_data.w;
    const int _kernel_h = _weight_data.h;
    const int _num_output = _weight_data.c * _weight_data.elempack;

    Mat weight_data_flattened;
    flatten(_weight_data, weight_data_flattened, opt);
    if (weight_data_flattened.empty())
        return -100;

    // weight_data_flattened as pack1
    weight_data_flattened.w *= weight_data_flattened.elempack;
    weight_data_flattened.elemsize /= weight_data_flattened.elempack;
    weight_data_flattened.elempack = 1;

    Mat bias_data_flattened;
    if (bias_term)
    {
        const Mat& _bias_data = bottom_blobs[2];
        flatten(_bias_data, bias_data_flattened, opt);
        if (bias_data_flattened.empty())
            return -100;

        // bias_data_flattened as pack1
        bias_data_flattened.w *= bias_data_flattened.elempack;
        bias_data_flattened.elemsize /= bias_data_flattened.elempack;
        bias_data_flattened.elempack = 1;
    }

    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::ConvolutionDepthWise);

    ncnn::ParamDict pd;
    pd.set(0, _num_output);
    pd.set(1, _kernel_w);
    pd.set(11, _kernel_h);
    pd.set(2, dilation_w);
    pd.set(12, dilation_h);
    pd.set(3, stride_w);
    pd.set(13, stride_h);
    pd.set(4, pad_left);
    pd.set(15, pad_right);
    pd.set(14, pad_top);
    pd.set(16, pad_bottom);
    pd.set(18, pad_value);
    pd.set(5, bias_term);
    pd.set(6, weight_data_flattened.w);
    pd.set(7, group);
    pd.set(8, int8_scale_term);
    pd.set(9, activation_type);
    pd.set(10, activation_params);

    op->load_param(pd);

    ncnn::Mat weights[2];
    weights[0] = weight_data_flattened;
    weights[1] = bias_data_flattened;

    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);

    op->forward(bottom_blob, top_blob, opt);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

#if NCNN_INT8
int ConvolutionDepthWise_loongarch::create_pipeline_int8_loongarch(const Option& opt)
{
    const int maxk = kernel_w * kernel_h;
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;

    // depth-wise
    if (channels == group && group == num_output)
    {
        int elempack = 1;
#if __loongarch_sx
        if (opt.use_packing_layout)
        {
            elempack = channels % 8 == 0 ? 8 : 1;
        }
#endif // __loongarch_sx

        if (elempack == 8)
        {
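            // int8 weights are packed 8 channels per tap: 8 x int8 fills half an
            // LSX vector and widens to a full vector of int16 in the kernel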
            Mat weight_data_r2 = weight_data.reshape(maxk, group);
            convert_packing(weight_data_r2, weight_data_tm, 8, opt);
        }

        if (elempack == 1)
        {
            weight_data_tm = weight_data;
        }

        if (opt.lightmode)
            weight_data.release();

        return 0;
    }

    // group convolution
    create_group_ops(opt);

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int ConvolutionDepthWise_loongarch::forward_int8_loongarch(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int elempack = bottom_blob.elempack;

    int elembits = bottom_blob.elembits();

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    Mat bottom_blob_int8 = bottom_blob;
    if (elembits != 8)
    {
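        // the input is still fp32 here: expand the per-group input scales to one
        // scale per channel, then quantize the whole blob to int8 in one pass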
        const int channels_g = channels * elempack / group;

        Mat scales(channels * elempack);
        {
            float* ps = scales;
            for (int g = 0; g < group; g++)
            {
                float scale = bottom_blob_int8_scales[g];
                for (int q = 0; q < channels_g; q++)
                {
                    *ps++ = scale;
                }
            }
        }

        Option opt_q = opt;
        opt_q.blob_allocator = opt.workspace_allocator;
        quantize_to_int8(bottom_blob, bottom_blob_int8, scales, opt_q);
    }

    Mat bottom_blob_bordered;
    make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;
    channels = bottom_blob_bordered.c;
    elempack = bottom_blob_bordered.elempack;

    int outw = (w - kernel_extent_w) / stride_w + 1;
    int outh = (h - kernel_extent_h) / stride_h + 1;

    // depth-wise
    if (channels * elempack == group && group == num_output)
    {
        int out_elempack = 1;
#if __loongarch_sx
        if (opt.use_packing_layout)
        {
            out_elempack = num_output % 8 == 0 ? 8 : 1;
        }
#endif // __loongarch_sx
        bool use_int8_requantize = int8_scale_term > 100;
        size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;

        top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

#if __loongarch_sx
        if (elempack == 8)
        {
            {
                const int maxk = kernel_w * kernel_h;

                // kernel offsets
                std::vector<int> _space_ofs(maxk);
                int* space_ofs = &_space_ofs[0];
                {
                    int p1 = 0;
                    int p2 = 0;
                    int gap = w * dilation_h - kernel_w * dilation_w;
                    for (int i = 0; i < kernel_h; i++)
                    {
                        for (int j = 0; j < kernel_w; j++)
                        {
                            space_ofs[p1] = p2;
                            p1++;
                            p2 += dilation_w;
                        }
                        p2 += gap;
                    }
                }

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int g = 0; g < channels; g++)
                {
                    signed char* outptr_s8 = top_blob.channel(g);
                    float* outptr_f32 = top_blob.channel(g);
                    const signed char* kptr = (const signed char*)weight_data_tm + maxk * g * 8;
                    const Mat m = bottom_blob_bordered.channel(g);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            __m128i _sum0 = __lsx_vreplgr2vr_w(0);
                            __m128i _sum1 = __lsx_vreplgr2vr_w(0);

                            const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;

                            for (int k = 0; k < maxk; k++)
                            {
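                                // sign-extend 8 int8 lanes to int16 by interleaving
                                // them with their sign mask (__lsx_vslti_b(x, 0) is
                                // all-ones for negative lanes), multiply as int16,
                                // then widen the products to two int32 accumulators
                                // the same way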
                                __m128i _val = __lsx_vld(sptr + space_ofs[k] * 8, 0);
                                __m128i _val16 = __lsx_vilvl_b(__lsx_vslti_b(_val, 0), _val);

                                __m128i _w = __lsx_vld(kptr + k * 8, 0);
                                __m128i _w16 = __lsx_vilvl_b(__lsx_vslti_b(_w, 0), _w);

                                __m128i _s0 = __lsx_vmul_h(_val16, _w16);
                                __m128i _exts0 = __lsx_vslti_h(_s0, 0);
                                __m128i _s0l = __lsx_vilvl_h(_exts0, _s0);
                                __m128i _s0h = __lsx_vilvh_h(_exts0, _s0);

                                _sum0 = __lsx_vadd_w(_sum0, _s0l);
                                _sum1 = __lsx_vadd_w(_sum1, _s0h);
                            }

                            __m128 _scale_in0;
                            __m128 _scale_in1;
                            {
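                                // per-channel dequantize factor:
                                // scale_in = 1 / (bottom_scale * weight_scale);
                                // the vfcmp_cne mask zeroes lanes whose weight scale
                                // is 0 so the reciprocal cannot leak inf into the sum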
                                __m128 _bottom_blob_int8_scales0 = (__m128)__lsx_vld((const float*)bottom_blob_int8_scales + g * 8, 0);
                                __m128 _bottom_blob_int8_scales1 = (__m128)__lsx_vld((const float*)bottom_blob_int8_scales + g * 8 + 4, 0);
                                __m128 _weight_data_int8_scales0 = (__m128)__lsx_vld((const float*)weight_data_int8_scales + g * 8, 0);
                                __m128 _weight_data_int8_scales1 = (__m128)__lsx_vld((const float*)weight_data_int8_scales + g * 8 + 4, 0);
                                _scale_in0 = __lsx_vfrecip_s(__lsx_vfmul_s(_bottom_blob_int8_scales0, _weight_data_int8_scales0));
                                _scale_in1 = __lsx_vfrecip_s(__lsx_vfmul_s(_bottom_blob_int8_scales1, _weight_data_int8_scales1));

                                __m128i _m0 = __lsx_vfcmp_cne_s(_weight_data_int8_scales0, __lsx_vreplfr2vr_s(0.f));
                                __m128i _m1 = __lsx_vfcmp_cne_s(_weight_data_int8_scales1, __lsx_vreplfr2vr_s(0.f));
                                _scale_in0 = (__m128)__lsx_vand_v((__m128i)_scale_in0, (__m128i)_m0);
                                _scale_in1 = (__m128)__lsx_vand_v((__m128i)_scale_in1, (__m128i)_m1);
                            }

                            __m128 _sumfp32_0 = __lsx_vfmul_s(__lsx_vffint_s_w(_sum0), _scale_in0);
                            __m128 _sumfp32_1 = __lsx_vfmul_s(__lsx_vffint_s_w(_sum1), _scale_in1);

                            if (bias_term)
                            {
                                __m128 _bias0 = (__m128)__lsx_vld((const float*)bias_data + g * 8, 0);
                                __m128 _bias1 = (__m128)__lsx_vld((const float*)bias_data + g * 8 + 4, 0);
                                _sumfp32_0 = __lsx_vfadd_s(_sumfp32_0, _bias0);
                                _sumfp32_1 = __lsx_vfadd_s(_sumfp32_1, _bias1);
                            }

                            _sumfp32_0 = activation_ps(_sumfp32_0, activation_type, activation_params);
                            _sumfp32_1 = activation_ps(_sumfp32_1, activation_type, activation_params);

                            if (use_int8_requantize)
                            {
                                // requantize and relu
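                                // scale to the output quantization, saturate the 8
                                // results to int8 and store them as a single int64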
                                __m128 _scale_out0 = (__m128)__lsx_vld((const float*)top_blob_int8_scales + g * 8, 0);
                                __m128 _scale_out1 = (__m128)__lsx_vld((const float*)top_blob_int8_scales + g * 8 + 4, 0);
                                _sumfp32_0 = __lsx_vfmul_s(_sumfp32_0, _scale_out0);
                                _sumfp32_1 = __lsx_vfmul_s(_sumfp32_1, _scale_out1);
                                int64_t _sum8 = float2int8(_sumfp32_0, _sumfp32_1);

                                *(int64_t*)outptr_s8 = _sum8;
                                outptr_s8 += 8;
                            }
                            else
                            {
                                // dequantize and relu
                                __lsx_vst(_sumfp32_0, outptr_f32, 0);
                                __lsx_vst(_sumfp32_1, outptr_f32 + 4, 0);
                                outptr_f32 += 8;
                            }
                        }
                    }
                }
            }
        }
#endif // __loongarch_sx

        if (elempack == 1)
        {
            {
                const int maxk = kernel_w * kernel_h;

                // kernel offsets
                std::vector<int> _space_ofs(maxk);
                int* space_ofs = &_space_ofs[0];
                {
                    int p1 = 0;
                    int p2 = 0;
                    int gap = w * dilation_h - kernel_w * dilation_w;
                    for (int i = 0; i < kernel_h; i++)
                    {
                        for (int j = 0; j < kernel_w; j++)
                        {
                            space_ofs[p1] = p2;
                            p1++;
                            p2 += dilation_w;
                        }
                        p2 += gap;
                    }
                }

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int g = 0; g < group; g++)
                {
                    signed char* outptr_s8 = top_blob.channel(g);
                    float* outptr_f32 = top_blob.channel(g);
                    const signed char* kptr = (const signed char*)weight_data_tm + maxk * g;
                    const Mat m = bottom_blob_bordered.channel(g);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            int sum = 0;

                            const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;

                            for (int k = 0; k < maxk; k++)
                            {
                                signed char val = sptr[space_ofs[k]];
                                signed char w = kptr[k];
                                sum += val * w;
                            }

                            float scale_in;
                            if (weight_data_int8_scales[g] == 0)
                                scale_in = 0;
                            else
                                scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);

                            float sumfp32 = sum * scale_in;

                            if (bias_term)
                                sumfp32 += bias_data[g];

                            sumfp32 = activation_ss(sumfp32, activation_type, activation_params);

                            if (use_int8_requantize)
                            {
                                // requantize
                                float scale_out = top_blob_int8_scales[g];
                                signed char sums8 = float2int8(sumfp32 * scale_out);
                                outptr_s8[0] = sums8;
                                outptr_s8 += 1;
                            }
                            else
                            {
                                // dequantize
                                outptr_f32[0] = sumfp32;
                                outptr_f32 += 1;
                            }
                        }
                    }
                }
            }
        }

        return 0;
    }

    bool use_int8_requantize = int8_scale_term > 100;
    int out_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        if (use_int8_requantize)
            out_elempack = num_output % 8 == 0 ? 8 : 1;
        else
            out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif // __loongarch_sx
    size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;

    top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    // group convolution
    const int channels_g = channels * elempack / group;
    const int num_output_g = num_output / group;

    int g_elempack = 1;
    int out_g_elempack = 1;
#if __loongarch_sx
    if (opt.use_packing_layout)
    {
        g_elempack = channels_g % 8 == 0 ? 8 : 1;
        if (use_int8_requantize)
            out_g_elempack = num_output_g % 8 == 0 ? 8 : 1;
        else
            out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
    }
#endif // __loongarch_sx

    // unpacking
    Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
    if (elempack > g_elempack)
    {
        Option opt_p = opt;
        opt_p.blob_allocator = opt.workspace_allocator;
        convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
    }

    Mat top_blob_unpacked = top_blob;
    if (out_g_elempack < out_elempack)
    {
        top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
        if (top_blob_unpacked.empty())
            return -100;
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
        Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);

        const ncnn::Layer* op = group_ops[g];

        Option opt_g = opt;
        opt_g.blob_allocator = top_blob_unpacked.allocator;

        // forward
        op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
    }

    // packing
    if (out_g_elempack < out_elempack)
    {
        convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
    }
    else
    {
        top_blob = top_blob_unpacked;
    }

    return 0;
}
#endif // NCNN_INT8

} // namespace ncnn
