ncnn — innerproduct_riscv.cpp (1095 lines · 32.0 KB)
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "innerproduct_riscv.h"

#include "layer_type.h"

#if __riscv_vector
#include <riscv_vector.h>
#endif // __riscv_vector

#include "riscv_activation.h"
#include "riscv_usability.h"

namespace ncnn {

InnerProduct_riscv::InnerProduct_riscv()
{
#if __riscv_vector
    support_packing = true;
#if __riscv_zfh
    support_fp16_storage = true;
#endif
#endif // __riscv_vector

    flatten = 0;
}

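// create_pipeline instantiates a Flatten helper layer (used by forward() to
// collapse >1D inputs to a vector) and, when packing is enabled, repacks the
// fp32 weights so the inner loops below can use unit-stride vector loads.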
int InnerProduct_riscv::create_pipeline(const Option& opt)
{
    {
        flatten = ncnn::create_layer_cpu(ncnn::LayerType::Flatten);

        ncnn::ParamDict pd;

        flatten->load_param(pd);

        flatten->create_pipeline(opt);
    }

#if NCNN_INT8
    if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
    {
        // TODO implement int8
        return 0;
    }
#endif

#if __riscv_vector && __riscv_zfh
    if (opt.use_fp16_storage)
    {
        return create_pipeline_fp16s(opt);
    }
#endif

    int out_elempack = 1;

#if __riscv_vector
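    // csrr_vlenb() reads the vlenb CSR, i.e. the vector register width in
    // bytes, so vlenb / 4 is the number of fp32 lanes per register (packn).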
    const int packn = csrr_vlenb() / 4;

    const int num_input = weight_data_size / num_output;

    if (opt.use_packing_layout)
    {
        out_elempack = num_output % packn == 0 ? packn : 1;
    }

    if (out_elempack == packn)
    {
        // src = inch-outch
        // dst = packn-inch-outch/packn
        {
            Mat weight_data_r2 = weight_data.reshape(num_input, num_output);

            weight_data_tm.create(num_input, num_output / packn, (size_t)4u * packn, packn);

            for (int q = 0; q + (packn - 1) < num_output; q += packn)
            {
                float* g0 = weight_data_tm.row(q / packn);

                for (int p = 0; p < num_input; p++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        *g0++ = weight_data_r2.row(q + j)[p];
                    }
                }
            }
        }
    }
#endif // __riscv_vector

    if (out_elempack == 1)
    {
        weight_data_tm = weight_data;
    }

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

int InnerProduct_riscv::destroy_pipeline(const Option& opt)
{
    if (flatten)
    {
        flatten->destroy_pipeline(opt);
        delete flatten;
        flatten = 0;
    }

    return 0;
}

int InnerProduct_riscv::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
    if (opt.use_int8_inference && int8_scale_term)
    {
        Mat bottom_blob_unpacked = bottom_blob;
        if (bottom_blob.elempack != 1)
        {
            Option opt_pack1 = opt;
            opt_pack1.blob_allocator = opt.workspace_allocator;

            convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
        }

        Mat bottom_blob_unpacked_fp32 = bottom_blob_unpacked;
        if (bottom_blob_unpacked.elembits() == 16)
        {
            Option opt_pack1 = opt;
            opt_pack1.blob_allocator = opt.workspace_allocator;

            cast_float16_to_float32(bottom_blob_unpacked, bottom_blob_unpacked_fp32, opt_pack1);
        }

        Option opt_unpacked = opt;
        opt_unpacked.use_packing_layout = false;
        return InnerProduct::forward_int8(bottom_blob_unpacked_fp32, top_blob, opt_unpacked);
    }
#endif

    int elembits = bottom_blob.elembits();

#if __riscv_vector && __riscv_zfh
    if (opt.use_fp16_storage && elembits == 16)
    {
        if (opt.use_fp16_arithmetic)
            return forward_fp16sa(bottom_blob, top_blob, opt);
        else
            return forward_fp16s(bottom_blob, top_blob, opt);
    }
#endif

#if __riscv_vector
    const int packn = csrr_vlenb() / 4;
#endif

    const int num_input = weight_data_size / num_output;

    if (bottom_blob.dims == 2 && bottom_blob.w == num_input)
    {
        // gemm
        int h = bottom_blob.h;
        size_t elemsize = bottom_blob.elemsize;
        int elempack = bottom_blob.elempack;

        top_blob.create(num_output, h, elemsize, elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        int num_output_elempack = 1;
#if __riscv_vector
        if (opt.use_packing_layout)
        {
            num_output_elempack = num_output % packn == 0 ? packn : 1;
        }
#endif

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int j = 0; j < h; j++)
        {
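            // One kernel per (input elempack, output elempack) combination:
            // packn/packn, 1/packn, packn/1, plus the scalar 1/1 fallback below.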
#if __riscv_vector
            if (elempack == packn && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e32m1(packn);

                float* outptr = top_blob.row(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    for (int l = 0; l < packn; l++)
                    {
                        const float* kptr = weight_data_tm.row(p) + l;
                        const float* m = bottom_blob.row(j);

                        vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                        if (bias_term)
                        {
                            _sum = vfmv_v_f_f32m1(bias_data[p * packn + l], vl);
                        }

                        int n = num_input;
                        while (n > 0)
                        {
                            vfloat32m1_t _val = vle32_v_f32m1(m, vl);
                            _sum = vfmacc_vf_f32m1(_sum, *kptr, _val, vl);

                            m += packn;
                            kptr += packn;
                            n -= 1;
                        }

                        _sum = activation_ps(_sum, activation_type, activation_params, vl);

                        vse32_v_f32m1(outptr, _sum, vl);
                        outptr += packn;
                    }
                }
            }

            if (elempack == 1 && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e32m1(packn);

                float* outptr = top_blob.row(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    const float* kptr = weight_data_tm.row(p);
                    const float* m = bottom_blob.row(j);

                    vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vle32_v_f32m1((const float*)bias_data + p * packn, vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat32m1_t _w = vle32_v_f32m1(kptr, vl);
                        _sum = vfmacc_vf_f32m1(_sum, *m, _w, vl);

                        m += 1;
                        kptr += packn;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse32_v_f32m1(outptr, _sum, vl);
                    outptr += packn;
                }
            }

            if (elempack == packn && num_output_elempack == 1)
            {
                const size_t vl = vsetvl_e32m1(packn);

                float* outptr = top_blob.row(j);

                for (int p = 0; p < num_output; p++)
                {
                    const float* kptr = (const float*)weight_data_tm + num_input * p;
                    const float* m = bottom_blob.row(j);

                    vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vfmv_v_f_f32m1(bias_data[p], vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat32m1_t _val = vle32_v_f32m1(m, vl);
                        _sum = vfmacc_vf_f32m1(_sum, *kptr, _val, vl);

                        m += packn;
                        kptr += 1;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse32_v_f32m1(outptr, _sum, vl);
                    outptr += packn;
                }
            }
#endif // __riscv_vector

            if (elempack == 1 && num_output_elempack == 1)
            {
                float* outptr = top_blob.row(j);

                for (int p = 0; p < num_output; p++)
                {
                    const float* kptr = (const float*)weight_data_tm + num_input * p;
                    const float* m = bottom_blob.row(j);

                    float sum = 0.f;

                    if (bias_term)
                    {
                        sum = bias_data[p];
                    }

                    for (int i = 0; i < num_input; i++)
                    {
                        sum += m[i] * kptr[i];
                    }

                    sum = activation_ss(sum, activation_type, activation_params);

                    outptr[0] = sum;
                    outptr += 1;
                }
            }
        }

        return 0;
    }

    // flatten
    Mat bottom_blob_flattened = bottom_blob;
    if (bottom_blob.dims != 1)
    {
        Option opt_flatten = opt;
        opt_flatten.blob_allocator = opt.workspace_allocator;

        flatten->forward(bottom_blob, bottom_blob_flattened, opt_flatten);
    }

    size_t elemsize = bottom_blob_flattened.elemsize;
    int elempack = bottom_blob_flattened.elempack;

    int out_elempack = 1;
#if __riscv_vector
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % packn == 0 ? packn : 1;
    }
#endif
    size_t out_elemsize = elemsize / elempack * out_elempack;

    top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

#if __riscv_vector
    if (out_elempack == packn)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < num_output / out_elempack; p++)
        {
            const size_t vl = vsetvl_e32m1(packn);
            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

            if (bias_term)
            {
                _sum = vle32_v_f32m1((const float*)bias_data + p * packn, vl);
            }

            const float* kptr = weight_data_tm.row(p);

            const float* sptr = bottom_blob_flattened;

            int n = num_input;
            while (n > 0)
            {
                vfloat32m1_t _w = vle32_v_f32m1(kptr, vl);
                _sum = vfmacc_vf_f32m1(_sum, *sptr, _w, vl);

                sptr += 1;
                kptr += packn;
                n -= 1;
            }

            _sum = activation_ps(_sum, activation_type, activation_params, vl);

            float* outptr = top_blob;
            vse32_v_f32m1(outptr + p * packn, _sum, vl);
        }
    }
#endif // __riscv_vector

    if (out_elempack == 1)
    {
#if __riscv_vector
        int nn_num_output = num_output / packn;
        int remain_num_output_start = nn_num_output * packn;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_num_output; pp++)
        {
            int p = pp * packn;

            const size_t vl = vsetvl_e32m1(packn);
            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

            if (bias_term)
            {
                _sum = vle32_v_f32m1((const float*)bias_data + p, vl);
            }

            const float* w = (const float*)weight_data_tm + num_input * p;

            const float* m = bottom_blob_flattened;

            int n = num_input;
            while (n > 0)
            {
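                // vlse32 is a strided load: it gathers w[0], w[num_input],
                // w[2 * num_input], ... i.e. the same input column across
                // packn consecutive rows of the unpacked, row-major weights.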
                vfloat32m1_t _w = vlse32_v_f32m1(w, num_input * sizeof(float), vl);

                _sum = vfmacc_vf_f32m1(_sum, *m, _w, vl);

                m += 1;
                w += 1;
                n -= 1;
            }

            _sum = activation_ps(_sum, activation_type, activation_params, vl);

            vse32_v_f32m1((float*)top_blob + p, _sum, vl);
        }
#else // __riscv_vector
        int nn_num_output = num_output / 4;
        int remain_num_output_start = nn_num_output * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_num_output; pp++)
        {
            int p = pp * 4;

            float sum0 = 0.f;
            float sum1 = 0.f;
            float sum2 = 0.f;
            float sum3 = 0.f;

            if (bias_term)
            {
                sum0 = bias_data[p];
                sum1 = bias_data[p + 1];
                sum2 = bias_data[p + 2];
                sum3 = bias_data[p + 3];
            }

            const float* w0 = (const float*)weight_data_tm + num_input * p;
            const float* w1 = (const float*)weight_data_tm + num_input * (p + 1);
            const float* w2 = (const float*)weight_data_tm + num_input * (p + 2);
            const float* w3 = (const float*)weight_data_tm + num_input * (p + 3);

            const float* m = bottom_blob_flattened;

            for (int i = 0; i < num_input; i++)
            {
                sum0 += *m * *w0;
                sum1 += *m * *w1;
                sum2 += *m * *w2;
                sum3 += *m * *w3;

                m++;
                w0++;
                w1++;
                w2++;
                w3++;
            }

            sum0 = activation_ss(sum0, activation_type, activation_params);
            sum1 = activation_ss(sum1, activation_type, activation_params);
            sum2 = activation_ss(sum2, activation_type, activation_params);
            sum3 = activation_ss(sum3, activation_type, activation_params);

            top_blob[p] = sum0;
            top_blob[p + 1] = sum1;
            top_blob[p + 2] = sum2;
            top_blob[p + 3] = sum3;
        }
#endif // __riscv_vector

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_num_output_start; p < num_output; p++)
        {
            float sum = 0.f;

            if (bias_term)
                sum = bias_data[p];

            const float* w = (const float*)weight_data_tm + num_input * p;

            const float* m = bottom_blob_flattened;

            for (int i = 0; i < num_input; i++)
            {
                sum += *m * *w;

                m++;
                w++;
            }

            sum = activation_ss(sum, activation_type, activation_params);

            top_blob[p] = sum;
        }
    }

    return 0;
}

#if __riscv_vector && __riscv_zfh
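// fp16 storage: packn is vlenb / 2 (fp16 lanes per register). Weights are
// cast to __fp16 and repacked as in the fp32 path; bias stays in fp32 for
// forward_fp16s and is additionally cast to fp16 for forward_fp16sa.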
int InnerProduct_riscv::create_pipeline_fp16s(const Option& opt)
{
    const int packn = csrr_vlenb() / 2;

    const int num_input = weight_data_size / num_output;

    int out_elempack = 1;

    if (opt.use_packing_layout)
    {
        out_elempack = num_output % packn == 0 ? packn : 1;
    }

    // src = inch-outch
    // dst = pb-inch-outch/pb
    {
        Mat weight_data_r2 = weight_data.reshape(num_input, num_output);

        weight_data_tm.create(num_input, num_output / out_elempack, (size_t)2u * out_elempack, out_elempack);

        for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
        {
            __fp16* g0 = weight_data_tm.row<__fp16>(q / out_elempack);

            for (int p = 0; p < num_input; p++)
            {
                for (int j = 0; j < out_elempack; j++)
                {
                    *g0++ = (__fp16)(weight_data_r2.row(q + j)[p]);
                }
            }
        }
    }

    ncnn::cast_float32_to_float16(bias_data, bias_data_fp16, opt);

    if (opt.lightmode)
        weight_data.release();

    return 0;
}

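// forward_fp16s: fp16 storage, fp32 arithmetic. Inputs and weights are
// widened with vfwcvt (f16m1 -> f32m2), accumulated in fp32, then narrowed
// back with vfncvt on store.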
int InnerProduct_riscv::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    const int packn = csrr_vlenb() / 2;

    const int num_input = weight_data_size / num_output;

    if (bottom_blob.dims == 2 && bottom_blob.w == num_input)
    {
        // gemm
        int h = bottom_blob.h;
        size_t elemsize = bottom_blob.elemsize;
        int elempack = bottom_blob.elempack;

        top_blob.create(num_output, h, elemsize, elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        int num_output_elempack = opt.use_packing_layout && num_output % packn == 0 ? packn : 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int j = 0; j < h; j++)
        {
            if (elempack == packn && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    for (int l = 0; l < packn; l++)
                    {
                        const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p * packn + l;
                        const __fp16* m = bottom_blob.row<const __fp16>(j);

                        vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                        if (bias_term)
                        {
                            _sum = vfmv_v_f_f32m2(bias_data[p * packn + l], vl);
                        }

                        int n = num_input;
                        while (n > 0)
                        {
                            vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(m, vl), vl);

                            _sum = vfmacc_vf_f32m2(_sum, *kptr, _val, vl);

                            m += packn;
                            kptr += packn;
                            n -= 1;
                        }

                        _sum = activation_ps(_sum, activation_type, activation_params, vl);

                        vse16_v_f16m1(outptr, vfncvt_f_f_w_f16m1(_sum, vl), vl);
                        outptr += packn;
                    }
                }
            }

            if (elempack == 1 && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p * packn;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vle32_v_f32m2((const float*)bias_data + p * packn, vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl);

                        _sum = vfmacc_vf_f32m2(_sum, *m, _w, vl);

                        m += 1;
                        kptr += packn;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse16_v_f16m1(outptr, vfncvt_f_f_w_f16m1(_sum, vl), vl);
                    outptr += packn;
                }
            }

            if (elempack == packn && num_output_elempack == 1)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vfmv_v_f_f32m2(bias_data[p], vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(m, vl), vl);

                        _sum = vfmacc_vf_f32m2(_sum, *kptr, _val, vl);

                        m += packn;
                        kptr += 1;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse16_v_f16m1(outptr, vfncvt_f_f_w_f16m1(_sum, vl), vl);
                    outptr += packn;
                }
            }

            if (elempack == 1 && num_output_elempack == 1)
            {
                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    float sum = 0.f;

                    if (bias_term)
                    {
                        sum = bias_data[p];
                    }

                    for (int i = 0; i < num_input; i++)
                    {
                        sum += (float)m[i] * (float)kptr[i];
                    }

                    sum = activation_ss(sum, activation_type, activation_params);

                    outptr[0] = (__fp16)sum;
                    outptr += 1;
                }
            }
        }

        return 0;
    }

    // flatten
    Mat bottom_blob_flattened = bottom_blob;
    if (bottom_blob.dims != 1)
    {
        Option opt_flatten = opt;
        opt_flatten.blob_allocator = opt.workspace_allocator;

        flatten->forward(bottom_blob, bottom_blob_flattened, opt_flatten);
    }

    size_t elemsize = bottom_blob_flattened.elemsize;
    int elempack = bottom_blob_flattened.elempack;

    int out_elempack = opt.use_packing_layout && num_output % packn == 0 ? packn : 1;
    size_t out_elemsize = elemsize / elempack * out_elempack;

    top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    if (out_elempack == packn)
    {
        // num_output
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < num_output / out_elempack; p++)
        {
            const size_t vl = vsetvl_e16m1(packn);
            vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

            if (bias_term)
            {
                _sum = vle32_v_f32m2((const float*)bias_data + p * packn, vl);
            }

            const __fp16* kptr = weight_data_tm.row<const __fp16>(p);

            const __fp16* sptr = bottom_blob_flattened;

            int n = num_input;
            while (n > 0)
            {
                vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl);

                _sum = vfmacc_vf_f32m2(_sum, (float)(*sptr), _w, vl);

                sptr += 1;
                kptr += packn;
                n -= 1;
            }

            _sum = activation_ps(_sum, activation_type, activation_params, vl);

            __fp16* outptr = (__fp16*)top_blob;
            vse16_v_f16m1(outptr + p * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl);
        }
    }

    if (out_elempack == 1)
    {
        // num_output
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < num_output; p++)
        {
            float sum = 0.f;

            if (bias_term)
                sum = bias_data[p];

            const __fp16* kptr = weight_data_tm.row<__fp16>(p);

            const __fp16* sptr = bottom_blob_flattened;

            int i = 0;
            for (; i < num_input; i++)
            {
                float v = (float)(*sptr);
                float k = (float)(*kptr);

                sum += v * k;

                sptr++;
                kptr++;
            }

            sum = activation_ss(sum, activation_type, activation_params);

            __fp16* outptr = (__fp16*)top_blob;
            outptr[p] = (__fp16)sum;
        }
    }

    return 0;
}

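// forward_fp16sa: fp16 storage and fp16 arithmetic. vfmacc accumulates
// directly in f16m1 registers using the fp16 bias copy, avoiding the
// widen/narrow conversions of forward_fp16s at the cost of fp16
// accumulation precision.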
int InnerProduct_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    const int packn = csrr_vlenb() / 2;

    const int num_input = weight_data_size / num_output;

    if (bottom_blob.dims == 2 && bottom_blob.w == num_input)
    {
        // gemm
        int h = bottom_blob.h;
        size_t elemsize = bottom_blob.elemsize;
        int elempack = bottom_blob.elempack;

        top_blob.create(num_output, h, elemsize, elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        int num_output_elempack = opt.use_packing_layout && num_output % packn == 0 ? packn : 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int j = 0; j < h; j++)
        {
            if (elempack == packn && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    for (int l = 0; l < packn; l++)
                    {
                        const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p * packn + l;
                        const __fp16* m = bottom_blob.row<const __fp16>(j);

                        vfloat16m1_t _sum = vfmv_v_f_f16m1((__fp16)0.f, vl);

                        if (bias_term)
                        {
                            _sum = vfmv_v_f_f16m1(((const __fp16*)bias_data_fp16)[p * packn + l], vl);
                        }

                        int n = num_input;
                        while (n > 0)
                        {
                            vfloat16m1_t _val = vle16_v_f16m1(m, vl);

                            _sum = vfmacc_vf_f16m1(_sum, *kptr, _val, vl);

                            m += packn;
                            kptr += packn;
                            n -= 1;
                        }

                        _sum = activation_ps(_sum, activation_type, activation_params, vl);

                        vse16_v_f16m1(outptr, _sum, vl);
                        outptr += packn;
                    }
                }
            }

            if (elempack == 1 && num_output_elempack == packn)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output / num_output_elempack; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p * packn;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vle16_v_f16m1((const __fp16*)bias_data_fp16 + p * packn, vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);

                        _sum = vfmacc_vf_f16m1(_sum, *m, _w, vl);

                        m += 1;
                        kptr += packn;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse16_v_f16m1(outptr, _sum, vl);
                    outptr += packn;
                }
            }

            if (elempack == packn && num_output_elempack == 1)
            {
                const size_t vl = vsetvl_e16m1(packn);

                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    if (bias_term)
                    {
                        _sum = vfmv_v_f_f16m1(((const __fp16*)bias_data_fp16)[p], vl);
                    }

                    int n = num_input;
                    while (n > 0)
                    {
                        vfloat16m1_t _val = vle16_v_f16m1(m, vl);

                        _sum = vfmacc_vf_f16m1(_sum, *kptr, _val, vl);

                        m += packn;
                        kptr += 1;
                        n -= 1;
                    }

                    _sum = activation_ps(_sum, activation_type, activation_params, vl);

                    vse16_v_f16m1(outptr, _sum, vl);
                    outptr += packn;
                }
            }

            if (elempack == 1 && num_output_elempack == 1)
            {
                __fp16* outptr = top_blob.row<__fp16>(j);

                for (int p = 0; p < num_output; p++)
                {
                    const __fp16* kptr = (const __fp16*)weight_data_tm + num_input * p;
                    const __fp16* m = bottom_blob.row<const __fp16>(j);

                    float sum = 0.f;

                    if (bias_term)
                    {
                        sum = bias_data[p];
                    }

                    for (int i = 0; i < num_input; i++)
                    {
                        sum += (float)(m[i] * kptr[i]);
                    }

                    sum = activation_ss(sum, activation_type, activation_params);

                    outptr[0] = (__fp16)sum;
                    outptr += 1;
                }
            }
        }

        return 0;
    }

    // flatten
    Mat bottom_blob_flattened = bottom_blob;
    if (bottom_blob.dims != 1)
    {
        Option opt_flatten = opt;
        opt_flatten.blob_allocator = opt.workspace_allocator;

        flatten->forward(bottom_blob, bottom_blob_flattened, opt_flatten);
    }

    size_t elemsize = bottom_blob_flattened.elemsize;
    int elempack = bottom_blob_flattened.elempack;

    int out_elempack = opt.use_packing_layout && num_output % packn == 0 ? packn : 1;
    size_t out_elemsize = elemsize / elempack * out_elempack;

    top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    if (out_elempack == packn)
    {
        // num_output
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < num_output / out_elempack; p++)
        {
            const size_t vl = vsetvl_e16m1(packn);
            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

            if (bias_term)
            {
                _sum = vle16_v_f16m1((const __fp16*)bias_data_fp16 + p * packn, vl);
            }

            const __fp16* kptr = weight_data_tm.row<const __fp16>(p);

            const __fp16* sptr = bottom_blob_flattened;

            int n = num_input;
            while (n > 0)
            {
                vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);

                _sum = vfmacc_vf_f16m1(_sum, *sptr, _w, vl);

                sptr += 1;
                kptr += packn;
                n -= 1;
            }

            _sum = activation_ps(_sum, activation_type, activation_params, vl);

            __fp16* outptr = (__fp16*)top_blob;
            vse16_v_f16m1(outptr + p * packn, _sum, vl);
        }
    }

    if (out_elempack == 1)
    {
        // num_output
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < num_output; p++)
        {
            float sum = 0.f;

            if (bias_term)
                sum = bias_data[p];

            const __fp16* kptr = weight_data_tm.row<__fp16>(p);

            const __fp16* sptr = bottom_blob_flattened;

            int i = 0;
            for (; i < num_input; i++)
            {
                __fp16 v = *sptr;
                __fp16 k = *kptr;

                sum += (float)(v * k);

                sptr++;
                kptr++;
            }

            sum = activation_ss(sum, activation_type, activation_params);

            __fp16* outptr = (__fp16*)top_blob;
            outptr[p] = (__fp16)sum;
        }
    }

    return 0;
}
#endif // __riscv_vector && __riscv_zfh

} // namespace ncnn