ncnn

Форк
0
/
pooling_riscv.cpp 
962 строки · 31.2 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#include "pooling_riscv.h"
16

17
#include <float.h>
18

19
#if __riscv_vector
20
#include <riscv_vector.h>
21
#endif // __riscv_vector
22

23
#include "riscv_usability.h"
24

25
namespace ncnn {
26

27
// Constructor: advertise the optimized storage layouts this layer supports.
// With the RISC-V vector extension we can process packn-packed fp32 blobs,
// and with the zfh extension additionally fp16-storage blobs.
Pooling_riscv::Pooling_riscv()
{
#if __riscv_vector
    support_packing = true;
#if __riscv_zfh
    support_fp16_storage = true;
#endif
#endif // __riscv_vector
}
36

37
int Pooling_riscv::create_pipeline(const Option& /*opt*/)
38
{
39
    if (adaptive_pooling)
40
    {
41
        support_packing = false;
42

43
        support_bf16_storage = false;
44
        support_fp16_storage = false;
45
        support_int8_storage = false;
46
        support_tensor_storage = false;
47
    }
48
    return 0;
49
}
50

51
// Forward pass: max / average pooling over an NxN sliding window, with an
// RVV-vectorized fast path for blobs packed to exactly packn fp32 lanes.
// Anything the vector path cannot handle (adaptive pooling, other elempack
// values, no vector extension) falls through to the reference Pooling layer.
// Returns 0 on success, -100 on allocation failure.
int Pooling_riscv::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // Adaptive pooling is not vectorized here; defer to the reference layer
    // (create_pipeline disabled packing for this case, so data is plain fp32).
    if (adaptive_pooling)
    {
        return Pooling::forward(bottom_blob, top_blob, opt);
    }

    int elembits = bottom_blob.elembits();

#if __riscv_vector && __riscv_zfh
    // Dispatch 16-bit storage to the half-precision implementations:
    // fp16 storage + fp16 arithmetic vs fp16 storage + fp32 accumulation.
    if (opt.use_fp16_storage && elembits == 16)
    {
        if (opt.use_fp16_arithmetic)
            return forward_fp16sa(bottom_blob, top_blob, opt);
        else
            return forward_fp16s(bottom_blob, top_blob, opt);
    }
#endif

    // max value in NxN window
    // avg value in NxN window

#if __riscv_vector
    // packn = fp32 lanes per vector register (vlenb bytes / 4 bytes per float);
    // vl is fixed to packn for the whole kernel.
    const int packn = csrr_vlenb() / 4;
    const size_t vl = vsetvl_e32m1(packn);
#endif

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

#if __riscv_vector
    //     NCNN_LOGE("Pooling     input %d x %d  pad = %d %d %d %d  ksize=%d %d  stride=%d %d", w, h, pad_left, pad_right, pad_top, pad_bottom, kernel_w, kernel_h, stride_w, stride_h);

    // Vector fast path: only for blobs packed to exactly packn elements.
    if (elempack == packn)
    {
        if (global_pooling)
        {
            // Global pooling: one packn-lane output element per channel.
            top_blob.create(channels, elemsize, elempack, opt.blob_allocator);
            if (top_blob.empty())
                return -100;

            int size = w * h;

            if (pooling_type == PoolMethod_MAX)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_blob.channel(q);

                    // Seed the running max with the first element; the loop
                    // revisits it, which is harmless for max.
                    vfloat32m1_t _max = vle32_v_f32m1(ptr, vl);
                    for (int i = 0; i < size; i++)
                    {
                        vfloat32m1_t _val = vle32_v_f32m1(ptr, vl);
                        _max = vfmax_vv_f32m1(_max, _val, vl);
                        ptr += packn;
                    }

                    float* outptr = top_blob;
                    vse32_v_f32m1(outptr + q * packn, _max, vl);
                }
            }
            else if (pooling_type == PoolMethod_AVE)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const float* ptr = bottom_blob.channel(q);

                    vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
                    for (int i = 0; i < size; i++)
                    {
                        vfloat32m1_t _val = vle32_v_f32m1(ptr, vl);
                        _sum = vfadd_vv_f32m1(_sum, _val, vl);
                        ptr += packn;
                    }

                    // Multiply by reciprocal rather than dividing per lane.
                    vfloat32m1_t _avg = vfmul_vf_f32m1(_sum, 1.f / size, vl);

                    float* outptr = top_blob;
                    vse32_v_f32m1(outptr + q * packn, _avg, vl);
                }
            }

            return 0;
        }

        // Apply pad_left/right/top/bottom (and pad_mode tail padding) before
        // sliding the window, so the loops below never index out of bounds.
        Mat bottom_blob_bordered;
        make_padding(bottom_blob, bottom_blob_bordered, opt);
        if (bottom_blob_bordered.empty())
            return -100;

        w = bottom_blob_bordered.w;
        h = bottom_blob_bordered.h;

        int outw = (w - kernel_w) / stride_w + 1;
        int outh = (h - kernel_h) / stride_h + 1;

        top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        const int maxk = kernel_w * kernel_h;

        // kernel offsets
        // space_ofs[k] is the element offset (in packn-sized units) of the
        // k-th kernel tap relative to the window's top-left element, laid out
        // row by row with a `gap` jump between kernel rows.
        std::vector<int> _space_ofs(maxk);
        int* space_ofs = &_space_ofs[0];
        {
            int p1 = 0;
            int p2 = 0;
            int gap = w - kernel_w;
            for (int i = 0; i < kernel_h; i++)
            {
                for (int j = 0; j < kernel_w; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2++;
                }
                p2 += gap;
            }
        }

        if (pooling_type == PoolMethod_MAX)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const Mat m = bottom_blob_bordered.channel(q);
                float* outptr = top_blob.channel(q);

                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        const float* sptr = m.row(i * stride_h) + j * stride_w * packn;

                        // Seed with the first tap; the k-loop revisits it,
                        // which is harmless for max.
                        vfloat32m1_t _max = vle32_v_f32m1(sptr, vl);

                        for (int k = 0; k < maxk; k++)
                        {
                            vfloat32m1_t _val = vle32_v_f32m1(sptr + space_ofs[k] * packn, vl);
                            _max = vfmax_vv_f32m1(_max, _val, vl);
                        }

                        vse32_v_f32m1(outptr + j * packn, _max, vl);
                    }

                    outptr += outw * packn;
                }
            }
        }
        else if (pooling_type == PoolMethod_AVE)
        {
            if (avgpool_count_include_pad == 0)
            {
                // Average over real (non-padded) taps only: the divisor is the
                // per-window count `area`, and padded rows/cols are skipped.
                int wtailpad = 0;
                int htailpad = 0;

                if (pad_mode == 0) // full padding
                {
                    // make_padding may add extra tail padding beyond the
                    // explicit pad_* amounts; it must also be excluded.
                    wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
                    htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
                }

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    float* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        int sy0 = i * stride_h;

                        for (int j = 0; j < outw; j++)
                        {
                            int sx0 = j * stride_w;

                            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
                            int area = 0;

                            for (int ki = 0; ki < kernel_h; ki++)
                            {
                                int sy = sy0 + ki;

                                // Rows inside the top padding band: skip.
                                if (sy < pad_top)
                                    continue;

                                // Rows past the real data: remaining rows are
                                // all padding, stop this window's row loop.
                                if (sy >= h - pad_bottom - htailpad)
                                    break;

                                for (int kj = 0; kj < kernel_w; kj++)
                                {
                                    int sx = sx0 + kj;

                                    if (sx < pad_left)
                                        continue;

                                    if (sx >= w - pad_right - wtailpad)
                                        break;

                                    vfloat32m1_t _val = vle32_v_f32m1(m.row(sy) + sx * packn, vl);
                                    _sum = vfadd_vv_f32m1(_sum, _val, vl);
                                    area += 1;
                                }
                            }

                            // area > 0 here: every output window overlaps at
                            // least one real input element after make_padding.
                            vfloat32m1_t _avg = vfmul_vf_f32m1(_sum, 1.f / area, vl);
                            vse32_v_f32m1(outptr + j * packn, _avg, vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }
            else // if (avgpool_count_include_pad == 1)
            {
                // Padded elements count toward the divisor, so every window
                // divides by the full kernel size maxk.
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    float* outptr = top_blob.channel(q);

                    const float inv_maxk = 1.f / maxk;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            const float* sptr = m.row(i * stride_h) + j * stride_w * packn;

                            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                            for (int k = 0; k < maxk; k++)
                            {
                                vfloat32m1_t _val = vle32_v_f32m1(sptr + space_ofs[k] * packn, vl);
                                _sum = vfadd_vv_f32m1(_sum, _val, vl);
                            }

                            vfloat32m1_t _avg = vfmul_vf_f32m1(_sum, inv_maxk, vl);
                            vse32_v_f32m1(outptr + j * packn, _avg, vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }
        }

        return 0;
    }
#endif // __riscv_vector

    // Scalar / other-elempack fallback: reference implementation.
    return Pooling::forward(bottom_blob, top_blob, opt);
}
310

311
#if __riscv_vector && __riscv_zfh
312
// fp16 storage forward pass. Data is stored as __fp16 but average pooling
// accumulates in fp32 (via widening converts) to limit rounding error; max
// pooling compares directly in fp16. Handles both packn-packed and
// elempack==1 layouts. Returns 0 on success, -100 on allocation failure.
int Pooling_riscv::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // max value in NxN window
    // avg value in NxN window

    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes per half).
    const int packn = csrr_vlenb() / 2;
    const size_t vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    //     NCNN_LOGE("Pooling     input %d x %d  pad = %d %d %d %d  ksize=%d %d  stride=%d %d", w, h, pad_left, pad_right, pad_top, pad_bottom, kernel_w, kernel_h, stride_w, stride_h);

    if (global_pooling)
    {
        // Global pooling: one output element per channel.
        top_blob.create(channels, elemsize, elempack, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        int size = w * h;

        if (pooling_type == PoolMethod_MAX)
        {
            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const __fp16* ptr = bottom_blob.channel(q);

                    // -FLT_MAX overflows fp16 range and acts as a minimal
                    // sentinel for the running max.
                    vfloat16m1_t _max = vfmv_v_f_f16m1((__fp16)-FLT_MAX, vl);
                    for (int i = 0; i < size; i++)
                    {
                        vfloat16m1_t _val = vle16_v_f16m1(ptr, vl);
                        _max = vfmax_vv_f16m1(_max, _val, vl);
                        ptr += packn;
                    }

                    __fp16* outptr = top_blob;
                    vse16_v_f16m1(outptr + q * packn, _max, vl);
                }
            }

            if (elempack == 1)
            {
                // Scalar fp16 max over the whole channel.
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const __fp16* ptr = bottom_blob.channel(q);

                    __fp16 max = (__fp16)-FLT_MAX;
                    for (int i = 0; i < size; i++)
                    {
                        max = std::max(max, ptr[i]);
                    }

                    __fp16* outptr = top_blob;
                    outptr[q] = max;
                }
            }
        }

        if (pooling_type == PoolMethod_AVE)
        {
            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const __fp16* ptr = bottom_blob.channel(q);

                    // Accumulate in fp32 (m2 holds packn widened lanes) to
                    // avoid fp16 precision loss over large sums.
                    vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);
                    for (int i = 0; i < size; i++)
                    {
                        vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(ptr, vl), vl);
                        _sum = vfadd_vv_f32m2(_sum, _val, vl);
                        ptr += packn;
                    }

                    vfloat32m2_t _avg = vfmul_vf_f32m2(_sum, 1.f / size, vl);

                    // Narrow back to fp16 only for the final store.
                    __fp16* outptr = top_blob;
                    vse16_v_f16m1(outptr + q * packn, vfncvt_f_f_w_f16m1(_avg, vl), vl);
                }
            }

            if (elempack == 1)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const __fp16* ptr = bottom_blob.channel(q);

                    // Scalar path also sums in fp32 for the same reason.
                    float sum = 0.f;
                    for (int i = 0; i < size; i++)
                    {
                        sum += (float)ptr[i];
                    }

                    __fp16* outptr = top_blob;
                    outptr[q] = (__fp16)(sum / size);
                }
            }
        }

        return 0;
    }

    // Pad the input so the sliding window never reads out of bounds.
    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int outw = (w - kernel_w) / stride_w + 1;
    int outh = (h - kernel_h) / stride_h + 1;

    top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] = element offset of the k-th kernel tap from the window's
    // top-left element (scaled by packn for the packed layout at use sites).
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w - kernel_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2++;
            }
            p2 += gap;
        }
    }

    if (pooling_type == PoolMethod_MAX)
    {
        if (elempack == packn)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const Mat m = bottom_blob_bordered.channel(q);
                __fp16* outptr = top_blob.channel(q);

                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                        vfloat16m1_t _max = vfmv_v_f_f16m1((__fp16)-FLT_MAX, vl);

                        for (int k = 0; k < maxk; k++)
                        {
                            vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                            _max = vfmax_vv_f16m1(_max, _val, vl);
                        }

                        vse16_v_f16m1(outptr + j * packn, _max, vl);
                    }

                    outptr += outw * packn;
                }
            }
        }

        if (elempack == 1)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const Mat m = bottom_blob_bordered.channel(q);
                __fp16* outptr = top_blob.channel(q);

                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                        __fp16 max = (__fp16)-FLT_MAX;

                        for (int k = 0; k < maxk; k++)
                        {
                            __fp16 val = sptr[space_ofs[k]];
                            max = std::max(max, val);
                        }

                        outptr[j] = max;
                    }

                    outptr += outw;
                }
            }
        }
    }

    if (pooling_type == PoolMethod_AVE)
    {
        if (avgpool_count_include_pad == 0)
        {
            // Divisor excludes padded taps; see `area` bookkeeping below.
            int wtailpad = 0;
            int htailpad = 0;

            if (pad_mode == 0) // full padding
            {
                // Extra tail padding from make_padding beyond the explicit
                // pad_* amounts must also be excluded from the average.
                wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
                htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
            }

            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        int sy0 = i * stride_h;

                        for (int j = 0; j < outw; j++)
                        {
                            int sx0 = j * stride_w;

                            // fp32 accumulation via widening converts.
                            vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);
                            int area = 0;

                            for (int ki = 0; ki < kernel_h; ki++)
                            {
                                int sy = sy0 + ki;

                                // Skip rows in the top padding band.
                                if (sy < pad_top)
                                    continue;

                                // Remaining rows are all bottom padding.
                                if (sy >= h - pad_bottom - htailpad)
                                    break;

                                for (int kj = 0; kj < kernel_w; kj++)
                                {
                                    int sx = sx0 + kj;

                                    if (sx < pad_left)
                                        continue;

                                    if (sx >= w - pad_right - wtailpad)
                                        break;

                                    vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(m.row<const __fp16>(sy) + sx * packn, vl), vl);
                                    _sum = vfadd_vv_f32m2(_sum, _val, vl);
                                    area += 1;
                                }
                            }

                            vfloat32m2_t _avg = vfmul_vf_f32m2(_sum, 1.f / area, vl);
                            vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_avg, vl), vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }

            if (elempack == 1)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        int sy0 = i * stride_h;

                        for (int j = 0; j < outw; j++)
                        {
                            int sx0 = j * stride_w;

                            // Scalar path: sum in fp32, skip padded taps.
                            float sum = 0.f;
                            int area = 0;

                            for (int ki = 0; ki < kernel_h; ki++)
                            {
                                int sy = sy0 + ki;

                                if (sy < pad_top)
                                    continue;

                                if (sy >= h - pad_bottom - htailpad)
                                    break;

                                for (int kj = 0; kj < kernel_w; kj++)
                                {
                                    int sx = sx0 + kj;

                                    if (sx < pad_left)
                                        continue;

                                    if (sx >= w - pad_right - wtailpad)
                                        break;

                                    float val = (float)(m.row<const __fp16>(sy)[sx]);
                                    sum += val;
                                    area += 1;
                                }
                            }

                            outptr[j] = (__fp16)(sum / area);
                        }

                        outptr += outw;
                    }
                }
            }
        }

        if (avgpool_count_include_pad == 1)
        {
            // Divisor is always the full kernel size maxk.
            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    const float inv_maxk = 1.f / maxk;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                            vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                            for (int k = 0; k < maxk; k++)
                            {
                                vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl);
                                _sum = vfadd_vv_f32m2(_sum, _val, vl);
                            }

                            vfloat32m2_t _avg = vfmul_vf_f32m2(_sum, inv_maxk, vl);
                            vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_avg, vl), vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }

            if (elempack == 1)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                            float sum = 0.f;

                            for (int k = 0; k < maxk; k++)
                            {
                                float val = (float)(sptr[space_ofs[k]]);
                                sum += val;
                            }

                            outptr[j] = (__fp16)(sum / maxk);
                        }

                        outptr += outw;
                    }
                }
            }
        }
    }

    return 0;
}
712

713
// fp16 storage + fp16 arithmetic forward pass. Only non-global average
// pooling is implemented with fp16 accumulation here; max pooling and global
// pooling delegate to forward_fp16s (max needs no accumulation, and the
// global-pooling sum accumulates in fp32 there for accuracy).
// Returns 0 on success, -100 on allocation failure.
int Pooling_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // max value in NxN window
    // avg value in NxN window

    if (pooling_type == PoolMethod_MAX || global_pooling)
    {
        return forward_fp16s(bottom_blob, top_blob, opt);
    }

    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes per half).
    const int packn = csrr_vlenb() / 2;
    const size_t vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    //     NCNN_LOGE("Pooling     input %d x %d  pad = %d %d %d %d  ksize=%d %d  stride=%d %d", w, h, pad_left, pad_right, pad_top, pad_bottom, kernel_w, kernel_h, stride_w, stride_h);

    // Pad the input so the sliding window never reads out of bounds.
    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;

    int outw = (w - kernel_w) / stride_w + 1;
    int outh = (h - kernel_h) / stride_h + 1;

    top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] = element offset of the k-th kernel tap from the window's
    // top-left element (scaled by packn for the packed layout at use sites).
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w - kernel_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2++;
            }
            p2 += gap;
        }
    }

    if (pooling_type == PoolMethod_AVE)
    {
        if (avgpool_count_include_pad == 0)
        {
            // Divisor excludes padded taps; see `area` bookkeeping below.
            int wtailpad = 0;
            int htailpad = 0;

            if (pad_mode == 0) // full padding
            {
                // Extra tail padding from make_padding beyond the explicit
                // pad_* amounts must also be excluded from the average.
                wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
                htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
            }

            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        int sy0 = i * stride_h;

                        for (int j = 0; j < outw; j++)
                        {
                            int sx0 = j * stride_w;

                            // fp16 accumulation — faster but lower precision
                            // than the fp32 accumulation in forward_fp16s.
                            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
                            int area = 0;

                            for (int ki = 0; ki < kernel_h; ki++)
                            {
                                int sy = sy0 + ki;

                                // Skip rows in the top padding band.
                                if (sy < pad_top)
                                    continue;

                                // Remaining rows are all bottom padding.
                                if (sy >= h - pad_bottom - htailpad)
                                    break;

                                for (int kj = 0; kj < kernel_w; kj++)
                                {
                                    int sx = sx0 + kj;

                                    if (sx < pad_left)
                                        continue;

                                    if (sx >= w - pad_right - wtailpad)
                                        break;

                                    vfloat16m1_t _val = vle16_v_f16m1(m.row<const __fp16>(sy) + sx * packn, vl);
                                    _sum = vfadd_vv_f16m1(_sum, _val, vl);
                                    area += 1;
                                }
                            }

                            vfloat16m1_t _avg = vfmul_vf_f16m1(_sum, (__fp16)(1.f / area), vl);
                            vse16_v_f16m1(outptr + j * packn, _avg, vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }

            if (elempack == 1)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        int sy0 = i * stride_h;

                        for (int j = 0; j < outw; j++)
                        {
                            int sx0 = j * stride_w;

                            // Scalar fp16 accumulation, skipping padded taps.
                            __fp16 sum = (__fp16)0.f;
                            int area = 0;

                            for (int ki = 0; ki < kernel_h; ki++)
                            {
                                int sy = sy0 + ki;

                                if (sy < pad_top)
                                    continue;

                                if (sy >= h - pad_bottom - htailpad)
                                    break;

                                for (int kj = 0; kj < kernel_w; kj++)
                                {
                                    int sx = sx0 + kj;

                                    if (sx < pad_left)
                                        continue;

                                    if (sx >= w - pad_right - wtailpad)
                                        break;

                                    __fp16 val = m.row<const __fp16>(sy)[sx];
                                    sum += val;
                                    area += 1;
                                }
                            }

                            outptr[j] = sum / area;
                        }

                        outptr += outw;
                    }
                }
            }
        }

        if (avgpool_count_include_pad == 1)
        {
            // Divisor is always the full kernel size maxk.
            if (elempack == packn)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    // Precompute the reciprocal in fp16 once per channel.
                    const __fp16 inv_maxk = (__fp16)(1.f / maxk);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                            for (int k = 0; k < maxk; k++)
                            {
                                vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                                _sum = vfadd_vv_f16m1(_sum, _val, vl);
                            }

                            vfloat16m1_t _avg = vfmul_vf_f16m1(_sum, inv_maxk, vl);
                            vse16_v_f16m1(outptr + j * packn, _avg, vl);
                        }

                        outptr += outw * packn;
                    }
                }
            }

            if (elempack == 1)
            {
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob_bordered.channel(q);
                    __fp16* outptr = top_blob.channel(q);

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                            __fp16 sum = (__fp16)0.f;

                            for (int k = 0; k < maxk; k++)
                            {
                                __fp16 val = sptr[space_ofs[k]];
                                sum += val;
                            }

                            outptr[j] = sum / maxk;
                        }

                        outptr += outw;
                    }
                }
            }
        }
    }

    return 0;
}
960
#endif // __riscv_vector && __riscv_zfh
961

962
} // namespace ncnn
963

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.