// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "testutil.h"

#include "cpu.h"
#include "layer.h"
#include "mat.h"
#include "prng.h"

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#if NCNN_VULKAN
#include "command.h"
#include "gpu.h"
#endif // NCNN_VULKAN

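// All random test inputs come from this process-local PRNG, so a failing
// test can be reproduced exactly by re-running with the same SRAND() seed.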
static struct prng_rand_t g_prng_rand_state;

void SRAND(int seed)
{
    prng_srand(seed, &g_prng_rand_state);
}

uint64_t RAND()
{
    return prng_rand(&g_prng_rand_state);
}

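// Random value helpers. RandomFloat() maps the full 64-bit PRNG range onto
// [a, b]; results very close to zero are flushed to exactly 0.f so that
// tests never feed denormals into layer implementations.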
float RandomFloat(float a, float b)
{
    float random = ((float)RAND()) / (float)uint64_t(-1); //RAND_MAX;
    float diff = b - a;
    float r = random * diff;
    float v = a + r;
    // generate denormal as zero
    if (v < 0.0001 && v > -0.0001)
        v = 0.f;
    return v;
}

int RandomInt(int a, int b)
{
    float random = ((float)RAND()) / (float)uint64_t(-1); //RAND_MAX;
    int diff = b - a;
    float r = random * diff;
    return a + (int)r;
}

signed char RandomS8()
{
    return (signed char)RandomInt(-127, 127);
}

void Randomize(ncnn::Mat& m, float a, float b)
{
    for (size_t i = 0; i < m.total(); i++)
    {
        m[i] = RandomFloat(a, b);
    }
}

void RandomizeInt(ncnn::Mat& m, int a, int b)
{
    for (size_t i = 0; i < m.total(); i++)
    {
        ((int*)m)[i] = RandomInt(a, b);
    }
}

void RandomizeS8(ncnn::Mat& m)
{
    for (size_t i = 0; i < m.total(); i++)
    {
        ((signed char*)m)[i] = RandomS8();
    }
}

ncnn::Mat RandomMat(int w, float a, float b)
{
    ncnn::Mat m(w);
    Randomize(m, a, b);
    return m;
}

ncnn::Mat RandomMat(int w, int h, float a, float b)
{
    ncnn::Mat m(w, h);
    Randomize(m, a, b);
    return m;
}

ncnn::Mat RandomMat(int w, int h, int c, float a, float b)
{
    ncnn::Mat m(w, h, c);
    Randomize(m, a, b);
    return m;
}

ncnn::Mat RandomMat(int w, int h, int d, int c, float a, float b)
{
    ncnn::Mat m(w, h, d, c);
    Randomize(m, a, b);
    return m;
}

ncnn::Mat RandomIntMat(int w)
{
    ncnn::Mat m(w);
    RandomizeInt(m);
    return m;
}

ncnn::Mat RandomIntMat(int w, int h)
{
    ncnn::Mat m(w, h);
    RandomizeInt(m);
    return m;
}

ncnn::Mat RandomIntMat(int w, int h, int c)
{
    ncnn::Mat m(w, h, c);
    RandomizeInt(m);
    return m;
}

ncnn::Mat RandomIntMat(int w, int h, int d, int c)
{
    ncnn::Mat m(w, h, d, c);
    RandomizeInt(m);
    return m;
}

ncnn::Mat RandomS8Mat(int w)
{
    ncnn::Mat m(w, (size_t)1u);
    RandomizeS8(m);
    return m;
}

ncnn::Mat RandomS8Mat(int w, int h)
{
    ncnn::Mat m(w, h, (size_t)1u);
    RandomizeS8(m);
    return m;
}

ncnn::Mat RandomS8Mat(int w, int h, int c)
{
    ncnn::Mat m(w, h, c, (size_t)1u);
    RandomizeS8(m);
    return m;
}

ncnn::Mat RandomS8Mat(int w, int h, int d, int c)
{
    ncnn::Mat m(w, h, d, c, (size_t)1u);
    RandomizeS8(m);
    return m;
}

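// Computes per-row int8 quantization scales for an m x k matrix stored with
// row stride ldx: each row's scale is 127 over the largest absolute value
// found in that row.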
ncnn::Mat scales_mat(const ncnn::Mat& mat, int m, int k, int ldx)
{
    ncnn::Mat weight_scales(m);
    for (int i = 0; i < m; ++i)
    {
        const float* ptr = (const float*)(mat.data) + i * ldx;
        float min = ptr[0], _max = ptr[0];
        for (int j = 0; j < k; ++j)
        {
            if (min > ptr[j])
            {
                min = ptr[j];
            }
            if (_max < ptr[j])
            {
                _max = ptr[j];
            }
        }
        const float abs_min = fabsf(min), abs_max = fabsf(_max);
        weight_scales[i] = 127.f / (abs_min > abs_max ? abs_min : abs_max);
    }
    return weight_scales;
}

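// Float comparison with an absolute tolerance for small magnitudes and a
// relative tolerance of epsilon * max(|a|, |b|) for large ones.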
bool NearlyEqual(float a, float b, float epsilon)
{
    if (a == b)
        return true;

    float diff = (float)fabs(a - b);
    if (diff <= epsilon)
        return true;

    // relative error
    return diff < epsilon * std::max(fabs(a), fabs(b));
}

int Compare(const ncnn::Mat& a, const ncnn::Mat& b, float epsilon)
{
#define CHECK_MEMBER(m)                                                                      \
    if (a.m != b.m)                                                                          \
    {                                                                                        \
        fprintf(stderr, #m " does not match    expect %d but got %d\n", (int)a.m, (int)b.m); \
        return -1;                                                                           \
    }

    CHECK_MEMBER(dims)
    CHECK_MEMBER(w)
    CHECK_MEMBER(h)
    CHECK_MEMBER(d)
    CHECK_MEMBER(c)
    CHECK_MEMBER(elemsize)
    CHECK_MEMBER(elempack)

#undef CHECK_MEMBER

    for (int q = 0; q < a.c; q++)
    {
        const ncnn::Mat ma = a.channel(q);
        const ncnn::Mat mb = b.channel(q);
        for (int z = 0; z < a.d; z++)
        {
            const ncnn::Mat da = ma.depth(z);
            const ncnn::Mat db = mb.depth(z);
            for (int i = 0; i < a.h; i++)
            {
                const float* pa = da.row(i);
                const float* pb = db.row(i);
                for (int j = 0; j < a.w; j++)
                {
                    if (!NearlyEqual(pa[j], pb[j], epsilon))
                    {
                        fprintf(stderr, "value does not match at c:%d d:%d h:%d w:%d    expect %f but got %f\n", q, z, i, j, pa[j], pb[j]);
                        return -1;
                    }
                }
            }
        }
    }

    return 0;
}

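// CompareMat() first normalizes both sides to unpacked fp32 (elempack 1,
// casting fp16/int8 storage back to fp32), so that Compare() can walk the
// elements of any layout with the same loop.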
int CompareMat(const ncnn::Mat& a, const ncnn::Mat& b, float epsilon)
{
    ncnn::Option opt;
    opt.num_threads = 1;

    if (a.elempack != 1)
    {
        ncnn::Mat a1;
        ncnn::convert_packing(a, a1, 1, opt);
        return CompareMat(a1, b, epsilon);
    }

    if (b.elempack != 1)
    {
        ncnn::Mat b1;
        ncnn::convert_packing(b, b1, 1, opt);
        return CompareMat(a, b1, epsilon);
    }

    if (a.elemsize == 2u)
    {
        ncnn::Mat a32;
        cast_float16_to_float32(a, a32, opt);
        return CompareMat(a32, b, epsilon);
    }
    if (a.elemsize == 1u)
    {
        ncnn::Mat a32;
        cast_int8_to_float32(a, a32, opt);
        return CompareMat(a32, b, epsilon);
    }

    if (b.elemsize == 2u)
    {
        ncnn::Mat b32;
        cast_float16_to_float32(b, b32, opt);
        return CompareMat(a, b32, epsilon);
    }
    if (b.elemsize == 1u)
    {
        ncnn::Mat b32;
        cast_int8_to_float32(b, b32, opt);
        return CompareMat(a, b32, epsilon);
    }

    return Compare(a, b, epsilon);
}

int CompareMat(const std::vector<ncnn::Mat>& a, const std::vector<ncnn::Mat>& b, float epsilon)
{
    if (a.size() != b.size())
    {
        fprintf(stderr, "output blob count does not match %zu %zu\n", a.size(), b.size());
        return -1;
    }

    for (size_t i = 0; i < a.size(); i++)
    {
        if (CompareMat(a[i], b[i], epsilon))
        {
            fprintf(stderr, "output blob %zu does not match\n", i);
            return -1;
        }
    }

    return 0;
}

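// Roughly mirrors what ncnn::Net does to a blob before it reaches a layer:
// cast the fp32 input to the storage type the layer prefers on this CPU
// (fp16 or bf16), then repack it to the widest elempack the enabled ISA
// supports. convert_to_vanilla_layout() below undoes both steps on outputs.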
static int convert_to_optimal_layout(const ncnn::Mat& a, ncnn::Mat& a4, const ncnn::Option& opt, const ncnn::Layer* op, int flag)
{
    // clang-format off
    // *INDENT-OFF*
#if NCNN_ARM82
    if (opt.use_fp16_storage && ncnn::cpu_support_arm_asimdhp() && op->support_fp16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::cast_float32_to_float16(a, a4, opt);
    }
    else
#endif // NCNN_ARM82
#if NCNN_VFPV4
    if (opt.use_fp16_storage && !opt.use_bf16_storage && ncnn::cpu_support_arm_vfpv4() && op->support_fp16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::cast_float32_to_float16(a, a4, opt);
    }
    else
#endif // NCNN_VFPV4
#if NCNN_RVV
    if (opt.use_fp16_storage && ncnn::cpu_support_riscv_v() && ncnn::cpu_support_riscv_zfh() && op->support_fp16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::cast_float32_to_float16(a, a4, opt);
    }
    else
#endif // NCNN_RVV
#if NCNN_BF16
    if (opt.use_bf16_storage && op->support_bf16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::cast_float32_to_bfloat16(a, a4, opt);
    }
    else
#endif // NCNN_BF16
    if (opt.use_fp16_storage && op->support_fp16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::cast_float32_to_float16(a, a4, opt);
    }
    else
    {
        a4 = a;
    }
    // *INDENT-ON*
    // clang-format on

    if (opt.use_packing_layout && op->support_packing && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_PACKING))
    {
        // resolve dst_elempack
        int dims = a4.dims;
        int elemcount = 0;
        if (dims == 1) elemcount = a4.elempack * a4.w;
        if (dims == 2) elemcount = a4.elempack * a4.h;
        if (dims == 3 || dims == 4) elemcount = a4.elempack * a4.c;

        int elembits = a4.elembits();

        int dst_elempack = 1;

        if (elembits == 32)
        {
#if NCNN_AVX512
            if (elemcount % 16 == 0 && ncnn::cpu_support_x86_avx512())
                dst_elempack = 16;
            else if (elemcount % 8 == 0 && ncnn::cpu_support_x86_avx())
                dst_elempack = 8;
            else if (elemcount % 4 == 0)
                dst_elempack = 4;
#elif NCNN_AVX
            if (elemcount % 8 == 0 && ncnn::cpu_support_x86_avx())
                dst_elempack = 8;
            else if (elemcount % 4 == 0)
                dst_elempack = 4;
#elif NCNN_RVV
            const int packn = ncnn::cpu_riscv_vlenb() / (elembits / 8);
            if (elemcount % packn == 0)
                dst_elempack = packn;
#else
            if (elemcount % 4 == 0)
                dst_elempack = 4;
#endif
        }
        if (elembits == 16)
        {
#if NCNN_ARM82
            if (elemcount % 8 == 0 && ncnn::cpu_support_arm_asimdhp() && opt.use_fp16_arithmetic)
                dst_elempack = 8;
            else if (elemcount % 4 == 0)
                dst_elempack = 4;
#elif NCNN_RVV
            const int packn = ncnn::cpu_riscv_vlenb() / 2;
            if (elemcount % packn == 0)
                dst_elempack = packn;
#else
            if (elemcount % 4 == 0)
                dst_elempack = 4;
#endif
        }
        if (elembits == 8)
        {
#if NCNN_RVV
            const int packn = ncnn::cpu_riscv_vlenb() / 1;
            if (elemcount % packn == 0)
                dst_elempack = packn;
#else
            if (elemcount % 8 == 0)
                dst_elempack = 8;
#endif
        }

        if (flag & TEST_LAYER_ENABLE_FORCE_INPUT_PACK8)
            dst_elempack = 8;

        ncnn::Mat a4_packed;
        ncnn::convert_packing(a4, a4_packed, dst_elempack, opt);
        a4 = a4_packed;
    }

    return 0;
}

static int convert_to_vanilla_layout(const ncnn::Mat& c4, ncnn::Mat& c, const ncnn::Option& opt, const ncnn::Layer* op, int flag)
{
    ncnn::Mat c4_unpacked;
    if (c4.elempack != 1)
    {
        ncnn::convert_packing(c4, c4_unpacked, 1, opt);
    }
    else
    {
        c4_unpacked = c4;
    }

    // clang-format off
    // *INDENT-OFF*
#if NCNN_ARM82
    if (opt.use_fp16_storage && ncnn::cpu_support_arm_asimdhp() && op->support_fp16_storage && c4_unpacked.elembits() == 16)
    {
        ncnn::cast_float16_to_float32(c4_unpacked, c, opt);
    }
    else
#endif // NCNN_ARM82
#if NCNN_VFPV4
    if (opt.use_fp16_storage && !opt.use_bf16_storage && ncnn::cpu_support_arm_vfpv4() && op->support_fp16_storage && c4_unpacked.elembits() == 16)
    {
        ncnn::cast_float16_to_float32(c4_unpacked, c, opt);
    }
    else
#endif // NCNN_VFPV4
#if NCNN_RVV
    if (opt.use_fp16_storage && ncnn::cpu_support_riscv_v() && ncnn::cpu_support_riscv_zfh() && op->support_fp16_storage && c4_unpacked.elembits() == 16)
    {
        ncnn::cast_float16_to_float32(c4_unpacked, c, opt);
    }
    else
#endif // NCNN_RVV
#if NCNN_BF16
    if (opt.use_bf16_storage && op->support_bf16_storage && c4_unpacked.elembits() == 16)
    {
        ncnn::cast_bfloat16_to_float32(c4_unpacked, c, opt);
    }
    else
#endif // NCNN_BF16
    if (opt.use_fp16_storage && op->support_fp16_storage && c4_unpacked.elembits() == 16)
    {
        ncnn::cast_float16_to_float32(c4_unpacked, c, opt);
    }
    else
    {
        c = c4_unpacked;
    }
    // *INDENT-ON*
    // clang-format on

    return 0;
}

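// Runs the reference (naive) implementation with every optimization turned
// off; its output is the ground truth that the cpu and gpu paths below are
// compared against.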
int test_layer_naive(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const std::vector<ncnn::Mat>& a, int top_blob_count, std::vector<ncnn::Mat>& b, void (*func)(ncnn::Layer*), int flag)
{
    ncnn::Layer* op = ncnn::create_layer_naive(typeindex);

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    op->load_param(pd);

    if (op->one_blob_only && a.size() != 1)
    {
        fprintf(stderr, "layer with one_blob_only consumes multiple inputs\n");
        delete op;
        return -1;
    }

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.lightmode = false;
    opt.use_packing_layout = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_shader_pack8 = false;
    opt.use_image_storage = false;
    opt.use_bf16_storage = false;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    b.resize(top_blob_count);

    if (op->support_inplace)
    {
        for (size_t i = 0; i < a.size(); i++)
        {
            b[i] = a[i].clone();
        }

        op->forward_inplace(b, opt);
    }
    else
    {
        op->forward(a, b, opt);
    }

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

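// Runs the optimized cpu implementation under the caller-supplied options.
// The return value 233 is a sentinel meaning "this option combination is not
// supported by the layer, skip it"; callers never treat it as a failure.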
int test_layer_cpu(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const std::vector<ncnn::Mat>& a, int top_blob_count, std::vector<ncnn::Mat>& c, const std::vector<ncnn::Mat>& top_shapes, void (*func)(ncnn::Layer*), int flag)
{
    ncnn::Layer* op = ncnn::create_layer_cpu(typeindex);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        delete op;
        return 233;
    }

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    if (!top_shapes.empty())
    {
        op->bottom_shapes = a;
        op->top_shapes = top_shapes;
    }

    op->load_param(pd);

    if (op->one_blob_only && a.size() != 1)
    {
        fprintf(stderr, "layer with one_blob_only consumes multiple inputs\n");
        delete op;
        return -1;
    }

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    std::vector<ncnn::Mat> a4(a.size());

    for (size_t i = 0; i < a4.size(); i++)
    {
        convert_to_optimal_layout(a[i], a4[i], opt, op, flag);
    }

    c.resize(top_blob_count);

    if (op->support_inplace)
    {
        for (size_t i = 0; i < a4.size(); i++)
        {
            c[i] = a4[i].clone();
        }

        op->forward_inplace(c, opt);
    }
    else
    {
        op->forward(a4, c, opt);
    }

    for (size_t i = 0; i < c.size(); i++)
    {
        convert_to_vanilla_layout(c[i], c[i], opt, op, flag);
    }

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

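// Runs the Vulkan implementation: weights are uploaded once through a
// VkTransfer command, then inputs are uploaded, forwarded and downloaded
// through VkCompute, using image or buffer storage depending on what the
// layer supports. Option bits the device cannot honor are switched off first.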
#if NCNN_VULKAN
int test_layer_gpu(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const std::vector<ncnn::Mat>& a, int top_blob_count, std::vector<ncnn::Mat>& d, const std::vector<ncnn::Mat>& top_shapes, void (*func)(ncnn::Layer*), int flag)
{
    if (!_opt.use_packing_layout)
    {
        // pack1 test is useless for gpu
        return 233;
    }

    ncnn::Layer* op = ncnn::create_layer_vulkan(typeindex);
    if (!op)
    {
        return 233;
    }

    op->load_param(pd);

    if (!op->support_vulkan)
    {
        delete op;
        return 233;
    }

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    op->vkdev = vkdev;

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    if (!top_shapes.empty())
    {
        op->bottom_shapes = a;
        op->top_shapes = top_shapes;
    }

    if (op->one_blob_only && a.size() != 1)
    {
        fprintf(stderr, "layer with one_blob_only consumes multiple inputs\n");
        delete op;
        return -1;
    }

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::VkWeightAllocator g_weight_vkallocator(vkdev);
    ncnn::VkWeightStagingAllocator g_weight_staging_vkallocator(vkdev);

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;

#if __APPLE__
    opt.use_image_storage = false;
#endif

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;
    if (!vkdev->info.support_fp16_uniform()) opt.use_fp16_uniform = false;
    if (!vkdev->info.support_fp16_arithmetic()) opt.use_fp16_arithmetic = false;
    if (!vkdev->info.support_int8_packed()) opt.use_int8_packed = false;
    if (!vkdev->info.support_int8_storage()) opt.use_int8_storage = false;
    if (!vkdev->info.support_int8_uniform()) opt.use_int8_uniform = false;
    if (!vkdev->info.support_int8_arithmetic()) opt.use_int8_arithmetic = false;
    if (!vkdev->info.support_cooperative_matrix()) opt.use_cooperative_matrix = false;

    // FIXME fp16a may produce large error
    opt.use_fp16_arithmetic = false;

    op->create_pipeline(opt);

    if (!op->support_vulkan)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    {
        ncnn::VkTransfer cmd(vkdev);

        ncnn::Option opt_upload = opt;
        opt_upload.blob_vkallocator = &g_weight_vkallocator;
        opt_upload.workspace_vkallocator = &g_weight_vkallocator;
        opt_upload.staging_vkallocator = &g_weight_staging_vkallocator;

        op->upload_model(cmd, opt_upload);

        cmd.submit_and_wait();
    }

    d.resize(top_blob_count);

    {
        // forward
        ncnn::VkCompute cmd(vkdev);

        if (op->support_image_storage && opt.use_image_storage)
        {
            // upload
            std::vector<ncnn::VkImageMat> a_gpu(a.size());
            for (size_t i = 0; i < a_gpu.size(); i++)
            {
                cmd.record_upload(a[i], a_gpu[i], opt);
            }

            std::vector<ncnn::VkImageMat> d_gpu(top_blob_count);
            if (op->support_inplace)
            {
                op->forward_inplace(a_gpu, cmd, opt);

                d_gpu = a_gpu;
            }
            else
            {
                op->forward(a_gpu, d_gpu, cmd, opt);
            }

            // download
            for (size_t i = 0; i < d_gpu.size(); i++)
            {
                cmd.record_download(d_gpu[i], d[i], opt);
            }
        }
        else
        {
            // upload
            std::vector<ncnn::VkMat> a_gpu(a.size());
            for (size_t i = 0; i < a_gpu.size(); i++)
            {
                cmd.record_upload(a[i], a_gpu[i], opt);
            }

            std::vector<ncnn::VkMat> d_gpu(top_blob_count);
            if (op->support_inplace)
            {
                op->forward_inplace(a_gpu, cmd, opt);

                d_gpu = a_gpu;
            }
            else
            {
                op->forward(a_gpu, d_gpu, cmd, opt);
            }

            // download
            for (size_t i = 0; i < d_gpu.size(); i++)
            {
                cmd.record_download(d_gpu[i], d[i], opt);
            }
        }

        cmd.submit_and_wait();
    }

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);
    g_weight_vkallocator.clear();
    g_weight_staging_vkallocator.clear();

    return 0;
}
#endif // NCNN_VULKAN

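// Cross-checks one layer: the naive output b is the reference, and the cpu
// and gpu paths are each run twice, once without and once with shape hints
// (the naive outputs double as the hinted top shapes).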
int test_layer(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const std::vector<ncnn::Mat>& a, int top_blob_count, const std::vector<ncnn::Mat>& top_shapes, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // naive
    std::vector<ncnn::Mat> b;
    {
        int ret = test_layer_naive(typeindex, pd, weights, a, top_blob_count, b, func, flag);
        if (ret != 233 && ret != 0)
        {
            fprintf(stderr, "test_layer_naive failed\n");
            return -1;
        }
    }

    // cpu
    {
        std::vector<ncnn::Mat> c;
        int ret = test_layer_cpu(typeindex, pd, weights, _opt, a, top_blob_count, c, std::vector<ncnn::Mat>(), func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, c, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_cpu failed\n");
            return -1;
        }
    }

    // cpu shape hint
    {
        std::vector<ncnn::Mat> c;
        int ret = test_layer_cpu(typeindex, pd, weights, _opt, a, top_blob_count, c, b, func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, c, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_cpu failed with shape hint\n");
            return -1;
        }
    }

#if NCNN_VULKAN
    // gpu
    if (!(flag & TEST_LAYER_DISABLE_GPU_TESTING))
    {
        std::vector<ncnn::Mat> d;
        int ret = test_layer_gpu(typeindex, pd, weights, _opt, a, top_blob_count, d, std::vector<ncnn::Mat>(), func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, d, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_gpu failed\n");
            return -1;
        }
    }

    // gpu shape hint
    if (!(flag & TEST_LAYER_DISABLE_GPU_TESTING))
    {
        std::vector<ncnn::Mat> d;
        int ret = test_layer_gpu(typeindex, pd, weights, _opt, a, top_blob_count, d, b, func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, d, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_gpu failed with shape hint\n");
            return -1;
        }
    }
#endif // NCNN_VULKAN

    return 0;
}

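// The overloads below are the single-blob counterparts of the functions
// above, used by tests whose layer takes exactly one input and produces
// exactly one output.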
int test_layer_naive(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Mat& a, ncnn::Mat& b, void (*func)(ncnn::Layer*), int flag)
{
    ncnn::Layer* op = ncnn::create_layer_naive(typeindex);

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.lightmode = false;
    opt.use_packing_layout = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_shader_pack8 = false;
    opt.use_image_storage = false;
    opt.use_bf16_storage = false;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    if (op->support_inplace)
    {
        b = a.clone();
        op->forward_inplace(b, opt);
    }
    else
    {
        op->forward(a, b, opt);
    }

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

int test_layer_cpu(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const ncnn::Mat& a, ncnn::Mat& c, const ncnn::Mat& top_shape, void (*func)(ncnn::Layer*), int flag)
{
    ncnn::Layer* op = ncnn::create_layer_cpu(typeindex);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        delete op;
        return 233;
    }

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    if (top_shape.dims)
    {
        op->bottom_shapes.resize(1);
        op->top_shapes.resize(1);
        op->bottom_shapes[0] = a;
        op->top_shapes[0] = top_shape;
    }

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    ncnn::Mat a4;
    convert_to_optimal_layout(a, a4, opt, op, flag);

    if (op->support_inplace)
    {
        c = a4.clone();
        op->forward_inplace(c, opt);
    }
    else
    {
        op->forward(a4, c, opt);
    }

    convert_to_vanilla_layout(c, c, opt, op, flag);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

#if NCNN_VULKAN
int test_layer_gpu(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const ncnn::Mat& a, ncnn::Mat& d, const ncnn::Mat& top_shape, void (*func)(ncnn::Layer*), int flag)
{
    if (!_opt.use_packing_layout)
    {
        // pack1 test is useless for gpu
        return 233;
    }

    ncnn::Layer* op = ncnn::create_layer_vulkan(typeindex);
    if (!op)
    {
        return 233;
    }

    op->load_param(pd);

    if (!op->support_vulkan)
    {
        delete op;
        return 233;
    }

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    op->vkdev = vkdev;

    if (func)
    {
        (*func)((ncnn::Layer*)op);
    }

    if (top_shape.dims)
    {
        op->bottom_shapes.resize(1);
        op->top_shapes.resize(1);
        op->bottom_shapes[0] = a;
        op->top_shapes[0] = top_shape;
    }

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::VkWeightAllocator g_weight_vkallocator(vkdev);
    ncnn::VkWeightStagingAllocator g_weight_staging_vkallocator(vkdev);

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;

#if __APPLE__
    opt.use_image_storage = false;
#endif

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;
    if (!vkdev->info.support_fp16_uniform()) opt.use_fp16_uniform = false;
    if (!vkdev->info.support_fp16_arithmetic()) opt.use_fp16_arithmetic = false;
    if (!vkdev->info.support_int8_packed()) opt.use_int8_packed = false;
    if (!vkdev->info.support_int8_storage()) opt.use_int8_storage = false;
    if (!vkdev->info.support_int8_uniform()) opt.use_int8_uniform = false;
    if (!vkdev->info.support_int8_arithmetic()) opt.use_int8_arithmetic = false;
    if (!vkdev->info.support_cooperative_matrix()) opt.use_cooperative_matrix = false;

    // FIXME fp16a may produce large error
    opt.use_fp16_arithmetic = false;

    op->create_pipeline(opt);

    if (!op->support_vulkan)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    {
        ncnn::VkTransfer cmd(vkdev);

        ncnn::Option opt_upload = opt;
        opt_upload.blob_vkallocator = &g_weight_vkallocator;
        opt_upload.workspace_vkallocator = &g_weight_vkallocator;
        opt_upload.staging_vkallocator = &g_weight_staging_vkallocator;

        op->upload_model(cmd, opt_upload);

        cmd.submit_and_wait();
    }

    {
        // forward
        ncnn::VkCompute cmd(vkdev);

        if (op->support_image_storage && opt.use_image_storage)
        {
            // upload
            ncnn::VkImageMat a_gpu;
            cmd.record_upload(a, a_gpu, opt);

            ncnn::VkImageMat d_gpu;
            if (op->support_inplace)
            {
                op->forward_inplace(a_gpu, cmd, opt);

                d_gpu = a_gpu;
            }
            else
            {
                op->forward(a_gpu, d_gpu, cmd, opt);
            }

            // download
            cmd.record_download(d_gpu, d, opt);
        }
        else
        {
            // upload
            ncnn::VkMat a_gpu;
            cmd.record_upload(a, a_gpu, opt);

            ncnn::VkMat d_gpu;
            if (op->support_inplace)
            {
                op->forward_inplace(a_gpu, cmd, opt);

                d_gpu = a_gpu;
            }
            else
            {
                op->forward(a_gpu, d_gpu, cmd, opt);
            }

            // download
            cmd.record_download(d_gpu, d, opt);
        }

        cmd.submit_and_wait();
    }

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);
    g_weight_vkallocator.clear();
    g_weight_staging_vkallocator.clear();

    return 0;
}
#endif // NCNN_VULKAN

int test_layer(int typeindex, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const ncnn::Mat& a, const ncnn::Mat& top_shape, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // naive
    ncnn::Mat b;
    {
        int ret = test_layer_naive(typeindex, pd, weights, a, b, func, flag);
        if (ret != 233 && ret != 0)
        {
            fprintf(stderr, "test_layer_naive failed\n");
            return -1;
        }
    }

    // cpu
    {
        ncnn::Mat c;
        int ret = test_layer_cpu(typeindex, pd, weights, _opt, a, c, ncnn::Mat(), func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, c, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_cpu failed\n");
            return -1;
        }
    }

    // cpu shape hint
    {
        ncnn::Mat c;
        int ret = test_layer_cpu(typeindex, pd, weights, _opt, a, c, b, func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, c, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_cpu failed with shape hint\n");
            return -1;
        }
    }

#if NCNN_VULKAN
    // gpu
    if (!(flag & TEST_LAYER_DISABLE_GPU_TESTING))
    {
        ncnn::Mat d;
        int ret = test_layer_gpu(typeindex, pd, weights, _opt, a, d, ncnn::Mat(), func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, d, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_gpu failed\n");
            return -1;
        }
    }

    // gpu shape hint
    if (!(flag & TEST_LAYER_DISABLE_GPU_TESTING))
    {
        ncnn::Mat d;
        int ret = test_layer_gpu(typeindex, pd, weights, _opt, a, d, b, func, flag);
        if (ret != 233 && (ret != 0 || CompareMat(b, d, epsilon) != 0))
        {
            fprintf(stderr, "test_layer_gpu failed with shape hint\n");
            return -1;
        }
    }
#endif // NCNN_VULKAN

    return 0;
}

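// test_layer_opt() prepares inputs and weights for one option set before
// dispatching to test_layer(): with fp16/bf16 storage enabled, data is
// round-tripped through the reduced precision so the reference and the
// optimized paths see the same values, and epsilon is relaxed to match
// (x100 for reduced storage, x1000 when fp16 arithmetic is enabled).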
int test_layer_opt(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& opt, const std::vector<ncnn::Mat>& a, int top_blob_count, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // fp16 representation
    std::vector<ncnn::Mat> a_fp16;
    if (opt.use_bf16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        a_fp16.resize(a.size());
        for (size_t j = 0; j < a.size(); j++)
        {
            ncnn::Mat tmp;
            ncnn::cast_float32_to_bfloat16(a[j], tmp, opt);
            ncnn::cast_bfloat16_to_float32(tmp, a_fp16[j], opt);
        }
    }
    else if ((opt.use_fp16_packed || opt.use_fp16_storage) && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        a_fp16.resize(a.size());
        for (size_t j = 0; j < a.size(); j++)
        {
            ncnn::Mat tmp;
            ncnn::cast_float32_to_float16(a[j], tmp, opt);
            ncnn::cast_float16_to_float32(tmp, a_fp16[j], opt);
        }
    }
    else
    {
        a_fp16 = a;
    }

    std::vector<ncnn::Mat> weights_fp16;
    float epsilon_fp16;
    if (opt.use_bf16_storage)
    {
        weights_fp16.resize(weights.size());
        for (size_t j = 0; j < weights.size(); j++)
        {
            if (weights[j].elembits() != 32)
            {
                weights_fp16[j] = weights[j];
                continue;
            }

            ncnn::Mat tmp;
            ncnn::cast_float32_to_bfloat16(weights[j], tmp, opt);
            ncnn::cast_bfloat16_to_float32(tmp, weights_fp16[j], opt);
        }
        epsilon_fp16 = epsilon * 100; // 0.1
    }
    else if (opt.use_fp16_packed || opt.use_fp16_storage)
    {
        weights_fp16.resize(weights.size());
        for (size_t j = 0; j < weights.size(); j++)
        {
            if (weights[j].elembits() != 32)
            {
                weights_fp16[j] = weights[j];
                continue;
            }

            ncnn::Mat tmp;
            ncnn::cast_float32_to_float16(weights[j], tmp, opt);
            ncnn::cast_float16_to_float32(tmp, weights_fp16[j], opt);
        }
        epsilon_fp16 = epsilon * 100; // 0.1
    }
    else
    {
        weights_fp16 = weights;
        epsilon_fp16 = epsilon;
    }

    if (opt.use_fp16_arithmetic)
    {
        epsilon_fp16 = epsilon * 1000; // 1.0
    }

    std::vector<ncnn::Mat> top_shapes;
    int ret = test_layer(ncnn::layer_to_index(layer_type), pd, weights_fp16, opt, a_fp16, top_blob_count, top_shapes, epsilon_fp16, func, flag);
    if (ret != 0)
    {
        fprintf(stderr, "test_layer %s failed use_packing_layout=%d use_fp16_packed=%d use_fp16_storage=%d use_fp16_arithmetic=%d use_shader_pack8=%d use_bf16_storage=%d use_image_storage=%d use_sgemm_convolution=%d use_winograd_convolution=%d\n", layer_type, opt.use_packing_layout, opt.use_fp16_packed, opt.use_fp16_storage, opt.use_fp16_arithmetic, opt.use_shader_pack8, opt.use_bf16_storage, opt.use_image_storage, opt.use_sgemm_convolution, opt.use_winograd_convolution);
        return ret;
    }

    return 0;
}

int test_layer_opt(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& opt, const ncnn::Mat& a, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // fp16 representation
    ncnn::Mat a_fp16;
    if (opt.use_bf16_storage && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::Mat tmp;
        ncnn::cast_float32_to_bfloat16(a, tmp, opt);
        ncnn::cast_bfloat16_to_float32(tmp, a_fp16, opt);
    }
    else if ((opt.use_fp16_packed || opt.use_fp16_storage) && !(flag & TEST_LAYER_DISABLE_AUTO_INPUT_CASTING))
    {
        ncnn::Mat tmp;
        ncnn::cast_float32_to_float16(a, tmp, opt);
        ncnn::cast_float16_to_float32(tmp, a_fp16, opt);
    }
    else
    {
        a_fp16 = a;
    }

    std::vector<ncnn::Mat> weights_fp16;
    float epsilon_fp16;
    if (opt.use_bf16_storage)
    {
        weights_fp16.resize(weights.size());
        for (size_t j = 0; j < weights.size(); j++)
        {
            if (weights[j].elembits() != 32)
            {
                weights_fp16[j] = weights[j];
                continue;
            }

            ncnn::Mat tmp;
            ncnn::cast_float32_to_bfloat16(weights[j], tmp, opt);
            ncnn::cast_bfloat16_to_float32(tmp, weights_fp16[j], opt);
        }
        epsilon_fp16 = epsilon * 100; // 0.1
    }
    else if (opt.use_fp16_packed || opt.use_fp16_storage)
    {
        weights_fp16.resize(weights.size());
        for (size_t j = 0; j < weights.size(); j++)
        {
            if (weights[j].elembits() != 32)
            {
                weights_fp16[j] = weights[j];
                continue;
            }

            ncnn::Mat tmp;
            ncnn::cast_float32_to_float16(weights[j], tmp, opt);
            ncnn::cast_float16_to_float32(tmp, weights_fp16[j], opt);
        }
        epsilon_fp16 = epsilon * 100; // 0.1
    }
    else
    {
        weights_fp16 = weights;
        epsilon_fp16 = epsilon;
    }

    if (opt.use_fp16_arithmetic)
    {
        epsilon_fp16 = epsilon * 1000; // 1.0
    }

    ncnn::Mat top_shape;
    int ret = test_layer(ncnn::layer_to_index(layer_type), pd, weights_fp16, opt, a_fp16, top_shape, epsilon_fp16, func, flag);
    if (ret != 0)
    {
        fprintf(stderr, "test_layer %s failed use_packing_layout=%d use_fp16_packed=%d use_fp16_storage=%d use_fp16_arithmetic=%d use_shader_pack8=%d use_bf16_storage=%d use_image_storage=%d use_sgemm_convolution=%d use_winograd_convolution=%d\n", layer_type, opt.use_packing_layout, opt.use_fp16_packed, opt.use_fp16_storage, opt.use_fp16_arithmetic, opt.use_shader_pack8, opt.use_bf16_storage, opt.use_image_storage, opt.use_sgemm_convolution, opt.use_winograd_convolution);
        return ret;
    }

    return 0;
}

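// Entry point used by the per-layer tests: sweeps a fixed matrix of option
// combinations (packing, fp16 packed/storage/arithmetic, bf16 storage,
// shader pack8, image storage) and stops at the first combination that fails.
//
// Typical call from a layer test, assuming the default arguments declared in
// testutil.h (a sketch, not taken from this file):
//   ncnn::ParamDict pd;
//   std::vector<ncnn::Mat> weights;
//   int ret = test_layer("ReLU", pd, weights, RandomMat(13, 7, 3));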
int test_layer(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const std::vector<ncnn::Mat>& a, int top_blob_count, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // pack fp16p fp16s fp16a bf16s shader8 image
    const int options[][7] = {
        {0, 0, 0, 0, 0, 0, 0},
        {0, 0, 1, 0, 0, 0, 0},
        {0, 0, 1, 1, 1, 0, 0},
        {1, 0, 0, 0, 0, 0, 0},
        {1, 1, 0, 0, 1, 0, 0},
        {1, 0, 1, 0, 0, 1, 0},
        {1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1},
    };

    const int opt_count = sizeof(options) / sizeof(options[0]);

    for (int i = 0; i < opt_count; i++)
    {
        ncnn::Option opt;
        opt.num_threads = 1;
        opt.use_packing_layout = options[i][0];
        opt.use_fp16_packed = options[i][1];
        opt.use_fp16_storage = options[i][2];
        opt.use_fp16_arithmetic = options[i][3];
        opt.use_bf16_storage = options[i][4];
        opt.use_shader_pack8 = options[i][5];
        opt.use_image_storage = options[i][6];

        int ret = test_layer_opt(layer_type, pd, weights, opt, a, top_blob_count, epsilon, func, flag);
        if (ret != 0)
            return ret;
    }

    return 0;
}

int test_layer(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Mat& a, float epsilon, void (*func)(ncnn::Layer*), int flag)
{
    // pack fp16p fp16s fp16a bf16s shader8 image
    const int options[][7] = {
        {0, 0, 0, 0, 0, 0, 0},
        {0, 0, 1, 0, 0, 0, 0},
        {0, 0, 1, 1, 1, 0, 0},
        {1, 0, 0, 0, 0, 0, 0},
        {1, 1, 0, 0, 1, 0, 0},
        {1, 0, 1, 0, 0, 1, 0},
        {1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1},
    };

    const int opt_count = sizeof(options) / sizeof(options[0]);

    for (int i = 0; i < opt_count; i++)
    {
        ncnn::Option opt;
        opt.num_threads = 1;
        opt.use_packing_layout = options[i][0];
        opt.use_fp16_packed = options[i][1];
        opt.use_fp16_storage = options[i][2];
        opt.use_fp16_arithmetic = options[i][3];
        opt.use_bf16_storage = options[i][4];
        opt.use_shader_pack8 = options[i][5];
        opt.use_image_storage = options[i][6];

        int ret = test_layer_opt(layer_type, pd, weights, opt, a, epsilon, func, flag);
        if (ret != 0)
            return ret;
    }

    return 0;
}

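// Pool allocator with fault injection: it counts allocations and returns
// null on the failid-th call, which lets the OOM tests below simulate an
// allocation failure at any point inside a layer's forward pass.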
class TestOOMAllocator : public ncnn::UnlockedPoolAllocator
{
public:
    TestOOMAllocator();
    virtual void* fastMalloc(size_t size);
    virtual void fastFree(void* ptr);

    ncnn::Mutex lock;
    int counter;
    int failid;
};

TestOOMAllocator::TestOOMAllocator()
{
    counter = 0;
    failid = INT_MAX;
}

void* TestOOMAllocator::fastMalloc(size_t size)
{
    lock.lock();

    void* ptr;
    if (counter == failid)
    {
        ptr = 0;
    }
    else
    {
        ptr = ncnn::UnlockedPoolAllocator::fastMalloc(size);
    }
    counter++;

    lock.unlock();

    return ptr;
}

void TestOOMAllocator::fastFree(void* ptr)
{
    lock.lock();

    ncnn::UnlockedPoolAllocator::fastFree(ptr);

    lock.unlock();
}

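// OOM coverage: run the layer once to count how many allocations one forward
// pass performs, then re-run it once per allocation with exactly that
// allocation forced to fail; every such run must propagate the ncnn error
// code -100 rather than crash or silently succeed.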
int test_layer_oom_opt(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const std::vector<ncnn::Mat>& a, int top_blob_count, int flag)
{
    int typeindex = ncnn::layer_to_index(layer_type);
    if (typeindex == -1)
        return -1;

    ncnn::Layer* op = ncnn::create_layer_cpu(typeindex);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        delete op;
        return 233;
    }

    op->load_param(pd);

    if (op->one_blob_only && a.size() != 1)
    {
        fprintf(stderr, "layer with one_blob_only consumes multiple inputs\n");
        delete op;
        return -1;
    }

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    std::vector<ncnn::Mat> a4(a.size());

    for (size_t i = 0; i < a4.size(); i++)
    {
        convert_to_optimal_layout(a[i], a4[i], opt, op, flag);
    }

    TestOOMAllocator test_oom_allocator;
    opt.blob_allocator = &test_oom_allocator;
    opt.workspace_allocator = &test_oom_allocator;

    std::vector<ncnn::Mat> c;
    c.resize(top_blob_count);

    if (op->support_inplace)
    {
        for (size_t i = 0; i < a4.size(); i++)
        {
            c[i] = a4[i].clone();
        }

        op->forward_inplace(c, opt);
    }
    else
    {
        op->forward(a4, c, opt);
    }

    for (int i = 0; i < top_blob_count; i++)
    {
        c[i].release();
    }

    const int alloc_count = test_oom_allocator.counter;
    for (int i = 0; i < alloc_count; i++)
    {
        test_oom_allocator.counter = 0;
        test_oom_allocator.failid = i;

        int ret = 0;
        if (op->support_inplace)
        {
            for (size_t j = 0; j < a4.size(); j++)
            {
                c[j] = a4[j].clone();
            }

            ret = op->forward_inplace(c, opt);
        }
        else
        {
            ret = op->forward(a4, c, opt);
        }

        for (int j = 0; j < top_blob_count; j++)
        {
            c[j].release();
        }

        if (ret != -100)
        {
            fprintf(stderr, "oom not caught %d/%d\n", i, alloc_count);

            op->destroy_pipeline(opt);

            delete op;

            return -1;
        }
    }

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

int test_layer_oom_opt(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Option& _opt, const ncnn::Mat& a, int flag)
{
    int typeindex = ncnn::layer_to_index(layer_type);
    if (typeindex == -1)
        return -1;

    ncnn::Layer* op = ncnn::create_layer_cpu(typeindex);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        delete op;
        return 233;
    }

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    ncnn::Option opt = _opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;

    op->create_pipeline(opt);

    if (!op->support_packing && _opt.use_packing_layout)
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }
    if (!op->support_bf16_storage && !op->support_fp16_storage && (_opt.use_bf16_storage || _opt.use_fp16_arithmetic))
    {
        op->destroy_pipeline(opt);
        delete op;
        return 233;
    }

    ncnn::Mat a4;
    convert_to_optimal_layout(a, a4, opt, op, flag);

    TestOOMAllocator test_oom_allocator;
    opt.blob_allocator = &test_oom_allocator;
    opt.workspace_allocator = &test_oom_allocator;

    ncnn::Mat c;

    if (op->support_inplace)
    {
        c = a4.clone();
        op->forward_inplace(c, opt);
    }
    else
    {
        op->forward(a4, c, opt);
    }

    c.release();

    const int alloc_count = test_oom_allocator.counter;
    for (int i = 0; i < alloc_count; i++)
    {
        test_oom_allocator.counter = 0;
        test_oom_allocator.failid = i;

        int ret = 0;
        if (op->support_inplace)
        {
            c = a4.clone();
            ret = op->forward_inplace(c, opt);
        }
        else
        {
            ret = op->forward(a4, c, opt);
        }

        c.release();

        if (ret != -100)
        {
            fprintf(stderr, "oom not caught %d/%d\n", i, alloc_count);

            op->destroy_pipeline(opt);

            delete op;

            return -1;
        }
    }

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

int test_layer_oom(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const std::vector<ncnn::Mat>& a, int top_blob_count, int flag)
{
    // pack fp16p fp16s fp16a bf16s shader8 image
    const int options[][7] = {
        {0, 0, 0, 0, 0, 0, 0},
        {0, 0, 1, 0, 0, 0, 0},
        {0, 0, 1, 1, 1, 0, 0},
        {1, 0, 0, 0, 0, 0, 0},
        {1, 1, 0, 0, 1, 0, 0},
        {1, 0, 1, 0, 0, 1, 0},
        {1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1},
    };

    const int opt_count = sizeof(options) / sizeof(options[0]);

    for (int i = 0; i < opt_count; i++)
    {
        ncnn::Option opt;
        opt.num_threads = 1;
        opt.use_packing_layout = options[i][0];
        opt.use_fp16_packed = options[i][1];
        opt.use_fp16_storage = options[i][2];
        opt.use_fp16_arithmetic = options[i][3];
        opt.use_bf16_storage = options[i][4];
        opt.use_shader_pack8 = options[i][5];
        opt.use_image_storage = options[i][6];

        int ret = test_layer_oom_opt(layer_type, pd, weights, opt, a, top_blob_count, flag);
        if (ret != 233 && ret != 0)
            return ret;
    }

    return 0;
}

int test_layer_oom(const char* layer_type, const ncnn::ParamDict& pd, const std::vector<ncnn::Mat>& weights, const ncnn::Mat& a, int flag)
{
    // pack fp16p fp16s fp16a bf16s shader8 image
    const int options[][7] = {
        {0, 0, 0, 0, 0, 0, 0},
        {0, 0, 1, 0, 0, 0, 0},
        {0, 0, 1, 1, 1, 0, 0},
        {1, 0, 0, 0, 0, 0, 0},
        {1, 1, 0, 0, 1, 0, 0},
        {1, 0, 1, 0, 0, 1, 0},
        {1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1},
    };

    const int opt_count = sizeof(options) / sizeof(options[0]);

    for (int i = 0; i < opt_count; i++)
    {
        ncnn::Option opt;
        opt.num_threads = 1;
        opt.use_packing_layout = options[i][0];
        opt.use_fp16_packed = options[i][1];
        opt.use_fp16_storage = options[i][2];
        opt.use_fp16_arithmetic = options[i][3];
        opt.use_bf16_storage = options[i][4];
        opt.use_shader_pack8 = options[i][5];
        opt.use_image_storage = options[i][6];

        int ret = test_layer_oom_opt(layer_type, pd, weights, opt, a, flag);
        if (ret != 233 && ret != 0)
            return ret;
    }

    return 0;
}