ncnn

Форк
0
/
test_packing.cpp 
640 строк · 16.9 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#include "testutil.h"
16

17
// Reference implementation: repack mat `a` into `b` with the requested
// out_elempack using the unoptimized naive Packing layer.
// Returns 0 (the naive layer is assumed to succeed).
static int packing_cpu_naive(const ncnn::Mat& a, ncnn::Mat& b, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);

    ncnn::Option opt;
    opt.num_threads = 1;

    ncnn::Layer* op = ncnn::create_layer_naive("Packing");
    op->load_param(pd);

    // Packing has no weights; feed an empty model bin
    std::vector<ncnn::Mat> weights(0);
    ncnn::ModelBinFromMatArray mb(weights.data());
    op->load_model(mb);

    op->create_pipeline(opt);
    op->forward(a, b, opt);
    op->destroy_pipeline(opt);

    delete op;

    return 0;
}
45

46
// Check the optimized cpu Packing layer against the naive reference on
// fp32 data: `a` is first repacked to in_elempack, then converted to
// out_elempack by both paths and the results compared.
// Returns 0 on match, -1 on mismatch.
static int test_packing_cpu_fp32(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);

    // plain fp32 path: every fancy option disabled
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_int8_inference = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_packing_layout = false;

    ncnn::Layer* op = ncnn::create_layer_cpu("Packing");
    op->load_param(pd);

    // no weights for Packing
    std::vector<ncnn::Mat> weights(0);
    ncnn::ModelBinFromMatArray mb(weights.data());
    op->load_model(mb);

    op->create_pipeline(opt);

    // bring the input into the requested source layout
    ncnn::Mat ap;
    ncnn::convert_packing(a, ap, in_elempack, opt);

    // reference output
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    // optimized output
    ncnn::Mat c;
    op->forward(ap, c, opt);

    op->destroy_pipeline(opt);
    delete op;

    const int cmp = CompareMat(b, c, 0.001);
    if (cmp != 0)
    {
        fprintf(stderr, "test_packing_cpu_fp32 failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
92

93
// Check the optimized cpu Packing layer against the naive reference on
// fp16 storage: `a` is cast to fp16, repacked to in_elempack, converted by
// both paths, and the optimized result is cast back to fp32 for comparison
// (the naive layer operates on the fp16 payload directly).
// Returns 0 on match or when the layer lacks fp16 storage support; -1 on mismatch.
static int test_packing_cpu_fp16(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);

    // fp16 storage + arithmetic enabled, packing layout off
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_int8_inference = false;
    opt.use_fp16_storage = true;
    opt.use_fp16_arithmetic = true;
    opt.use_packing_layout = false;

    ncnn::Layer* op = ncnn::create_layer_cpu("Packing");

    // nothing to test on targets without fp16 storage
    if (!op->support_fp16_storage)
    {
        delete op;
        return 0;
    }

    op->load_param(pd);

    // no weights for Packing
    std::vector<ncnn::Mat> weights(0);
    ncnn::ModelBinFromMatArray mb(weights.data());
    op->load_model(mb);

    op->create_pipeline(opt);

    // fp32 -> fp16, then repack to the requested source layout
    ncnn::Mat a16;
    ncnn::cast_float32_to_float16(a, a16, opt);

    ncnn::Mat ap;
    ncnn::convert_packing(a16, ap, in_elempack, opt);

    // reference output
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    // optimized output
    ncnn::Mat c;
    op->forward(ap, c, opt);

    op->destroy_pipeline(opt);
    delete op;

    // back to fp32 so CompareMat sees comparable payloads
    ncnn::Mat c32;
    ncnn::cast_float16_to_float32(c, c32, opt);

    const int cmp = CompareMat(b, c32, 0.001);
    if (cmp != 0)
    {
        fprintf(stderr, "test_packing_cpu_fp16 failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
151

152
// Check the optimized cpu Packing layer against the naive reference on
// int8 data. Only the shape of `a` is used: a fresh random signed-8bit mat
// of the same dims is generated, repacked to in_elempack, converted by both
// paths, and both results are cast to fp32 for comparison.
// Returns 0 on match, -1 on mismatch.
static int test_packing_cpu_int8(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);

    // plain path; note int8 inference itself stays off
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_int8_inference = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_packing_layout = false;

    ncnn::Layer* op = ncnn::create_layer_cpu("Packing");
    op->load_param(pd);

    // no weights for Packing
    std::vector<ncnn::Mat> weights(0);
    ncnn::ModelBinFromMatArray mb(weights.data());
    op->load_model(mb);

    op->create_pipeline(opt);

    // random int8 payload with the same geometry as `a`
    ncnn::Mat a8;
    switch (a.dims)
    {
    case 1: a8 = RandomS8Mat(a.w); break;
    case 2: a8 = RandomS8Mat(a.w, a.h); break;
    case 3: a8 = RandomS8Mat(a.w, a.h, a.c); break;
    case 4: a8 = RandomS8Mat(a.w, a.h, a.d, a.c); break;
    default: break;
    }

    // bring the input into the requested source layout
    ncnn::Mat ap;
    ncnn::convert_packing(a8, ap, in_elempack, opt);

    // reference output
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    // optimized output
    ncnn::Mat c;
    op->forward(ap, c, opt);

    op->destroy_pipeline(opt);
    delete op;

    // widen both sides to fp32 for CompareMat
    ncnn::Mat b32;
    ncnn::cast_int8_to_float32(b, b32, opt);

    ncnn::Mat c32;
    ncnn::cast_int8_to_float32(c, c32, opt);

    const int cmp = CompareMat(b32, c32, 0.001);
    if (cmp != 0)
    {
        fprintf(stderr, "test_packing_cpu_int8 failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
210

211
// Run one elempack conversion through all three cpu data types.
// Returns nonzero (1) on the first failure, 0 when all pass.
static int test_packing_cpu(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    if (test_packing_cpu_fp32(a, in_elempack, out_elempack))
        return 1;

    if (test_packing_cpu_fp16(a, in_elempack, out_elempack))
        return 1;

    return test_packing_cpu_int8(a, in_elempack, out_elempack) ? 1 : 0;
}
218

219
#if NCNN_VULKAN
220

221
// Compare the vulkan Packing layer, using buffer storage for both input and
// output, against the naive cpu reference for one elempack conversion.
// Returns 0 on match, -1 on mismatch.
static int test_packing_gpu_buffer(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);
    pd.set(2, 1); // cast_type_from
    pd.set(3, 1); // cast_type_to
    pd.set(4, 0); // storage_type_from (0 = buffer)
    pd.set(5, 0); // storage_type_to   (0 = buffer)

    // Packing has no weights; an empty model bin is fed below
    std::vector<ncnn::Mat> weights(0);

    // fp32 buffer path: fp16/int8 options off, packing layout + pack8 shaders on
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = false;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    // allocators must outlive all VkMat/VkCompute usage and be reclaimed at the end
    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    // drop fp16 options the device cannot honor (defensive; both start false here)
    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Packing");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    // bring the input into the requested source layout on the host
    ncnn::Mat ap;
    ncnn::convert_packing(a, ap, in_elempack, opt);

    // cpu reference result
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    ncnn::Mat d;

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkMat a_gpu;
    cmd.record_clone(ap, a_gpu, opt);

    ncnn::VkMat d_gpu;
    op->forward(a_gpu, d_gpu, cmd, opt);

    // download
    cmd.record_clone(d_gpu, d, opt);

    // execute the recorded upload -> forward -> download sequence
    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_packing_gpu_buffer failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
307

308
// Compare the vulkan Packing layer, using image storage for both input and
// output, against the naive cpu reference for one elempack conversion.
// Returns 0 on match, -1 on mismatch.
static int test_packing_gpu_image(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    ncnn::ParamDict pd;
    pd.set(0, out_elempack);
    pd.set(2, 1); // cast_type_from
    pd.set(3, 1); // cast_type_to
    pd.set(4, 1); // storage_type_from (1 = image)
    pd.set(5, 1); // storage_type_to   (1 = image)

    // Packing has no weights; an empty model bin is fed below
    std::vector<ncnn::Mat> weights(0);

    // fp32 image path: fp16/int8 options off, image storage enabled
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = true;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    // allocators must outlive all VkImageMat/VkCompute usage and be reclaimed at the end
    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    // drop fp16 options the device cannot honor (defensive; both start false here)
    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Packing");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    // bring the input into the requested source layout on the host
    ncnn::Mat ap;
    ncnn::convert_packing(a, ap, in_elempack, opt);

    // cpu reference result
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    ncnn::Mat d;

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkImageMat a_gpu;
    cmd.record_clone(ap, a_gpu, opt);

    ncnn::VkImageMat d_gpu;
    op->forward(a_gpu, d_gpu, cmd, opt);

    // download
    cmd.record_clone(d_gpu, d, opt);

    // execute the recorded upload -> forward -> download sequence
    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_packing_gpu_image failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
394

395
// Compare VulkanDevice::convert_packing from a buffer (VkMat) source to an
// image (VkImageMat) destination against the naive cpu reference.
// No explicit Packing layer is created; the device helper builds it internally.
// Returns 0 on match, -1 on mismatch.
static int test_packing_gpu_buffer2image(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    // fp32 path with both packing layout and image storage enabled
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = true;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    // allocators must outlive all gpu mats and be reclaimed at the end
    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    // drop fp16 options the device cannot honor (defensive; both start false here)
    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    // bring the input into the requested source layout on the host
    ncnn::Mat ap;
    ncnn::convert_packing(a, ap, in_elempack, opt);

    // cpu reference result
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    ncnn::Mat d;

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkMat a_gpu;
    cmd.record_clone(ap, a_gpu, opt);

    // buffer -> image repack on the device
    ncnn::VkImageMat d_gpu;
    vkdev->convert_packing(a_gpu, d_gpu, out_elempack, cmd, opt);

    // download
    cmd.record_clone(d_gpu, d, opt);

    // execute the recorded upload -> convert -> download sequence
    cmd.submit_and_wait();

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_packing_gpu_buffer2image failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
456

457
// Compare VulkanDevice::convert_packing from an image (VkImageMat) source to
// a buffer (VkMat) destination against the naive cpu reference.
// No explicit Packing layer is created; the device helper builds it internally.
// Returns 0 on match, -1 on mismatch.
static int test_packing_gpu_image2buffer(const ncnn::Mat& a, int in_elempack, int out_elempack)
{
    // fp32 path with both packing layout and image storage enabled
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = false;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = true;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    // allocators must outlive all gpu mats and be reclaimed at the end
    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    // drop fp16 options the device cannot honor (defensive; both start false here)
    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    // bring the input into the requested source layout on the host
    ncnn::Mat ap;
    ncnn::convert_packing(a, ap, in_elempack, opt);

    // cpu reference result
    ncnn::Mat b;
    packing_cpu_naive(ap, b, out_elempack);

    ncnn::Mat d;

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkImageMat a_gpu;
    cmd.record_clone(ap, a_gpu, opt);

    // image -> buffer repack on the device
    ncnn::VkMat d_gpu;
    vkdev->convert_packing(a_gpu, d_gpu, out_elempack, cmd, opt);

    // download
    cmd.record_clone(d_gpu, d, opt);

    // execute the recorded upload -> convert -> download sequence
    cmd.submit_and_wait();

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_packing_gpu_image2buffer failed a.dims=%d a=(%d %d %d %d) in_elempack=%d out_elempack=%d\n", a.dims, a.w, a.h, a.d, a.c, in_elempack, out_elempack);
        return -1;
    }

    return 0;
}
518
#endif
519

520
// Exercise all supported cpu elempack conversions on `a`: the identity
// repacks (1->1, 4->4, 8->8) plus every cross conversion between scalar,
// 4-, 8- and 16-wide layouts. Returns nonzero on the first failure.
static int test_packing_cpu(const ncnn::Mat& a)
{
    return 0
           || test_packing_cpu(a, 1, 1)
           || test_packing_cpu(a, 4, 4)
           || test_packing_cpu(a, 8, 8) // fixed: was a duplicate (4, 8); 8->8 identity was never tested
           || test_packing_cpu(a, 1, 4)
           || test_packing_cpu(a, 4, 1)
           || test_packing_cpu(a, 1, 8)
           || test_packing_cpu(a, 8, 1)
           || test_packing_cpu(a, 4, 8)
           || test_packing_cpu(a, 8, 4)
           || test_packing_cpu(a, 1, 16)
           || test_packing_cpu(a, 16, 1)
           || test_packing_cpu(a, 4, 16)
           || test_packing_cpu(a, 16, 4)
           || test_packing_cpu(a, 8, 16)
           || test_packing_cpu(a, 16, 8);
}
539

540
#if NCNN_VULKAN
541
// Exercise all supported gpu elempack conversions on `a` through each of the
// four transfer paths (buffer->buffer, image->image, buffer->image,
// image->buffer), in that order. Returns nonzero on the first failure.
static int test_packing_gpu(const ncnn::Mat& a)
{
    static const int packs[9][2] = {
        {1, 1}, {4, 4}, {8, 8}, {1, 4}, {4, 1}, {1, 8}, {8, 1}, {4, 8}, {8, 4}
    };

    for (int i = 0; i < 9; i++)
    {
        if (test_packing_gpu_buffer(a, packs[i][0], packs[i][1]))
            return 1;
    }

    for (int i = 0; i < 9; i++)
    {
        if (test_packing_gpu_image(a, packs[i][0], packs[i][1]))
            return 1;
    }

    for (int i = 0; i < 9; i++)
    {
        if (test_packing_gpu_buffer2image(a, packs[i][0], packs[i][1]))
            return 1;
    }

    for (int i = 0; i < 9; i++)
    {
        if (test_packing_gpu_image2buffer(a, packs[i][0], packs[i][1]))
            return 1;
    }

    return 0;
}
581
#endif // NCNN_VULKAN
582

583
static int test_packing_0()
584
{
585
    ncnn::Mat a = RandomMat(9, 7, 10, 16);
586
    ncnn::Mat b = RandomMat(9, 7, 10, 3);
587
    return 0
588
           || test_packing_cpu(a)
589
           || test_packing_cpu(b)
590
#if NCNN_VULKAN
591
           || test_packing_gpu(a)
592
#endif
593
           ;
594
}
595

596
static int test_packing_1()
597
{
598
    ncnn::Mat a = RandomMat(9, 10, 16);
599
    ncnn::Mat b = RandomMat(9, 10, 3);
600
    return 0
601
           || test_packing_cpu(a)
602
           || test_packing_cpu(b)
603
#if NCNN_VULKAN
604
           || test_packing_gpu(a)
605
#endif
606
           ;
607
}
608

609
static int test_packing_2()
610
{
611
    ncnn::Mat a = RandomMat(19, 16);
612
    return 0
613
           || test_packing_cpu(a)
614
#if NCNN_VULKAN
615
           || test_packing_gpu(a)
616
#endif
617
           ;
618
}
619

620
static int test_packing_3()
621
{
622
    ncnn::Mat a = RandomMat(80);
623
    return 0
624
           || test_packing_cpu(a)
625
#if NCNN_VULKAN
626
           || test_packing_gpu(a)
627
#endif
628
           ;
629
}
630

631
int main()
632
{
633
    SRAND(7767517);
634

635
    return 0
636
           || test_packing_0()
637
           || test_packing_1()
638
           || test_packing_2()
639
           || test_packing_3();
640
}
641

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.