ncnn / test_cast.cpp
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "testutil.h"
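
// Element type codes passed below as Cast parameters 0 (type_from) and 1 (type_to),
// assumed to follow ncnn's Cast layer convention: 0=auto, 1=float32, 2=float16, 3=int8, 4=bfloat16.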

static int cast_cpu_naive(const ncnn::Mat& a, ncnn::Mat& b, int type_from, int type_to)
{
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;

    ncnn::Layer* op = ncnn::create_layer_naive("Cast");

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    op->forward(a, b, opt);

    op->destroy_pipeline(opt);

    delete op;

    return 0;
}

static int test_cast_cpu(const ncnn::Mat& a, int type_from, int type_to)
{
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_int8_inference = false;
    opt.use_packing_layout = false;

    ncnn::Layer* op = ncnn::create_layer_cpu("Cast");

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    cast_cpu_naive(a, a_fp16, 1, type_from);

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat c;
    op->forward(a_fp16, c, opt);

    op->destroy_pipeline(opt);

    delete op;

    if (CompareMat(b, c, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_cpu failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}

static int test_cast_cpu_packed(const ncnn::Mat& a, int type_from, int type_to)
{
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_packing_layout = false;

    ncnn::Layer* op = ncnn::create_layer_cpu("Cast");

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    cast_cpu_naive(a, a_fp16, 1, type_from);

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat a4;
    ncnn::convert_packing(a, a4, 4, opt);

    ncnn::Mat a4_fp16;
    cast_cpu_naive(a4, a4_fp16, 1, type_from);

    ncnn::Mat c;
    op->forward(a4_fp16, c, opt);

    op->destroy_pipeline(opt);

    delete op;

    if (CompareMat(b, c, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_cpu_packed failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}

#if NCNN_VULKAN
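// The GPU variants below exercise the Vulkan Cast shaders with fp16 packed storage,
// optionally pack8 layout and image (texture) storage, and compare the downloaded
// result against the naive CPU reference produced by cast_cpu_naive.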
static int test_cast_gpu_fp16p(const ncnn::Mat& a, int type_from, int type_to)
{
    if (type_to == 4 || type_from == 4)
        return 0;
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = true;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_image_storage = false;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Cast");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    if (type_from == 2)
    {
        ncnn::cast_float32_to_float16(a, a_fp16, opt);
    }
    else
    {
        a_fp16 = a;
    }

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat d;

    // pack
    ncnn::Mat a4;
    ncnn::convert_packing(a, a4, 4, opt);

    ncnn::Mat a4_fp16;
    if (type_from == 2 && a4.elempack == 4)
    {
        ncnn::cast_float32_to_float16(a4, a4_fp16, opt);
    }
    else
    {
        a4_fp16 = a4;
    }

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkMat a4_gpu;
    cmd.record_clone(a4_fp16, a4_gpu, opt);

    ncnn::VkMat d4_gpu;
    if (op->support_inplace)
    {
        op->forward_inplace(a4_gpu, cmd, opt);

        d4_gpu = a4_gpu;
    }
    else
    {
        op->forward(a4_gpu, d4_gpu, cmd, opt);
    }

    // download
    cmd.record_clone(d4_gpu, d, opt);

    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_gpu_fp16p failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}

static int test_cast_gpu_fp16p_pack8(const ncnn::Mat& a, int type_from, int type_to)
{
    if (type_to == 4 || type_from == 4)
        return 0;
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = true;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = false;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Cast");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    if (type_from == 2)
    {
        ncnn::cast_float32_to_float16(a, a_fp16, opt);
    }
    else
    {
        a_fp16 = a;
    }

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat d;

    // pack
    ncnn::Mat a4;
    ncnn::convert_packing(a, a4, 8, opt);
    if (a4.elempack != 8)
        ncnn::convert_packing(a, a4, 4, opt);

    ncnn::Mat a4_fp16;
    if (type_from == 2 && (a4.elempack == 4 || a4.elempack == 8))
    {
        ncnn::cast_float32_to_float16(a4, a4_fp16, opt);
    }
    else
    {
        a4_fp16 = a4;
    }

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkMat a4_gpu;
    cmd.record_clone(a4_fp16, a4_gpu, opt);

    ncnn::VkMat d4_gpu;
    if (op->support_inplace)
    {
        op->forward_inplace(a4_gpu, cmd, opt);

        d4_gpu = a4_gpu;
    }
    else
    {
        op->forward(a4_gpu, d4_gpu, cmd, opt);
    }

    // download
    cmd.record_clone(d4_gpu, d, opt);

    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_gpu_fp16p_pack8 failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}

static int test_cast_gpu_image_fp16p(const ncnn::Mat& a, int type_from, int type_to)
{
    if (type_to == 4 || type_from == 4)
        return 0;
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = true;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_image_storage = true;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Cast");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    if (type_from == 2)
    {
        ncnn::cast_float32_to_float16(a, a_fp16, opt);
    }
    else
    {
        a_fp16 = a;
    }

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat d;

    // pack
    ncnn::Mat a4;
    ncnn::convert_packing(a, a4, 4, opt);

    ncnn::Mat a4_fp16;
    if (type_from == 2 && a4.elempack == 4)
    {
        ncnn::cast_float32_to_float16(a4, a4_fp16, opt);
    }
    else
    {
        a4_fp16 = a4;
    }

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkImageMat a4_gpu;
    cmd.record_clone(a4_fp16, a4_gpu, opt);

    ncnn::VkImageMat d4_gpu;
    if (op->support_inplace)
    {
        op->forward_inplace(a4_gpu, cmd, opt);

        d4_gpu = a4_gpu;
    }
    else
    {
        op->forward(a4_gpu, d4_gpu, cmd, opt);
    }

    // download
    cmd.record_clone(d4_gpu, d, opt);

    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_gpu_image_fp16p failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}

static int test_cast_gpu_image_fp16p_pack8(const ncnn::Mat& a, int type_from, int type_to)
{
    if (type_to == 4 || type_from == 4)
        return 0;
    ncnn::ParamDict pd;
    pd.set(0, type_from);
    pd.set(1, type_to);

    std::vector<ncnn::Mat> weights(0);

    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    opt.use_fp16_packed = true;
    opt.use_fp16_storage = false;
    opt.use_fp16_arithmetic = false;
    opt.use_int8_storage = false;
    opt.use_int8_arithmetic = false;
    opt.use_packing_layout = true;
    opt.use_shader_pack8 = true;
    opt.use_image_storage = true;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

    ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
    ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

    opt.blob_vkallocator = blob_vkallocator;
    opt.workspace_vkallocator = blob_vkallocator;
    opt.staging_vkallocator = staging_vkallocator;

    if (!vkdev->info.support_fp16_packed()) opt.use_fp16_packed = false;
    if (!vkdev->info.support_fp16_storage()) opt.use_fp16_storage = false;

    ncnn::Layer* op = ncnn::create_layer_vulkan("Cast");

    op->vkdev = vkdev;

    op->load_param(pd);

    ncnn::ModelBinFromMatArray mb(weights.data());

    op->load_model(mb);

    op->create_pipeline(opt);

    ncnn::Mat a_fp16;
    if (type_from == 2)
    {
        ncnn::cast_float32_to_float16(a, a_fp16, opt);
    }
    else
    {
        a_fp16 = a;
    }

    ncnn::Mat b;
    cast_cpu_naive(a_fp16, b, type_from, type_to);

    ncnn::Mat d;

    // pack
    ncnn::Mat a4;
    ncnn::convert_packing(a, a4, 8, opt);
    if (a4.elempack != 8)
        ncnn::convert_packing(a, a4, 4, opt);

    ncnn::Mat a4_fp16;
    if (type_from == 2 && (a4.elempack == 4 || a4.elempack == 8))
    {
        ncnn::cast_float32_to_float16(a4, a4_fp16, opt);
    }
    else
    {
        a4_fp16 = a4;
    }

    // forward
    ncnn::VkCompute cmd(vkdev);

    // upload
    ncnn::VkImageMat a4_gpu;
    cmd.record_clone(a4_fp16, a4_gpu, opt);

    ncnn::VkImageMat d4_gpu;
    if (op->support_inplace)
    {
        op->forward_inplace(a4_gpu, cmd, opt);

        d4_gpu = a4_gpu;
    }
    else
    {
        op->forward(a4_gpu, d4_gpu, cmd, opt);
    }

    // download
    cmd.record_clone(d4_gpu, d, opt);

    cmd.submit_and_wait();

    op->destroy_pipeline(opt);

    delete op;

    vkdev->reclaim_blob_allocator(blob_vkallocator);
    vkdev->reclaim_staging_allocator(staging_vkallocator);

    if (CompareMat(b, d, 0.001) != 0)
    {
        fprintf(stderr, "test_cast_gpu_image_fp16p_pack8 failed a.dims=%d a=(%d %d %d %d) type_from=%d type_to=%d\n", a.dims, a.w, a.h, a.d, a.c, type_from, type_to);
        return -1;
    }

    return 0;
}
#endif // NCNN_VULKAN
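
// test_cast runs every backend variant above on one input and short-circuits on the first failure;
// test_cast_0..test_cast_3 cover 4-D, 3-D, 2-D and 1-D inputs for the type pairs (1,2), (2,1), (1,4) and (4,1).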

static int test_cast(const ncnn::Mat& a, int type_from, int type_to)
{
    return 0
           || test_cast_cpu(a, type_from, type_to)
           || test_cast_cpu_packed(a, type_from, type_to)
#if NCNN_VULKAN
           || test_cast_gpu_fp16p(a, type_from, type_to)
           || test_cast_gpu_fp16p_pack8(a, type_from, type_to)
           || test_cast_gpu_image_fp16p(a, type_from, type_to)
           || test_cast_gpu_image_fp16p_pack8(a, type_from, type_to)
#endif // NCNN_VULKAN
           ;
}

static int test_cast_0()
{
    return 0
           || test_cast(RandomMat(5, 6, 7, 16), 1, 2)
           || test_cast(RandomMat(3, 4, 5, 13), 1, 2)
           || test_cast(RandomMat(5, 6, 7, 16), 2, 1)
           || test_cast(RandomMat(3, 4, 5, 13), 2, 1)
           || test_cast(RandomMat(5, 6, 7, 16), 1, 4)
           || test_cast(RandomMat(3, 4, 5, 13), 1, 4)
           || test_cast(RandomMat(5, 6, 7, 16), 4, 1)
           || test_cast(RandomMat(3, 4, 5, 13), 4, 1);
}

static int test_cast_1()
{
    return 0
           || test_cast(RandomMat(5, 7, 16), 1, 2)
           || test_cast(RandomMat(3, 5, 13), 1, 2)
           || test_cast(RandomMat(5, 7, 16), 2, 1)
           || test_cast(RandomMat(3, 5, 13), 2, 1)
           || test_cast(RandomMat(5, 7, 16), 1, 4)
           || test_cast(RandomMat(3, 5, 13), 1, 4)
           || test_cast(RandomMat(5, 7, 16), 4, 1)
           || test_cast(RandomMat(3, 5, 13), 4, 1);
}

static int test_cast_2()
{
    return 0
           || test_cast(RandomMat(6, 16), 1, 2)
           || test_cast(RandomMat(7, 15), 1, 2)
           || test_cast(RandomMat(6, 16), 2, 1)
           || test_cast(RandomMat(7, 15), 2, 1)
           || test_cast(RandomMat(6, 16), 1, 4)
           || test_cast(RandomMat(7, 15), 1, 4)
           || test_cast(RandomMat(6, 16), 4, 1)
           || test_cast(RandomMat(7, 15), 4, 1);
}

static int test_cast_3()
{
    return 0
           || test_cast(RandomMat(128), 1, 2)
           || test_cast(RandomMat(127), 1, 2)
           || test_cast(RandomMat(128), 2, 1)
           || test_cast(RandomMat(127), 2, 1)
           || test_cast(RandomMat(128), 1, 4)
           || test_cast(RandomMat(127), 1, 4)
           || test_cast(RandomMat(128), 4, 1)
           || test_cast(RandomMat(127), 4, 1);
}

int main()
{
    SRAND(7767517);

    return 0
           || test_cast_0()
           || test_cast_1()
           || test_cast_2()
           || test_cast_3();
}
