innerproduct_vulkan.cpp
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "innerproduct_vulkan.h"

#include "layer_shader_type.h"
#include "layer_type.h"

namespace ncnn {

InnerProduct_vulkan::InnerProduct_vulkan()
{
    support_vulkan = true;
    support_image_storage = true;

    flatten = 0;

    pipeline_innerproduct = 0;

    pipeline_innerproduct_sum8 = 0;
    pipeline_innerproduct_reduce_sum8 = 0;

    pipeline_innerproduct_gemm = 0;
}

int InnerProduct_vulkan::create_pipeline(const Option& _opt)
{
    Option opt = _opt;
    const Mat& shape = bottom_shapes.empty() ? Mat() : bottom_shapes[0];
    const Mat& out_shape = top_shapes.empty() ? Mat() : top_shapes[0];

    const int num_input = weight_data_size / num_output;

    int in_elempack = opt.use_shader_pack8 && num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
    int out_elempack = opt.use_shader_pack8 && num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
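
    // elempack is the number of scalars stored per element: 8 when the pack8
    // shader path is enabled and the channel count divides by 8, else 4 when
    // divisible by 4, else scalar.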

    // src = inch-outch
    // dst = pa-pb-inch/pa-outch/pb
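    // e.g. with in_elempack = out_elempack = 4, each packed element of
    // weight_data_packed is a 4x4 tile: 4 consecutive input coefficients for
    // each of 4 consecutive output channels, copied by the i/j loops below.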
    {
        Mat weight_data_r2 = weight_data.reshape(num_input, num_output);

        weight_data_packed.create(num_input / in_elempack, num_output / out_elempack, (size_t)4 * in_elempack * out_elempack, in_elempack * out_elempack);

        for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
        {
            float* g00 = weight_data_packed.row(q / out_elempack);

            for (int p = 0; p + (in_elempack - 1) < num_input; p += in_elempack)
            {
                for (int i = 0; i < out_elempack; i++)
                {
                    const float* k0 = weight_data_r2.row(q + i);
                    k0 += p;

                    for (int j = 0; j < in_elempack; j++)
                    {
                        g00[0] = k0[j];

                        g00++;
                    }
                }
            }
        }
    }

    if (bias_term)
    {
        convert_packing(bias_data, bias_data_packed, out_elempack, opt);
    }

    if (shape.dims == 2 && shape.w == num_input)
    {
        // gemm
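        // A 2D bottom blob whose row width equals num_input is treated as a
        // batch of samples, so the layer reduces to a matrix product of the
        // input rows with the packed weights and gets a dedicated pipeline.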
        int elempack = opt.use_shader_pack8 && shape.h % 8 == 0 ? 8 : shape.h % 4 == 0 ? 4 : 1;

        size_t elemsize;
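        // fp16 storage keeps every scalar at 2 bytes; fp16 packed only halves
        // packed (pack4/pack8) elements and leaves scalar data at 4 bytes.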
        if (opt.use_fp16_storage)
        {
            elemsize = elempack * 2u;
        }
        else if (opt.use_fp16_packed)
        {
            elemsize = elempack == 1 ? 4u : elempack * 2u;
        }
        else
        {
            elemsize = elempack * 4u;
        }

        Mat shape_packed = Mat(shape.w, shape.h / elempack, (void*)0, elemsize, elempack);
        Mat out_shape_packed = Mat(out_shape.w, out_shape.h / elempack, (void*)0, elemsize, elempack);

        // check blob shape
        if (!vkdev->shape_support_image_storage(shape) || !vkdev->shape_support_image_storage(out_shape))
        {
            support_image_storage = false;
            opt.use_image_storage = false;
        }

        // check blob shape
        if (!vkdev->shape_support_image_storage(shape_packed) || !vkdev->shape_support_image_storage(out_shape_packed))
        {
            support_image_storage = false;
            opt.use_image_storage = false;
        }

        std::vector<vk_specialization_type> specializations(4 + 10);
        specializations[0].i = bias_term;
        specializations[1].i = activation_type;
        specializations[2].f = activation_params.w >= 1 ? activation_params[0] : 0.f;
        specializations[3].f = activation_params.w == 2 ? activation_params[1] : 0.f;
        specializations[4 + 0].i = shape.dims;
        specializations[4 + 1].i = shape.w;
        specializations[4 + 2].i = shape.h;
        specializations[4 + 3].i = shape.c;
        specializations[4 + 4].i = shape.cstep;
        specializations[4 + 5].i = out_shape.dims;
        specializations[4 + 6].i = out_shape.w;
        specializations[4 + 7].i = out_shape.h;
        specializations[4 + 8].i = out_shape.c;
        specializations[4 + 9].i = out_shape.cstep;

        Mat local_size_xyz(std::min(16, num_output / out_elempack), 4, 1, (void*)0);
        if (out_shape.dims != 0)
        {
            local_size_xyz.w = std::min(16, out_shape.w / out_elempack);
            local_size_xyz.h = std::min(4, out_shape.h);
            local_size_xyz.c = 1;
        }

        int shader_type_index = -1;
        if (in_elempack == 1 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm;
        if (in_elempack == 4 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp4;
        if (in_elempack == 1 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp1to4;
        if (in_elempack == 4 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm_wp4to1;
        if (in_elempack == 8 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp8;
        if (in_elempack == 1 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp1to8;
        if (in_elempack == 8 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm_wp8to1;
        if (in_elempack == 4 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp4to8;
        if (in_elempack == 8 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp8to4;

        pipeline_innerproduct_gemm = new Pipeline(vkdev);
        pipeline_innerproduct_gemm->set_optimal_local_size_xyz(local_size_xyz);
        pipeline_innerproduct_gemm->create(shader_type_index, opt, specializations);

        if (opt.lightmode)
        {
            weight_data.release();
            bias_data.release();
        }

        return 0;
    }

    Mat shape_flatten;
    if (shape.dims != 0)
    {
        shape_flatten = Mat(shape.w * shape.h * shape.c, (void*)0);
    }

    size_t elemsize;
    size_t out_elemsize;
    if (opt.use_fp16_storage)
    {
        elemsize = in_elempack * 2u;
        out_elemsize = out_elempack * 2u;
    }
    else if (opt.use_fp16_packed)
    {
        elemsize = in_elempack == 1 ? 4u : in_elempack * 2u;
        out_elemsize = out_elempack == 1 ? 4u : out_elempack * 2u;
    }
    else
    {
        elemsize = in_elempack * 4u;
        out_elemsize = out_elempack * 4u;
    }

    Mat shape_flatten_packed;
    if (shape_flatten.dims == 1) shape_flatten_packed = Mat(shape_flatten.w / in_elempack, (void*)0, elemsize, in_elempack);

    Mat out_shape_packed;
    if (out_shape.dims == 1) out_shape_packed = Mat(out_shape.w / out_elempack, (void*)0, out_elemsize, out_elempack);

    // check blob shape
    if (!vkdev->shape_support_image_storage(shape_flatten_packed) || !vkdev->shape_support_image_storage(out_shape_packed))
    {
        support_image_storage = false;
        opt.use_image_storage = false;
    }

    // check weight shape
    Mat weight_data_packed(num_input / in_elempack, num_output / out_elempack, (void*)0, (size_t)4 * in_elempack * out_elempack, in_elempack * out_elempack);
    if (!vkdev->shape_support_image_storage(weight_data_packed))
    {
        support_image_storage = false;
        opt.use_image_storage = false;
    }

    if (shape.dims == 0)
    {
        // check weight shape
        Mat weight_data_packed(num_input, num_output, (void*)0, (size_t)4u, 1);
        if (!vkdev->shape_support_image_storage(weight_data_packed))
        {
            support_image_storage = false;
            opt.use_image_storage = false;
        }
    }

    {
        flatten = ncnn::create_layer_vulkan(ncnn::LayerType::Flatten);
        flatten->vkdev = vkdev;

        flatten->bottom_shapes.resize(1);
        flatten->bottom_shapes[0] = shape;
        flatten->top_shapes.resize(1);
        flatten->top_shapes[0] = shape_flatten;

        ncnn::ParamDict pd;

        flatten->load_param(pd);

        flatten->create_pipeline(opt);
    }

    if (num_input / in_elempack >= 32)
    {
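        // For wide inputs (32 or more packed elements) the dot product is done
        // in two passes: a sum8 pass writes partial sums over groups of 8
        // packed elements, then a reduce pass folds the partials and applies
        // bias and activation, exposing more parallelism across the reduction.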
        Mat out_sum8_shape((num_input / in_elempack + 7) / 8, num_output, (void*)0);
        Mat out_sum8_shape_packed = Mat(out_sum8_shape.w, out_sum8_shape.h / out_elempack, (void*)0, out_elemsize, out_elempack);
        if (!vkdev->shape_support_image_storage(out_sum8_shape_packed))
        {
            support_image_storage = false;
            opt.use_image_storage = false;
        }

        // sum8
        {
            std::vector<vk_specialization_type> specializations(0 + 3);
            specializations[0 + 0].i = shape_flatten_packed.w;
            specializations[0 + 1].i = out_sum8_shape_packed.w;
            specializations[0 + 2].i = out_sum8_shape_packed.h;

            int shader_type_index = -1;
            if (in_elempack == 1 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_sum8;
            if (in_elempack == 4 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_sum8_pack4;
            if (in_elempack == 1 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_sum8_pack1to4;
            if (in_elempack == 4 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_sum8_pack4to1;
            if (in_elempack == 8 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_sum8_pack8;
            if (in_elempack == 1 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_sum8_pack1to8;
            if (in_elempack == 8 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_sum8_pack8to1;
            if (in_elempack == 4 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_sum8_pack4to8;
            if (in_elempack == 8 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_sum8_pack8to4;

            pipeline_innerproduct_sum8 = new Pipeline(vkdev);
            pipeline_innerproduct_sum8->set_local_size_xyz(8, std::min(8, num_output / out_elempack), 1);
            pipeline_innerproduct_sum8->create(shader_type_index, opt, specializations);
        }

        // reduce sum8
        {
            std::vector<vk_specialization_type> specializations(4 + 3);
            specializations[0].i = bias_term;
            specializations[1].i = activation_type;
            specializations[2].f = activation_params.w >= 1 ? activation_params[0] : 0.f;
            specializations[3].f = activation_params.w == 2 ? activation_params[1] : 0.f;
            specializations[4 + 0].i = out_sum8_shape_packed.w;
            specializations[4 + 1].i = out_sum8_shape_packed.h;
            specializations[4 + 2].i = out_shape_packed.w;

            int shader_type_index = -1;
            if (out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_reduce_sum8;
            if (out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_reduce_sum8_pack4;
            if (out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_reduce_sum8_pack8;

            pipeline_innerproduct_reduce_sum8 = new Pipeline(vkdev);
            pipeline_innerproduct_reduce_sum8->set_local_size_xyz(std::min(64, num_output / out_elempack), 1, 1);
            pipeline_innerproduct_reduce_sum8->create(shader_type_index, opt, specializations);
        }
    }
    else
    {
        std::vector<vk_specialization_type> specializations(4 + 10);
        specializations[0].i = bias_term;
        specializations[1].i = activation_type;
        specializations[2].f = activation_params.w >= 1 ? activation_params[0] : 0.f;
        specializations[3].f = activation_params.w == 2 ? activation_params[1] : 0.f;
        specializations[4 + 0].i = shape_flatten_packed.dims;
        specializations[4 + 1].i = shape_flatten_packed.w;
        specializations[4 + 2].i = shape_flatten_packed.h;
        specializations[4 + 3].i = shape_flatten_packed.c;
        specializations[4 + 4].i = shape_flatten_packed.cstep;
        specializations[4 + 5].i = out_shape_packed.dims;
        specializations[4 + 6].i = out_shape_packed.w;
        specializations[4 + 7].i = out_shape_packed.h;
        specializations[4 + 8].i = out_shape_packed.c;
        specializations[4 + 9].i = out_shape_packed.cstep;

        Mat local_size_xyz(std::min(64, num_output / out_elempack), 1, 1, (void*)0);
        if (out_shape_packed.dims != 0)
        {
            local_size_xyz.w = std::min(64, out_shape_packed.w);
            local_size_xyz.h = 1;
            local_size_xyz.c = 1;
        }

        int shader_type_index = -1;
        if (in_elempack == 1 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct;
        if (in_elempack == 4 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_pack4;
        if (in_elempack == 1 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_pack1to4;
        if (in_elempack == 4 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_pack4to1;
        if (in_elempack == 8 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_pack8;
        if (in_elempack == 1 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_pack1to8;
        if (in_elempack == 8 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_pack8to1;
        if (in_elempack == 4 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_pack4to8;
        if (in_elempack == 8 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_pack8to4;

        pipeline_innerproduct = new Pipeline(vkdev);
        pipeline_innerproduct->set_optimal_local_size_xyz(local_size_xyz);
        pipeline_innerproduct->create(shader_type_index, opt, specializations);
    }

    // gemm for no shape hint
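    // Without a shape hint the 2D gemm case can still occur at runtime, so the
    // gemm pipeline is created as well, with all shape specialization constants
    // zeroed; the real shapes arrive as runtime constants in forward().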
335
    if (shape.dims == 0)
336
    {
337
        std::vector<vk_specialization_type> specializations(4 + 10);
338
        specializations[0].i = bias_term;
339
        specializations[1].i = activation_type;
340
        specializations[2].f = activation_params.w >= 1 ? activation_params[0] : 0.f;
341
        specializations[3].f = activation_params.w == 2 ? activation_params[1] : 0.f;
342
        specializations[4 + 0].i = 0;
343
        specializations[4 + 1].i = 0;
344
        specializations[4 + 2].i = 0;
345
        specializations[4 + 3].i = 0;
346
        specializations[4 + 4].i = 0;
347
        specializations[4 + 5].i = 0;
348
        specializations[4 + 6].i = 0;
349
        specializations[4 + 7].i = 0;
350
        specializations[4 + 8].i = 0;
351
        specializations[4 + 9].i = 0;
352

353
        Mat local_size_xyz(std::min(16, num_output / out_elempack), 4, 1, (void*)0);
354

355
        int shader_type_index = -1;
356
        if (in_elempack == 1 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm;
357
        if (in_elempack == 4 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp4;
358
        if (in_elempack == 1 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp1to4;
359
        if (in_elempack == 4 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm_wp4to1;
360
        if (in_elempack == 8 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp8;
361
        if (in_elempack == 1 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp1to8;
362
        if (in_elempack == 8 && out_elempack == 1) shader_type_index = LayerShaderType::innerproduct_gemm_wp8to1;
363
        if (in_elempack == 4 && out_elempack == 8) shader_type_index = LayerShaderType::innerproduct_gemm_wp4to8;
364
        if (in_elempack == 8 && out_elempack == 4) shader_type_index = LayerShaderType::innerproduct_gemm_wp8to4;
365

366
        pipeline_innerproduct_gemm = new Pipeline(vkdev);
367
        pipeline_innerproduct_gemm->set_optimal_local_size_xyz(local_size_xyz);
368
        pipeline_innerproduct_gemm->create(shader_type_index, opt, specializations);
369

370
        if (opt.lightmode)
371
        {
372
            weight_data.release();
373
            bias_data.release();
374
        }
375

376
        return 0;
377
    }
378

379
    if (opt.lightmode)
380
    {
381
        weight_data.release();
382
        bias_data.release();
383
    }
384

385
    return 0;
386
}
387

388
int InnerProduct_vulkan::destroy_pipeline(const Option& opt)
389
{
390
    if (flatten)
391
    {
392
        flatten->destroy_pipeline(opt);
393
        delete flatten;
394
        flatten = 0;
395
    }
396

397
    delete pipeline_innerproduct;
398
    pipeline_innerproduct = 0;
399

400
    delete pipeline_innerproduct_sum8;
401
    delete pipeline_innerproduct_reduce_sum8;
402
    pipeline_innerproduct_sum8 = 0;
403
    pipeline_innerproduct_reduce_sum8 = 0;
404

405
    delete pipeline_innerproduct_gemm;
406
    pipeline_innerproduct_gemm = 0;
407

408
    return 0;
409
}
410

411
int InnerProduct_vulkan::upload_model(VkTransfer& cmd, const Option& opt)
412
{
413
    if (support_image_storage && opt.use_image_storage)
414
    {
415
        cmd.record_upload(weight_data_packed, weight_data_gpu_image, opt);
416
    }
417
    else
418
    {
419
        cmd.record_upload(weight_data_packed, weight_data_gpu, opt);
420
    }
421

422
    weight_data_packed.release();
423

424
    if (bias_term)
425
    {
426
        if (support_image_storage && opt.use_image_storage)
427
        {
428
            cmd.record_upload(bias_data_packed, bias_data_gpu_image, opt);
429
        }
430
        else
431
        {
432
            cmd.record_upload(bias_data_packed, bias_data_gpu, opt);
433
        }
434

435
        bias_data_packed.release();
436
    }
437

438
    return 0;
439
}
440

441
int InnerProduct_vulkan::forward(const VkMat& bottom_blob, VkMat& top_blob, VkCompute& cmd, const Option& opt) const
442
{
443
    const int num_input = weight_data_size / num_output;
444

445
    int in_elempack = opt.use_shader_pack8 && num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
446
    int out_elempack = opt.use_shader_pack8 && num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
447

448
    if (bottom_blob.dims == 2 && bottom_blob.w == num_input)
449
    {
450
        // gemm
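        // Blob data runs unpacked (elempack 1) through the gemm shaders: a
        // packed input is first converted down, the result is written to an
        // unpacked top blob and re-packed at the end; the _wpN shader variants
        // refer to the packed weight layout.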
        int h = bottom_blob.h;
        size_t elemsize = bottom_blob.elemsize;
        int elempack = bottom_blob.elempack;

        // unpacking
        VkMat bottom_blob_unpacked = bottom_blob;
        if (elempack > 1)
        {
            Option opt_pack1 = opt;
            opt_pack1.blob_vkallocator = opt.workspace_vkallocator;

            vkdev->convert_packing(bottom_blob, bottom_blob_unpacked, 1, cmd, opt_pack1);
        }

        top_blob.create(num_output, h, elemsize, elempack, opt.blob_vkallocator);
        if (top_blob.empty())
            return -100;

        VkMat top_blob_unpacked = top_blob;
        if (elempack > 1)
        {
            top_blob_unpacked.create(num_output, h * elempack, bottom_blob_unpacked.elemsize, 1, opt.workspace_vkallocator);
            if (top_blob_unpacked.empty())
                return -100;
        }

        std::vector<VkMat> bindings(4);
        bindings[0] = bottom_blob_unpacked;
        bindings[1] = top_blob_unpacked;
        bindings[2] = weight_data_gpu;
        bindings[3] = bias_data_gpu;

        std::vector<vk_constant_type> constants(10);
        constants[0].i = bottom_blob_unpacked.dims;
        constants[1].i = bottom_blob_unpacked.w;
        constants[2].i = bottom_blob_unpacked.h;
        constants[3].i = bottom_blob_unpacked.c;
        constants[4].i = bottom_blob_unpacked.cstep;
        constants[5].i = top_blob_unpacked.dims;
        constants[6].i = top_blob_unpacked.w;
        constants[7].i = top_blob_unpacked.h;
        constants[8].i = top_blob_unpacked.c;
        constants[9].i = top_blob_unpacked.cstep;

        VkMat dispatcher;
        dispatcher.w = top_blob_unpacked.w / out_elempack;
        dispatcher.h = top_blob_unpacked.h;
        dispatcher.c = 1;

        cmd.record_pipeline(pipeline_innerproduct_gemm, bindings, constants, dispatcher);

        // packing
        if (elempack > 1)
        {
            vkdev->convert_packing(top_blob_unpacked, top_blob, elempack, cmd, opt);
        }

        return 0;
    }

    // flatten
    VkMat bottom_blob_flattened = bottom_blob;
    {
        Option opt_flatten = opt;
        opt_flatten.blob_vkallocator = opt.workspace_vkallocator;

        flatten->forward(bottom_blob, bottom_blob_flattened, cmd, opt_flatten);
    }

    size_t elemsize = bottom_blob_flattened.elemsize;
    size_t out_elemsize = elemsize / in_elempack * out_elempack;
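    // elemsize / in_elempack is the per-scalar byte size, so scaling it by
    // out_elempack gives the output element size; the fp16-packed-only case is
    // corrected below since pack1 data stays at fp32 there.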

    if (opt.use_fp16_packed && !opt.use_fp16_storage)
    {
        if (out_elempack == 8) out_elemsize = 8 * 2u;
        if (out_elempack == 4) out_elemsize = 4 * 2u;
        if (out_elempack == 1) out_elemsize = 4u;
    }

    if (num_input / in_elempack >= 32)
    {
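        // Same split as chosen in create_pipeline(): wide inputs take the
        // two-pass sum8 + reduce_sum8 route, narrow ones the single-pass
        // innerproduct pipeline below.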
532
        // sum8
533
        VkMat top_blob_sum8;
534
        {
535
            top_blob_sum8.create((num_input / in_elempack + 7) / 8, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
536
            if (top_blob_sum8.empty())
537
                return -100;
538

539
            std::vector<VkMat> bindings(3);
540
            bindings[0] = bottom_blob_flattened;
541
            bindings[1] = top_blob_sum8;
542
            bindings[2] = weight_data_gpu;
543

544
            std::vector<vk_constant_type> constants(3);
545
            constants[0].i = bottom_blob_flattened.w;
546
            constants[1].i = top_blob_sum8.w;
547
            constants[2].i = top_blob_sum8.h;
548

549
            cmd.record_pipeline(pipeline_innerproduct_sum8, bindings, constants, top_blob_sum8);
550
        }
551

552
        // reduce sum8
553
        {
554
            top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
555
            if (top_blob.empty())
556
                return -100;
557

558
            std::vector<VkMat> bindings(3);
559
            bindings[0] = top_blob_sum8;
560
            bindings[1] = top_blob;
561
            bindings[2] = bias_data_gpu;
562

563
            std::vector<vk_constant_type> constants(3);
564
            constants[0].i = top_blob_sum8.w;
565
            constants[1].i = top_blob_sum8.h;
566
            constants[2].i = top_blob.w;
567

568
            cmd.record_pipeline(pipeline_innerproduct_reduce_sum8, bindings, constants, top_blob);
569
        }
570
    }
571
    else
572
    {
573
        top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
574
        if (top_blob.empty())
575
            return -100;
576

577
        std::vector<VkMat> bindings(4);
578
        bindings[0] = bottom_blob_flattened;
579
        bindings[1] = top_blob;
580
        bindings[2] = weight_data_gpu;
581
        bindings[3] = bias_data_gpu;
582

583
        std::vector<vk_constant_type> constants(10);
584
        constants[0].i = bottom_blob_flattened.dims;
585
        constants[1].i = bottom_blob_flattened.w;
586
        constants[2].i = bottom_blob_flattened.h;
587
        constants[3].i = bottom_blob_flattened.c;
588
        constants[4].i = bottom_blob_flattened.cstep;
589
        constants[5].i = top_blob.dims;
590
        constants[6].i = top_blob.w;
591
        constants[7].i = top_blob.h;
592
        constants[8].i = top_blob.c;
593
        constants[9].i = top_blob.cstep;
594

595
        cmd.record_pipeline(pipeline_innerproduct, bindings, constants, top_blob);
596
    }
597

598
    return 0;
599
}
600

601
int InnerProduct_vulkan::forward(const VkImageMat& bottom_blob, VkImageMat& top_blob, VkCompute& cmd, const Option& opt) const
602
{
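    // This VkImageMat overload mirrors the buffer path above, binding the
    // *_gpu_image weight and bias textures; the cstep constants are passed as 0
    // because cstep has no meaning for image storage.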
    const int num_input = weight_data_size / num_output;

    int in_elempack = opt.use_shader_pack8 && num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
    int out_elempack = opt.use_shader_pack8 && num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;

    if (bottom_blob.dims == 2 && bottom_blob.w == num_input)
    {
        // gemm
        int h = bottom_blob.h;
        size_t elemsize = bottom_blob.elemsize;
        int elempack = bottom_blob.elempack;

        // unpacking
        VkImageMat bottom_blob_unpacked = bottom_blob;
        if (elempack > 1)
        {
            Option opt_pack1 = opt;
            opt_pack1.blob_vkallocator = opt.workspace_vkallocator;

            vkdev->convert_packing(bottom_blob, bottom_blob_unpacked, 1, cmd, opt_pack1);
        }

        top_blob.create(num_output, h, elemsize, elempack, opt.blob_vkallocator);
        if (top_blob.empty())
            return -100;

        VkImageMat top_blob_unpacked = top_blob;
        if (elempack > 1)
        {
            top_blob_unpacked.create(num_output, h * elempack, bottom_blob_unpacked.elemsize, 1, opt.workspace_vkallocator);
            if (top_blob_unpacked.empty())
                return -100;
        }

        std::vector<VkImageMat> bindings(4);
        bindings[0] = bottom_blob_unpacked;
        bindings[1] = top_blob_unpacked;
        bindings[2] = weight_data_gpu_image;
        bindings[3] = bias_data_gpu_image;

        std::vector<vk_constant_type> constants(10);
        constants[0].i = bottom_blob_unpacked.dims;
        constants[1].i = bottom_blob_unpacked.w;
        constants[2].i = bottom_blob_unpacked.h;
        constants[3].i = bottom_blob_unpacked.c;
        constants[4].i = 0; //bottom_blob_unpacked.cstep;
        constants[5].i = top_blob_unpacked.dims;
        constants[6].i = top_blob_unpacked.w;
        constants[7].i = top_blob_unpacked.h;
        constants[8].i = top_blob_unpacked.c;
        constants[9].i = 0; //top_blob_unpacked.cstep;

        VkImageMat dispatcher;
        dispatcher.w = top_blob_unpacked.w / out_elempack;
        dispatcher.h = top_blob_unpacked.h;
        dispatcher.c = 1;

        cmd.record_pipeline(pipeline_innerproduct_gemm, bindings, constants, dispatcher);

        // packing
        if (elempack > 1)
        {
            vkdev->convert_packing(top_blob_unpacked, top_blob, elempack, cmd, opt);
        }

        return 0;
    }

    // flatten
    VkImageMat bottom_blob_flattened = bottom_blob;
    {
        Option opt_flatten = opt;
        opt_flatten.blob_vkallocator = opt.workspace_vkallocator;

        flatten->forward(bottom_blob, bottom_blob_flattened, cmd, opt_flatten);
    }

    size_t elemsize = bottom_blob_flattened.elemsize;
    size_t out_elemsize = elemsize / in_elempack * out_elempack;

    if (opt.use_fp16_packed && !opt.use_fp16_storage)
    {
        if (out_elempack == 8) out_elemsize = 8 * 2u;
        if (out_elempack == 4) out_elemsize = 4 * 2u;
        if (out_elempack == 1) out_elemsize = 4u;
    }

    if (num_input / in_elempack >= 32)
    {
        // sum8
        VkImageMat top_blob_sum8;
        {
            top_blob_sum8.create((num_input / in_elempack + 7) / 8, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
            if (top_blob_sum8.empty())
                return -100;

            std::vector<VkImageMat> bindings(3);
            bindings[0] = bottom_blob_flattened;
            bindings[1] = top_blob_sum8;
            bindings[2] = weight_data_gpu_image;

            std::vector<vk_constant_type> constants(3);
            constants[0].i = bottom_blob_flattened.w;
            constants[1].i = top_blob_sum8.w;
            constants[2].i = top_blob_sum8.h;

            cmd.record_pipeline(pipeline_innerproduct_sum8, bindings, constants, top_blob_sum8);
        }

        // reduce sum8
        {
            top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
            if (top_blob.empty())
                return -100;

            std::vector<VkImageMat> bindings(3);
            bindings[0] = top_blob_sum8;
            bindings[1] = top_blob;
            bindings[2] = bias_data_gpu_image;

            std::vector<vk_constant_type> constants(3);
            constants[0].i = top_blob_sum8.w;
            constants[1].i = top_blob_sum8.h;
            constants[2].i = top_blob.w;

            cmd.record_pipeline(pipeline_innerproduct_reduce_sum8, bindings, constants, top_blob);
        }
    }
    else
    {
        top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_vkallocator);
        if (top_blob.empty())
            return -100;

        std::vector<VkImageMat> bindings(4);
        bindings[0] = bottom_blob_flattened;
        bindings[1] = top_blob;
        bindings[2] = weight_data_gpu_image;
        bindings[3] = bias_data_gpu_image;

        std::vector<vk_constant_type> constants(10);
        constants[0].i = bottom_blob_flattened.dims;
        constants[1].i = bottom_blob_flattened.w;
        constants[2].i = bottom_blob_flattened.h;
        constants[3].i = bottom_blob_flattened.c;
        constants[4].i = 0; //bottom_blob_flattened.cstep;
        constants[5].i = top_blob.dims;
        constants[6].i = top_blob.w;
        constants[7].i = top_blob.h;
        constants[8].i = top_blob.c;
        constants[9].i = 0; //top_blob.cstep;

        cmd.record_pipeline(pipeline_innerproduct, bindings, constants, top_blob);
    }

    return 0;
}

} // namespace ncnn