ncnn

Форк
0
/
convolution_pack4_gemm.comp 
324 строки · 10.0 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#version 450

// fp16 storage / arithmetic support is toggled by the ncnn build system;
// the un-prefixed types below (sfpvec4, afpvec4, lfpvec4, unfp, psc, ...)
// are macros defined by the generated shader header and
// vulkan_activation.comp.
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

#extension GL_GOOGLE_include_directive: enable
#include "vulkan_activation.comp"

// Number of GEMM K-steps (input-channel-slice x kernel-tap products)
// staged into shared memory per cooperative tile.
#define LOCAL_MEMORY_UNROLL_INCH 8

// Convolution geometry and fused activation, baked in at pipeline creation
// time via specialization constants.
layout (constant_id = 0) const int kernel_w = 1;
layout (constant_id = 1) const int kernel_h = 1;
layout (constant_id = 2) const int dilation_w = 1;
layout (constant_id = 3) const int dilation_h = 1;
layout (constant_id = 4) const int stride_w = 1;
layout (constant_id = 5) const int stride_h = 1;
layout (constant_id = 6) const int bias_term = 0;
layout (constant_id = 7) const int activation_type = 0;
layout (constant_id = 8) const float activation_param_0 = 0;
layout (constant_id = 9) const float activation_param_1 = 0;

// Blob shapes may also be specialized; when a constant is 0 the runtime
// value from the push-constant block is used instead (selected by psc()).
#define shape_constant_id_offset 10
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outcstep = 0;

// Two compiled I/O variants: texture-backed blobs or raw storage buffers.
// Element type is a packed vec4 (pack4 layout: 4 channels per element).
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfpvec4 bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfpvec4 top_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec4 weight_data[]; };
layout (binding = 3) readonly buffer bias_blob { sfpvec4 bias_data[]; };
#endif

// Runtime shapes; consulted by psc() when the matching specialization
// constant was left at 0.
layout (push_constant) uniform parameter
{
    int w;
    int h;
    int c;
    int cstep;

    int outw;
    int outh;
    int outc;
    int outcstep;
} p;

#if NCNN_shader_local_memory
// Cooperative staging tiles, indexed [local x or y][K-step][lane 0..3]:
// tmp_v holds input columns for the 4 output positions of each local-x row,
// tmp_k holds the 4 weight rows for each local-y output-channel slice.
// First dimension 8 assumes an 8x8 local workgroup — TODO confirm against
// the dispatching host code.
shared lfpvec4 tmp_v[8][LOCAL_MEMORY_UNROLL_INCH][4];
shared lfpvec4 tmp_k[8][LOCAL_MEMORY_UNROLL_INCH][4];
#endif
80

81
// im2col + GEMM convolution over pack4 blobs.
// Each invocation produces 4 consecutive output positions (gx..gx+3) for one
// output-channel slice (gy), accumulating over N = c * kernel_w * kernel_h
// K-steps; each K-step is a vec4-by-mat4 product (4 input channels into
// 4 output channels).
void main()
{
    int gx = int(gl_GlobalInvocationID.x) * 4;
    int gy = int(gl_GlobalInvocationID.y);

    const int outsize = psc(outw) * psc(outh);

#if !NCNN_shader_local_memory
    // Early exit is only safe without shared memory: in the local-memory
    // variant every invocation must survive to reach the barrier() calls,
    // so there the bounds check is deferred until after the loops.
    if (gx >= outsize || gy >= psc(outc))
        return;
#endif

    afpvec4 sum0;
    afpvec4 sum1;
    afpvec4 sum2;
    afpvec4 sum3;

    if (bias_term == 1)
    {
#if NCNN_image_shader
        sum0 = image3d_ld4(bias_blob, ivec3(gy, 0, 0));
#else
        sum0 = buffer_ld4(bias_data, gy);
#endif
        // Same output channel slice for all 4 positions -> same bias.
        sum1 = sum0;
        sum2 = sum0;
        sum3 = sum0;
    }
    else
    {
        sum0 = afpvec4(0.f);
        sum1 = afpvec4(0.f);
        sum2 = afpvec4(0.f);
        sum3 = afpvec4(0.f);
    }

    // GEMM K dimension: input channel slices x kernel taps.
    const int maxk = kernel_w * kernel_h;
    const int N = psc(c) * maxk;

    // Decompose the 4 flat output indices into (x, y) coordinates, then
    // scale by stride to get the top-left input coordinate of each window.
    const ivec4 gx4 = gx + ivec4(0, 1, 2, 3);

    const ivec4 sy4 = gx4 / psc(outw);
    const ivec4 sx4 = gx4 % psc(outw);

    const ivec4 sxs4 = sx4 * stride_w;
    const ivec4 sys4 = sy4 * stride_h;

#if NCNN_image_shader
    for (int z = 0; z < N; z++)
    {
        // Split the K index into input channel slice and kernel tap.
        const int sz = z / maxk;
        const int kk = z % maxk;

        const int ky = kk / kernel_w;
        const int kx = kk % kernel_w;

        const ivec4 x4 = sxs4 + kx * dilation_w;
        const ivec4 y4 = sys4 + ky * dilation_h;

        afpvec4 v0 = image3d_ld4(bottom_blob, ivec3(x4.r, y4.r, sz));
        afpvec4 v1 = image3d_ld4(bottom_blob, ivec3(x4.g, y4.g, sz));
        afpvec4 v2 = image3d_ld4(bottom_blob, ivec3(x4.b, y4.b, sz));
        afpvec4 v3 = image3d_ld4(bottom_blob, ivec3(x4.a, y4.a, sz));

        // 4 weight columns form the 4x4 matrix mapping the input channel
        // quad onto the output channel quad for this K-step.
        afpmat4 k = afpmat4(
            image3d_ld4(weight_blob, ivec3(z * 4 + 0, gy, 0)),
            image3d_ld4(weight_blob, ivec3(z * 4 + 1, gy, 0)),
            image3d_ld4(weight_blob, ivec3(z * 4 + 2, gy, 0)),
            image3d_ld4(weight_blob, ivec3(z * 4 + 3, gy, 0))
        );

        sum0 += v0 * k;
        sum1 += v1 * k;
        sum2 += v2 * k;
        sum3 += v3 * k;
    }
#else
    // Weights for output slice gy: N steps of 4 sfpvec4 rows each.
    int w_offset = gy * N * 4;

#if NCNN_shader_local_memory
    const int lx = int(gl_LocalInvocationID.x);
    const int ly = int(gl_LocalInvocationID.y);

    // Full tiles of LOCAL_MEMORY_UNROLL_INCH K-steps; a sub-group of
    // invocations cooperatively stages inputs/weights in shared memory,
    // then all invocations consume the tile.
    int z = 0;
    for (; z + (LOCAL_MEMORY_UNROLL_INCH - 1) < N; z += LOCAL_MEMORY_UNROLL_INCH)
    {
        // Rows ly = 0..3 each load the input column for one of the 4
        // output positions owned by local column lx.
        if (ly < 4)
        {
            for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
            {
                const int sz = (z + z4) / maxk;
                const int k = (z + z4) % maxk;

                const int ky = k / kernel_w;
                const int kx = k % kernel_w;

                const int v_offset = sz * psc(cstep) + (sys4[ly] + ky * dilation_h) * psc(w) + sxs4[ly] + kx * dilation_w;

                tmp_v[lx][z4][ly] = sfp2lfpvec4(bottom_blob_data[v_offset]);
            }
        }

        // Columns lx = 0..3 each load one of the 4 weight rows for the
        // output channel slice owned by local row ly.
        if (lx < 4)
        {
            for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
            {
                tmp_k[ly][z4][lx] = sfp2lfpvec4(weight_data[w_offset + z4 * 4 + lx]);
            }
        }

        // Staging complete before anyone reads the tile.
        barrier();

        for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
        {
            afpvec4 v0 = lfp2afpvec4(tmp_v[lx][z4][0]);
            afpvec4 v1 = lfp2afpvec4(tmp_v[lx][z4][1]);
            afpvec4 v2 = lfp2afpvec4(tmp_v[lx][z4][2]);
            afpvec4 v3 = lfp2afpvec4(tmp_v[lx][z4][3]);

            afpvec4 k0 = lfp2afpvec4(tmp_k[ly][z4][0]);
            afpvec4 k1 = lfp2afpvec4(tmp_k[ly][z4][1]);
            afpvec4 k2 = lfp2afpvec4(tmp_k[ly][z4][2]);
            afpvec4 k3 = lfp2afpvec4(tmp_k[ly][z4][3]);

            afpmat4 k = afpmat4(k0, k1, k2, k3);

            sum0 += v0 * k;
            sum1 += v1 * k;
            sum2 += v2 * k;
            sum3 += v3 * k;
        }

        w_offset += LOCAL_MEMORY_UNROLL_INCH * 4;

        // Tile fully consumed before the next iteration overwrites it.
        barrier();
    }

    // Remainder tile: same cooperative scheme over the last N % 8 K-steps.
    if (z < N)
    {
        const int remain = N - z;

        if (ly < 4)
        {
            for (int z4 = 0; z4 < remain; z4++)
            {
                const int sz = (z + z4) / maxk;
                const int k = (z + z4) % maxk;

                const int ky = k / kernel_w;
                const int kx = k % kernel_w;

                const int v_offset = sz * psc(cstep) + (sys4[ly] + ky * dilation_h) * psc(w) + sxs4[ly] + kx * dilation_w;

                tmp_v[lx][z4][ly] = sfp2lfpvec4(bottom_blob_data[v_offset]);
            }
        }

        if (lx < 4)
        {
            for (int z4 = 0; z4 < remain; z4++)
            {
                tmp_k[ly][z4][lx] = sfp2lfpvec4(weight_data[w_offset + z4 * 4 + lx]);
            }
        }

        barrier();

        for (int z4 = 0; z4 < remain; z4++)
        {
            afpvec4 v0 = lfp2afpvec4(tmp_v[lx][z4][0]);
            afpvec4 v1 = lfp2afpvec4(tmp_v[lx][z4][1]);
            afpvec4 v2 = lfp2afpvec4(tmp_v[lx][z4][2]);
            afpvec4 v3 = lfp2afpvec4(tmp_v[lx][z4][3]);

            afpvec4 k0 = lfp2afpvec4(tmp_k[ly][z4][0]);
            afpvec4 k1 = lfp2afpvec4(tmp_k[ly][z4][1]);
            afpvec4 k2 = lfp2afpvec4(tmp_k[ly][z4][2]);
            afpvec4 k3 = lfp2afpvec4(tmp_k[ly][z4][3]);

            afpmat4 k = afpmat4(k0, k1, k2, k3);

            sum0 += v0 * k;
            sum1 += v1 * k;
            sum2 += v2 * k;
            sum3 += v3 * k;
        }
    }
#else
    // Plain buffer path: each invocation loads its own inputs and weights.
    for (int z = 0; z < N; z++)
    {
        const int sz = z / maxk;
        const int kk = z % maxk;

        const int ky = kk / kernel_w;
        const int kx = kk % kernel_w;

        const ivec4 v_offset = sz * psc(cstep) + (sys4 + ky * dilation_h) * psc(w) + sxs4 + kx * dilation_w;

        afpvec4 v0 = buffer_ld4(bottom_blob_data, v_offset.r);
        afpvec4 v1 = buffer_ld4(bottom_blob_data, v_offset.g);
        afpvec4 v2 = buffer_ld4(bottom_blob_data, v_offset.b);
        afpvec4 v3 = buffer_ld4(bottom_blob_data, v_offset.a);

        afpmat4 k = afpmat4(
            buffer_ld4(weight_data, w_offset + 0),
            buffer_ld4(weight_data, w_offset + 1),
            buffer_ld4(weight_data, w_offset + 2),
            buffer_ld4(weight_data, w_offset + 3)
        );

        sum0 += v0 * k;
        sum1 += v1 * k;
        sum2 += v2 * k;
        sum3 += v3 * k;

        w_offset += 4;
    }
#endif
#endif

#if NCNN_shader_local_memory
    // Deferred bounds check (see top of main): out-of-range invocations
    // only helped stage shared memory and must not write results.
    if (gx >= outsize || gy >= psc(outc))
        return;
#endif

    sum0 = activation_afpvec4(sum0, activation_type, activation_param_0, activation_param_1);
    sum1 = activation_afpvec4(sum1, activation_type, activation_param_0, activation_param_1);
    sum2 = activation_afpvec4(sum2, activation_type, activation_param_0, activation_param_1);
    sum3 = activation_afpvec4(sum3, activation_type, activation_param_0, activation_param_1);

#if NCNN_image_shader
    image3d_st4(top_blob, ivec3(sx4.r, sy4.r, gy), sum0);
    image3d_st4(top_blob, ivec3(sx4.g, sy4.g, gy), sum1);
    image3d_st4(top_blob, ivec3(sx4.b, sy4.b, gy), sum2);
    image3d_st4(top_blob, ivec3(sx4.a, sy4.a, gy), sum3);
#else
    const int gi = gy * psc(outcstep) + gx;

    // Guard the tail: only positions 1..3 can run past outsize here,
    // since gx itself was bounds-checked above.
    buffer_st4(top_blob_data, gi, sum0);
    if (gx + 1 < outsize) buffer_st4(top_blob_data, gi + 1, sum1);
    if (gx + 2 < outsize) buffer_st4(top_blob_data, gi + 2, sum2);
    if (gx + 3 < outsize) buffer_st4(top_blob_data, gi + 3, sum3);
#endif
}
325

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.