1
// Tencent is pleased to support the open source community by making ncnn available.
3
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
8
// https://opensource.org/licenses/BSD-3-Clause
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
18
// Vulkan GLSL compute shader (ncnn convolution). 16-bit storage lets sfp
// buffers hold fp16 data.
#extension GL_EXT_shader_16bit_storage: require
20
// Explicit fp16 arithmetic types are only required when the fp16-arithmetic
// variant is compiled. NOTE(review): the matching #endif is not visible in
// this extracted chunk.
#if NCNN_fp16_arithmetic
21
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
24
// Allow #include of shared ncnn shader helpers.
#extension GL_GOOGLE_include_directive: enable
25
// Provides activation_afp() used after the accumulation loops below.
#include "vulkan_activation.comp"
27
// Tile depth (input-channel * kernel-tap steps) processed per shared-memory round.
#define LOCAL_MEMORY_UNROLL_INCH 8
29
// Convolution hyper-parameters baked in as Vulkan specialization constants.
layout (constant_id = 0) const int kernel_w = 1;
30
layout (constant_id = 1) const int kernel_h = 1;
31
layout (constant_id = 2) const int dilation_w = 1;
32
layout (constant_id = 3) const int dilation_h = 1;
33
layout (constant_id = 4) const int stride_w = 1;
34
layout (constant_id = 5) const int stride_h = 1;
35
// Non-zero when a bias term is added to each output channel.
layout (constant_id = 6) const int bias_term = 0;
36
// Activation applied to the accumulated sums (see vulkan_activation.comp).
layout (constant_id = 7) const int activation_type = 0;
37
layout (constant_id = 8) const float activation_param_0 = 0;
38
layout (constant_id = 9) const float activation_param_1 = 0;
40
// Blob shapes may also be specialized; 0 means "read from push constants"
// via the psc() macro.
#define shape_constant_id_offset 10
41
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
42
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
43
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
44
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;
46
layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
47
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;
48
layout (constant_id = shape_constant_id_offset + 6) const int outc = 0;
49
layout (constant_id = shape_constant_id_offset + 7) const int outcstep = 0;
52
// Image-backed descriptor set (sampler/image variant).
// NOTE(review): bindings 0-3 appear twice below (sampler vs. buffer forms);
// in the upstream file these two groups are gated by
// #if NCNN_image_shader / #else / #endif, which is missing from this chunk.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
53
layout (binding = 1, imfmtc1) writeonly uniform unfp image3D top_blob;
54
layout (binding = 2) uniform unfp sampler3D weight_blob;
55
layout (binding = 3) uniform unfp sampler3D bias_blob;
57
// Buffer-backed descriptor set (SSBO variant); sfp is the storage scalar type.
layout (binding = 0) readonly buffer bottom_blob { sfp bottom_blob_data[]; };
58
layout (binding = 1) writeonly buffer top_blob { sfp top_blob_data[]; };
59
layout (binding = 2) readonly buffer weight_blob { sfp weight_data[]; };
60
layout (binding = 3) readonly buffer bias_blob { sfp bias_data[]; };
63
// Push-constant block carrying runtime shape fields read through psc().
// NOTE(review): the block body { ... } is not visible in this extracted chunk.
layout (push_constant) uniform parameter
76
// Shared-memory staging tiles for the cooperative (local-memory) path:
// tmp_v caches input samples per (local x, tile step, local y),
// tmp_k caches weights per (local y, tile step).
#if NCNN_shader_local_memory
77
shared lfp tmp_v[8][LOCAL_MEMORY_UNROLL_INCH][4];
78
shared lfp tmp_k[8][LOCAL_MEMORY_UNROLL_INCH];
83
// ---- main() body fragment. The "void main()" line, braces, accumulator
// declarations (sum0..sum3) and the multiply-accumulate statements are
// missing from this extracted chunk; the visible lines are kept byte-identical. ----
// Each invocation computes 4 consecutive output pixels (along the flattened
// x axis) for one output channel gy.
int gx = int(gl_GlobalInvocationID.x) * 4;
84
int gy = int(gl_GlobalInvocationID.y);
86
// Total pixels in one output channel plane.
const int outsize = psc(outw) * psc(outh);
88
// Early-out is only legal when the shared-memory path is off; that path
// needs every invocation alive for the cooperative loads.
#if !NCNN_shader_local_memory
89
if (gx >= outsize || gy >= psc(outc))
101
// Accumulator seeded from bias — image variant ...
sum0 = image3d_ld1(bias_blob, ivec3(gy, 0, 0));
103
// ... or buffer variant; the #if/#else selecting between them is not visible here.
sum0 = buffer_ld1(bias_data, gy);
117
// maxk = kernel taps per input channel; N = full accumulation length.
const int maxk = kernel_w * kernel_h;
118
const int N = psc(c) * maxk;
120
// The 4 flattened output indices handled by this invocation ...
const ivec4 gx4 = gx + ivec4(0, 1, 2, 3);
122
// ... split into output (x, y) coordinates ...
const ivec4 sy4 = gx4 / psc(outw);
123
const ivec4 sx4 = gx4 % psc(outw);
125
// ... then scaled by stride to each window's top-left input coordinate.
const ivec4 sxs4 = sx4 * stride_w;
126
const ivec4 sys4 = sy4 * stride_h;
129
// Accumulation over every (input channel, kernel tap) pair — image path.
for (int z = 0; z < N; z++)
131
const int sz = z / maxk;
132
const int kk = z % maxk;
134
const int ky = kk / kernel_w;
135
const int kx = kk % kernel_w;
137
// Input sample coordinates for this tap, honoring dilation.
const ivec4 x4 = sxs4 + kx * dilation_w;
138
const ivec4 y4 = sys4 + ky * dilation_h;
140
// One input sample per output pixel.
afp v0 = image3d_ld1(bottom_blob, ivec3(x4.r, y4.r, sz));
141
afp v1 = image3d_ld1(bottom_blob, ivec3(x4.g, y4.g, sz));
142
afp v2 = image3d_ld1(bottom_blob, ivec3(x4.b, y4.b, sz));
143
afp v3 = image3d_ld1(bottom_blob, ivec3(x4.a, y4.a, sz));
145
// Weight shared by all 4 output pixels for this tap and channel gy.
afp k = image3d_ld1(weight_blob, ivec3(z, gy, 0));
153
// Start of channel gy's weights in the flat weight buffer (buffer path).
int w_offset = gy * N;
155
// Shared-memory tiled path: the workgroup cooperatively stages
// LOCAL_MEMORY_UNROLL_INCH steps of inputs and weights into tmp_v/tmp_k,
// then every invocation accumulates from shared memory.
// NOTE(review): barriers and the accumulate statements between these lines
// are missing from this extracted chunk; lines kept byte-identical.
#if NCNN_shader_local_memory
156
const int lx = int(gl_LocalInvocationID.x);
157
const int ly = int(gl_LocalInvocationID.y);
160
// Full tiles of LOCAL_MEMORY_UNROLL_INCH accumulation steps.
for (; z + (LOCAL_MEMORY_UNROLL_INCH - 1) < N; z += LOCAL_MEMORY_UNROLL_INCH)
164
// Stage input samples: each (lx, ly) loads one element per tile step.
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
166
const int sz = (z + z4) / maxk;
167
const int k = (z + z4) % maxk;
169
const int ky = k / kernel_w;
170
const int kx = k % kernel_w;
172
// Flat input offset: channel plane + dilated row + dilated column,
// for the ly-th of this invocation's 4 output pixels.
const int v_offset = sz * psc(cstep) + (sys4[ly] + ky * dilation_h) * psc(w) + sxs4[ly] + kx * dilation_w;
174
tmp_v[lx][z4][ly] = sfp2lfp(bottom_blob_data[v_offset]);
180
// Stage the weight run for output channel row ly.
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
182
tmp_k[ly][z4] = sfp2lfp(weight_data[w_offset + z4]);
188
// Consume the staged tile from shared memory.
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
190
afp v0 = lfp2afp(tmp_v[lx][z4][0]);
191
afp v1 = lfp2afp(tmp_v[lx][z4][1]);
192
afp v2 = lfp2afp(tmp_v[lx][z4][2]);
193
afp v3 = lfp2afp(tmp_v[lx][z4][3]);
195
afp k = lfp2afp(tmp_k[ly][z4]);
203
// Advance to the next weight tile.
w_offset += LOCAL_MEMORY_UNROLL_INCH;
210
// Tail: fewer than LOCAL_MEMORY_UNROLL_INCH steps remain.
const int remain = N - z;
214
for (int z4 = 0; z4 < remain; z4++)
216
const int sz = (z + z4) / maxk;
217
const int k = (z + z4) % maxk;
219
const int ky = k / kernel_w;
220
const int kx = k % kernel_w;
222
const int v_offset = sz * psc(cstep) + (sys4[ly] + ky * dilation_h) * psc(w) + sxs4[ly] + kx * dilation_w;
224
tmp_v[lx][z4][ly] = sfp2lfp(bottom_blob_data[v_offset]);
230
for (int z4 = 0; z4 < remain; z4++)
232
tmp_k[ly][z4] = sfp2lfp(weight_data[w_offset + z4]);
238
for (int z4 = 0; z4 < remain; z4++)
240
afp v0 = lfp2afp(tmp_v[lx][z4][0]);
241
afp v1 = lfp2afp(tmp_v[lx][z4][1]);
242
afp v2 = lfp2afp(tmp_v[lx][z4][2]);
243
afp v3 = lfp2afp(tmp_v[lx][z4][3]);
245
afp k = lfp2afp(tmp_k[ly][z4]);
254
// Plain buffer path (no shared memory): same accumulation as the image path
// but with flat SSBO offsets. NOTE(review): the surrounding #else/#endif and
// the multiply-accumulate statements are missing from this extracted chunk.
for (int z = 0; z < N; z++)
256
const int sz = z / maxk;
257
const int kk = z % maxk;
259
const int ky = kk / kernel_w;
260
const int kx = kk % kernel_w;
262
// Four flat input offsets, one per output pixel.
const ivec4 v_offset = sz * psc(cstep) + (sys4 + ky * dilation_h) * psc(w) + sxs4 + kx * dilation_w;
264
afp v0 = buffer_ld1(bottom_blob_data, v_offset.r);
265
afp v1 = buffer_ld1(bottom_blob_data, v_offset.g);
266
afp v2 = buffer_ld1(bottom_blob_data, v_offset.b);
267
afp v3 = buffer_ld1(bottom_blob_data, v_offset.a);
269
afp k = buffer_ld1(weight_data, w_offset);
281
// In the shared-memory path the bounds check was deferred to here, after all
// cooperative loads are done.
#if NCNN_shader_local_memory
282
if (gx >= outsize || gy >= psc(outc))
286
// Apply the configured activation to each of the 4 accumulated sums.
sum0 = activation_afp(sum0, activation_type, activation_param_0, activation_param_1);
287
sum1 = activation_afp(sum1, activation_type, activation_param_0, activation_param_1);
288
sum2 = activation_afp(sum2, activation_type, activation_param_0, activation_param_1);
289
sum3 = activation_afp(sum3, activation_type, activation_param_0, activation_param_1);
292
// Store results — image variant writes by (x, y, channel) ...
image3d_st1(top_blob, ivec3(sx4.r, sy4.r, gy), sum0);
293
image3d_st1(top_blob, ivec3(sx4.g, sy4.g, gy), sum1);
294
image3d_st1(top_blob, ivec3(sx4.b, sy4.b, gy), sum2);
295
image3d_st1(top_blob, ivec3(sx4.a, sy4.a, gy), sum3);
297
// ... buffer variant writes 4 consecutive elements, guarding the tail so the
// last partial quad does not write past outsize.
const int gi = gy * psc(outcstep) + gx;
299
buffer_st1(top_blob_data, gi, sum0);
300
if (gx + 1 < outsize) buffer_st1(top_blob_data, gi + 1, sum1);
301
if (gx + 2 < outsize) buffer_st1(top_blob_data, gi + 2, sum2);
302
if (gx + 3 < outsize) buffer_st1(top_blob_data, gi + 3, sum3);