// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#version 450

// fp16 storage: a pack8 element is stored as two half4 vectors.
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

#extension GL_GOOGLE_include_directive: enable
#include "vulkan_activation.comp"

// Convolution, pack4 input -> pack8 output.
// Each invocation computes 4 horizontally-flattened output positions
// (gx .. gx+3) for one pack8 output channel group (gy).
// NOTE(review): the local workgroup size is expected to be injected by the
// ncnn shader preamble — confirm against the pipeline creation code.

// convolution geometry / activation parameters (specialization constants)
layout (constant_id = 0) const int kernel_w = 1;
layout (constant_id = 1) const int kernel_h = 1;
layout (constant_id = 2) const int dilation_w = 1;
layout (constant_id = 3) const int dilation_h = 1;
layout (constant_id = 4) const int stride_w = 1;
layout (constant_id = 5) const int stride_h = 1;
layout (constant_id = 6) const int bias_term = 0;
layout (constant_id = 7) const int activation_type = 0;
layout (constant_id = 8) const float activation_param_0 = 0;
layout (constant_id = 9) const float activation_param_1 = 0;

// blob shapes, specialized when known at pipeline creation (0 = dynamic, see psc())
#define shape_constant_id_offset 10
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outcstep = 0;

#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfpvec4 bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec4 weight_data[]; };
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
#endif

// dynamic-shape fallback read through psc() when the constants above are 0
layout (push_constant) uniform parameter
{
    int w;
    int h;
    int c;
    int cstep;

    int outw;
    int outh;
    int outc;
    int outcstep;
} p;

void main()
{
    int gx = int(gl_GlobalInvocationID.x) * 4; // first of 4 flattened output positions
    int gy = int(gl_GlobalInvocationID.y);     // pack8 output channel group

    const int outsize = psc(outw) * psc(outh);

    if (gx >= outsize || gy >= psc(outc))
        return;

    afpvec8 sum0;
    afpvec8 sum1;
    afpvec8 sum2;
    afpvec8 sum3;

    if (bias_term == 1)
    {
#if NCNN_image_shader
        sum0 = image3d_ld8(bias_blob, ivec3(gy, 0, 0));
#else
        sum0 = buffer_ld8(bias_data, gy);
#endif
        sum1 = sum0;
        sum2 = sum0;
        sum3 = sum0;
    }
    else
    {
        sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    }

    const int maxk = kernel_w * kernel_h;
    const int N = psc(c) * maxk; // (input channel group, kernel tap) pairs

    const ivec4 gx4 = gx + ivec4(0, 1, 2, 3);

    // unflatten the 4 output positions into (x, y) coordinates
    const ivec4 sy4 = gx4 / psc(outw);
    const ivec4 sx4 = gx4 % psc(outw);

    const ivec4 sxs4 = sx4 * stride_w;
    const ivec4 sys4 = sy4 * stride_h;

#if NCNN_image_shader
    for (int z = 0; z < N; z++)
    {
        const int sz = z / maxk;
        const int kk = z % maxk;

        const int ky = kk / kernel_w;
        const int kx = kk % kernel_w;

        const ivec4 x4 = sxs4 + kx * dilation_w;
        const ivec4 y4 = sys4 + ky * dilation_h;

        afpvec4 v0 = image3d_ld4(bottom_blob, ivec3(x4.r, y4.r, sz));
        afpvec4 v1 = image3d_ld4(bottom_blob, ivec3(x4.g, y4.g, sz));
        afpvec4 v2 = image3d_ld4(bottom_blob, ivec3(x4.b, y4.b, sz));
        afpvec4 v3 = image3d_ld4(bottom_blob, ivec3(x4.a, y4.a, sz));

        // 8 weight vectors: one pack4 row per output channel of the pack8 group
        afpvec4 k0 = image3d_ld4(weight_blob, ivec3(z * 8 + 0, gy, 0));
        afpvec4 k1 = image3d_ld4(weight_blob, ivec3(z * 8 + 1, gy, 0));
        afpvec4 k2 = image3d_ld4(weight_blob, ivec3(z * 8 + 2, gy, 0));
        afpvec4 k3 = image3d_ld4(weight_blob, ivec3(z * 8 + 3, gy, 0));
        afpvec4 k4 = image3d_ld4(weight_blob, ivec3(z * 8 + 4, gy, 0));
        afpvec4 k5 = image3d_ld4(weight_blob, ivec3(z * 8 + 5, gy, 0));
        afpvec4 k6 = image3d_ld4(weight_blob, ivec3(z * 8 + 6, gy, 0));
        afpvec4 k7 = image3d_ld4(weight_blob, ivec3(z * 8 + 7, gy, 0));

        sum0[0].r += dot(v0, k0);
        sum0[0].g += dot(v0, k1);
        sum0[0].b += dot(v0, k2);
        sum0[0].a += dot(v0, k3);
        sum0[1].r += dot(v0, k4);
        sum0[1].g += dot(v0, k5);
        sum0[1].b += dot(v0, k6);
        sum0[1].a += dot(v0, k7);

        sum1[0].r += dot(v1, k0);
        sum1[0].g += dot(v1, k1);
        sum1[0].b += dot(v1, k2);
        sum1[0].a += dot(v1, k3);
        sum1[1].r += dot(v1, k4);
        sum1[1].g += dot(v1, k5);
        sum1[1].b += dot(v1, k6);
        sum1[1].a += dot(v1, k7);

        sum2[0].r += dot(v2, k0);
        sum2[0].g += dot(v2, k1);
        sum2[0].b += dot(v2, k2);
        sum2[0].a += dot(v2, k3);
        sum2[1].r += dot(v2, k4);
        sum2[1].g += dot(v2, k5);
        sum2[1].b += dot(v2, k6);
        sum2[1].a += dot(v2, k7);

        sum3[0].r += dot(v3, k0);
        sum3[0].g += dot(v3, k1);
        sum3[0].b += dot(v3, k2);
        sum3[0].a += dot(v3, k3);
        sum3[1].r += dot(v3, k4);
        sum3[1].g += dot(v3, k5);
        sum3[1].b += dot(v3, k6);
        sum3[1].a += dot(v3, k7);
    }
#else
    int w_offset = gy * N * 8; // 8 pack4 weight vectors per (z) step

    for (int z = 0; z < N; z++)
    {
        const int sz = z / maxk;
        const int kk = z % maxk;

        const int ky = kk / kernel_w;
        const int kx = kk % kernel_w;

        const ivec4 v_offset = sz * psc(cstep) + (sys4 + ky * dilation_h) * psc(w) + sxs4 + kx * dilation_w;

        afpvec4 v0 = buffer_ld4(bottom_blob_data, v_offset.r);
        afpvec4 v1 = buffer_ld4(bottom_blob_data, v_offset.g);
        afpvec4 v2 = buffer_ld4(bottom_blob_data, v_offset.b);
        afpvec4 v3 = buffer_ld4(bottom_blob_data, v_offset.a);

        afpvec4 k0 = buffer_ld4(weight_data, w_offset + 0);
        afpvec4 k1 = buffer_ld4(weight_data, w_offset + 1);
        afpvec4 k2 = buffer_ld4(weight_data, w_offset + 2);
        afpvec4 k3 = buffer_ld4(weight_data, w_offset + 3);
        afpvec4 k4 = buffer_ld4(weight_data, w_offset + 4);
        afpvec4 k5 = buffer_ld4(weight_data, w_offset + 5);
        afpvec4 k6 = buffer_ld4(weight_data, w_offset + 6);
        afpvec4 k7 = buffer_ld4(weight_data, w_offset + 7);

        sum0[0].r += dot(v0, k0);
        sum0[0].g += dot(v0, k1);
        sum0[0].b += dot(v0, k2);
        sum0[0].a += dot(v0, k3);
        sum0[1].r += dot(v0, k4);
        sum0[1].g += dot(v0, k5);
        sum0[1].b += dot(v0, k6);
        sum0[1].a += dot(v0, k7);

        sum1[0].r += dot(v1, k0);
        sum1[0].g += dot(v1, k1);
        sum1[0].b += dot(v1, k2);
        sum1[0].a += dot(v1, k3);
        sum1[1].r += dot(v1, k4);
        sum1[1].g += dot(v1, k5);
        sum1[1].b += dot(v1, k6);
        sum1[1].a += dot(v1, k7);

        sum2[0].r += dot(v2, k0);
        sum2[0].g += dot(v2, k1);
        sum2[0].b += dot(v2, k2);
        sum2[0].a += dot(v2, k3);
        sum2[1].r += dot(v2, k4);
        sum2[1].g += dot(v2, k5);
        sum2[1].b += dot(v2, k6);
        sum2[1].a += dot(v2, k7);

        sum3[0].r += dot(v3, k0);
        sum3[0].g += dot(v3, k1);
        sum3[0].b += dot(v3, k2);
        sum3[0].a += dot(v3, k3);
        sum3[1].r += dot(v3, k4);
        sum3[1].g += dot(v3, k5);
        sum3[1].b += dot(v3, k6);
        sum3[1].a += dot(v3, k7);

        w_offset += 8; // advance to the next 8-vector weight group
    }
#endif

    sum0 = activation_afpvec8(sum0, activation_type, activation_param_0, activation_param_1);
    sum1 = activation_afpvec8(sum1, activation_type, activation_param_0, activation_param_1);
    sum2 = activation_afpvec8(sum2, activation_type, activation_param_0, activation_param_1);
    sum3 = activation_afpvec8(sum3, activation_type, activation_param_0, activation_param_1);

#if NCNN_image_shader
    image3d_st8(top_blob, ivec3(sx4.r, sy4.r, gy), sum0);
    image3d_st8(top_blob, ivec3(sx4.g, sy4.g, gy), sum1);
    image3d_st8(top_blob, ivec3(sx4.b, sy4.b, gy), sum2);
    image3d_st8(top_blob, ivec3(sx4.a, sy4.a, gy), sum3);
#else
    const int gi = gy * psc(outcstep) + gx;

    // tail guard: positions past the end of the blob are skipped
    buffer_st8(top_blob_data, gi, sum0);
    if (gx + 1 < outsize) buffer_st8(top_blob_data, gi + 1, sum1);
    if (gx + 2 < outsize) buffer_st8(top_blob_data, gi + 2, sum2);
    if (gx + 3 < outsize) buffer_st8(top_blob_data, gi + 3, sum3);
#endif
}