// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
18
// fp16 storage: one packed-8 channel group is stored as two half4 halves.
// NOTE(review): upstream ncnn shaders wrap these two lines in
// #if NCNN_fp16_storage / #endif; that guard appears to have been lost in
// extraction and is restored here — confirm against the upstream shader.
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
// Native fp16 arithmetic, enabled per-pipeline at compile time.
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

#extension GL_GOOGLE_include_directive: enable
#include "vulkan_activation.comp"

// Convolution hyper-parameters, baked in as specialization constants.
layout (constant_id = 0) const int kernel_w = 1;
layout (constant_id = 1) const int kernel_h = 1;
layout (constant_id = 2) const int dilation_w = 1;
layout (constant_id = 3) const int dilation_h = 1;
layout (constant_id = 4) const int stride_w = 1;
layout (constant_id = 5) const int stride_h = 1;
layout (constant_id = 6) const int bias_term = 0;
layout (constant_id = 7) const int activation_type = 0;
layout (constant_id = 8) const float activation_param_0 = 0;
layout (constant_id = 9) const float activation_param_1 = 0;

// Blob shapes; 0 means "not specialized, read from push constants via psc()".
#define shape_constant_id_offset 10
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;

// I/O resources. The sampler/image variants and the buffer variants share
// binding slots 0-3 and are mutually exclusive; both sets cannot be declared
// at the same bindings simultaneously, so the upstream
// #if NCNN_image_shader / #else / #endif selection (lost in extraction) is
// restored here.
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc1) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfpvec8 bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfp top_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
layout (binding = 3) readonly buffer bias_blob { sfp bias_data[]; };
#endif
64
layout (push_constant) uniform parameter
81
// NOTE(review): this is a fragment of the shader's void main() — the
// enclosing "void main() { ... }", all braces, the
// #if NCNN_image_shader / #else / #endif branch guards, and the local
// declarations (afp sum0..sum7, wx, ivec4 v_offset, the "return;" after the
// bounds check) were lost during extraction. The interleaved bare integers
// are leaked source line numbers from the same extraction. Restore the
// missing scaffolding from the upstream ncnn convolution pack8to1 shader
// before attempting to compile; only comments have been added here.

// Each invocation computes a 2x2x2 tile of the output (x/y/outc doubled).
int gx = int(gl_GlobalInvocationID.x) * 2;
82
int gy = int(gl_GlobalInvocationID.y) * 2;
83
int gz = int(gl_GlobalInvocationID.z) * 2;
85
// Bounds check — the "return;" body of this if was lost in extraction.
if (gx >= psc(outw) || gy >= psc(outh) || gz >= psc(outc))
88
// Coordinate pairs for the 2-wide tile in each dimension.
const ivec2 gx2 = gx + ivec2(0, 1);
89
const ivec2 gy2 = gy + ivec2(0, 1);
90
const ivec2 gz2 = gz + ivec2(0, 1);
104
// Bias initialization — image-shader variant (presumably under
// #if NCNN_image_shader and an "if (bias_term == 1)" guard; confirm upstream).
sum0 = image3d_ld1(bias_blob, ivec3(gz2.x, 0, 0));
105
sum4 = image3d_ld1(bias_blob, ivec3(gz2.y, 0, 0));
107
// Bias initialization — buffer variant (presumably the #else branch).
sum0 = buffer_ld1(bias_data, gz2.x);
108
sum4 = buffer_ld1(bias_data, gz2.y);
130
// Image-shader accumulation path: iterate input channel groups z,
// then the kernel window (y, x).
for (int z = 0; z < psc(c); z++)
132
ivec2 sy = gy2 * stride_h;
135
for (int y = 0; y < kernel_h; y++)
137
ivec2 sx = gx2 * stride_w;
139
for (int x = 0; x < kernel_w; x++)
141
// Load the 2x2 spatial neighborhood of packed-8 input values.
afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(sx.x, sy.x, z));
142
afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(sx.y, sy.x, z));
143
afpvec8 v2 = image3d_ld8(bottom_blob, ivec3(sx.x, sy.y, z));
144
afpvec8 v3 = image3d_ld8(bottom_blob, ivec3(sx.y, sy.y, z));
146
// Weights for the two output channels; "wx" is a kernel-position
// index declared in a lost portion of this function.
afpvec8 k0 = image3d_ld8(weight_blob, ivec3(wx, z, gz2.x));
147
afpvec8 k1 = image3d_ld8(weight_blob, ivec3(wx, z, gz2.y));
150
// 8-wide multiply-accumulate: two dot4 products per packed-8 value.
sum0 += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
151
sum1 += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
152
sum2 += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
153
sum3 += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
154
sum4 += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
155
sum5 += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
156
sum6 += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
157
sum7 += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
167
// Buffer accumulation path (presumably the #else of NCNN_image_shader).
// Per-output-channel base offsets into the flat weight buffer.
ivec2 w_offset = gz2 * psc(c) * kernel_w * kernel_h;
169
for (int z = 0; z < psc(c); z++)
172
// v_offset: 4 flat input offsets for the 2x2 tile
// (rg = top row, ba = bottom row); declared in a lost portion.
v_offset.rg = z * psc(cstep) + gy2.x * stride_h * psc(w) + gx2 * stride_w;
173
v_offset.ba = z * psc(cstep) + gy2.y * stride_h * psc(w) + gx2 * stride_w;
175
for (int y = 0; y < kernel_h; y++)
177
for (int x = 0; x < kernel_w; x++)
179
afpvec8 v0 = buffer_ld8(bottom_blob_data, v_offset.r + x * dilation_w);
180
afpvec8 v1 = buffer_ld8(bottom_blob_data, v_offset.g + x * dilation_w);
181
afpvec8 v2 = buffer_ld8(bottom_blob_data, v_offset.b + x * dilation_w);
182
afpvec8 v3 = buffer_ld8(bottom_blob_data, v_offset.a + x * dilation_w);
184
afpvec8 k0 = buffer_ld8(weight_data, w_offset.x + x);
185
afpvec8 k1 = buffer_ld8(weight_data, w_offset.y + x);
188
sum0 += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
189
sum1 += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
190
sum2 += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
191
sum3 += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
192
sum4 += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
193
sum5 += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
194
sum6 += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
195
sum7 += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
198
// Advance to the next kernel row / next kernel-row weights.
v_offset += dilation_h * psc(w);
199
w_offset += kernel_w;
204
// Apply the fused activation (relu / leaky / clip / ... selected by
// activation_type; see vulkan_activation.comp).
sum0 = activation_afp(sum0, activation_type, activation_param_0, activation_param_1);
205
sum1 = activation_afp(sum1, activation_type, activation_param_0, activation_param_1);
206
sum2 = activation_afp(sum2, activation_type, activation_param_0, activation_param_1);
207
sum3 = activation_afp(sum3, activation_type, activation_param_0, activation_param_1);
208
sum4 = activation_afp(sum4, activation_type, activation_param_0, activation_param_1);
209
sum5 = activation_afp(sum5, activation_type, activation_param_0, activation_param_1);
210
sum6 = activation_afp(sum6, activation_type, activation_param_0, activation_param_1);
211
sum7 = activation_afp(sum7, activation_type, activation_param_0, activation_param_1);
214
// Store — image-shader variant: one scalar per output position of the
// 2x2x2 tile (image writes clip out-of-range coordinates).
image3d_st1(top_blob, ivec3(gx2.x, gy2.x, gz2.x), sum0);
215
image3d_st1(top_blob, ivec3(gx2.y, gy2.x, gz2.x), sum1);
216
image3d_st1(top_blob, ivec3(gx2.x, gy2.y, gz2.x), sum2);
217
image3d_st1(top_blob, ivec3(gx2.y, gy2.y, gz2.x), sum3);
218
image3d_st1(top_blob, ivec3(gx2.x, gy2.x, gz2.y), sum4);
219
image3d_st1(top_blob, ivec3(gx2.y, gy2.x, gz2.y), sum5);
220
image3d_st1(top_blob, ivec3(gx2.x, gy2.y, gz2.y), sum6);
221
image3d_st1(top_blob, ivec3(gx2.y, gy2.y, gz2.y), sum7);
223
// Store — buffer variant: flat offsets for the two output channels,
// with explicit bounds checks on the +1 positions of the tile.
const ivec2 gi = gz2 * psc(outcstep) + gy * psc(outw) + gx;
225
buffer_st1(top_blob_data, gi.x, sum0);
226
if (gx + 1 < psc(outw)) buffer_st1(top_blob_data, gi.x + 1, sum1);
227
if (gy + 1 < psc(outh)) buffer_st1(top_blob_data, gi.x + psc(outw), sum2);
228
if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st1(top_blob_data, gi.x + psc(outw) + 1, sum3);
229
if (gz + 1 < psc(outc))
231
buffer_st1(top_blob_data, gi.y, sum4);
232
if (gx + 1 < psc(outw)) buffer_st1(top_blob_data, gi.y + 1, sum5);
233
if (gy + 1 < psc(outh)) buffer_st1(top_blob_data, gi.y + psc(outw), sum6);
234
if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st1(top_blob_data, gi.y + psc(outw) + 1, sum7);