1
// Tencent is pleased to support the open source community by making ncnn available.
3
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
8
// https://opensource.org/licenses/BSD-3-Clause
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
18
#extension GL_EXT_shader_16bit_storage: require
19
// Storage type for one pack8 element: two packed f16vec4 halves
// (abcd = output lanes 0-3, efgh = output lanes 4-7).
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
21
#if NCNN_fp16_arithmetic
22
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
25
#extension GL_GOOGLE_include_directive: enable
26
#include "vulkan_activation.comp"
28
// Specialization constants: convolution geometry and fused-activation
// parameters, baked in at pipeline-creation time.
layout (constant_id = 0) const int kernel_w = 1;
29
layout (constant_id = 1) const int kernel_h = 1;
30
layout (constant_id = 2) const int dilation_w = 1;
31
layout (constant_id = 3) const int dilation_h = 1;
32
layout (constant_id = 4) const int stride_w = 1;
33
layout (constant_id = 5) const int stride_h = 1;
34
layout (constant_id = 6) const int bias_term = 0; // 1 = add per-channel bias before accumulation
35
layout (constant_id = 7) const int group = 1; // number of convolution groups
36
layout (constant_id = 8) const int activation_type = 0; // selector passed to activation_afpvec8()
37
layout (constant_id = 9) const float activation_param_0 = 0;
38
layout (constant_id = 10) const float activation_param_1 = 0;
40
// Input/output blob shapes as specialization constants.
// NOTE(review): the body reads these through psc(); psc() is defined outside
// this chunk — presumably it falls back to push constants when these are left
// at 0 (dynamic-shape dispatch). Confirm against the unabridged source.
#define shape_constant_id_offset 11
41
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
42
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
43
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
44
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
45
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;
47
layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
48
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
49
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
50
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
51
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;
54
// Descriptor bindings.
// NOTE(review): the sampler/image declarations and the buffer declarations
// below reuse the same binding slots and block names — in the unabridged
// source these are almost certainly separated by
// #if NCNN_image_shader / #else / #endif guards that were lost when this
// chunk was extracted; as written, both sets cannot coexist in one module.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
55
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
56
layout (binding = 2) uniform unfp sampler3D weight_blob;
57
layout (binding = 3) uniform unfp sampler3D bias_blob;
59
// Buffer-storage variants: pack4 input/weights, pack8 output/bias.
layout (binding = 0) readonly buffer bottom_blob { sfpvec4 bottom_blob_data[]; };
60
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
61
layout (binding = 2) readonly buffer weight_blob { sfpvec4 weight_data[]; };
62
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
65
layout (push_constant) uniform parameter
82
// NOTE(review): fragment of the shader entry point. Structural lines are
// missing from this chunk: the `void main()` header, braces, the
// `afpvec8 sum;` declaration, the `return;` body of the bounds test, the
// `if (bias_term == 1)` branch around the bias loads, and the
// #if NCNN_image_shader / #else guards separating the texture path from the
// buffer path. The stray numeric lines look like line numbers leaked in by
// whatever extracted this chunk. Comments below annotate only what is visible.
int gx = int(gl_GlobalInvocationID.x); // output x coordinate
83
int gy = int(gl_GlobalInvocationID.y); // output y coordinate
84
int gz = int(gl_GlobalInvocationID.z); // output channel block (pack8)
86
// Bounds guard for threads outside the output extent
// (its `return;` body is not visible in this chunk).
if (gx >= psc(outw) || gy >= psc(outh) || gz >= psc(outc))
94
// Accumulator initialization from the per-channel bias — texture variant...
sum = image3d_ld8(bias_blob, ivec3(gz, 0, 0));
96
// ...and buffer variant.
sum = buffer_ld8(bias_data, gz);
101
// No-bias case: start the accumulator at zero.
sum = afpvec8(afpvec4(0.f), afpvec4(0.f));
104
// Effective kernel footprint after dilation.
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
105
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
108
// Per-group channel counts (input and output channels split evenly across groups).
const int channels_g = psc(c) / group;
109
const int num_output_g = psc(outc) / group;
112
// Group index this output channel block belongs to.
const int gg = gz / num_output_g;
115
// ---- texture (image) path ----
for (int y = 0; y < kernel_h; y++)
117
// Map the output row back to a candidate input row
// (stride/dilation inverse mapping, transposed-convolution style).
int sys = (gy + y * dilation_h - (kernel_extent_h - 1));
118
// Skip taps that do not land on a stride-aligned input row.
if (sys < 0 || sys % stride_h != 0)
121
int sy = sys / stride_h;
125
for (int x = 0; x < kernel_w; x++)
127
// Same inverse mapping along x.
int sxs = (gx + x * dilation_w - (kernel_extent_w - 1));
128
if (sxs < 0 || sxs % stride_w != 0)
131
int sx = sxs / stride_w;
135
// First input channel of this group.
int sz = gg * channels_g;
136
// Weight x-offset: 8 output lanes per kernel tap.
int wx = (y * kernel_w + x) * 8;
138
for (int z = 0; z < channels_g; z++)
140
// NOTE(review): `sz` is not advanced with `z` on any visible line —
// expected `sz + z` here (or an `sz++` on a lost line); confirm
// against the unabridged source.
afpvec4 v = image3d_ld4(bottom_blob, ivec3(sx, sy, sz));
142
// Eight weight vectors: one per output lane of the pack8 result.
afpvec4 k0 = image3d_ld4(weight_blob, ivec3(wx + 0, z, gz));
143
afpvec4 k1 = image3d_ld4(weight_blob, ivec3(wx + 1, z, gz));
144
afpvec4 k2 = image3d_ld4(weight_blob, ivec3(wx + 2, z, gz));
145
afpvec4 k3 = image3d_ld4(weight_blob, ivec3(wx + 3, z, gz));
146
afpvec4 k4 = image3d_ld4(weight_blob, ivec3(wx + 4, z, gz));
147
afpvec4 k5 = image3d_ld4(weight_blob, ivec3(wx + 5, z, gz));
148
afpvec4 k6 = image3d_ld4(weight_blob, ivec3(wx + 6, z, gz));
149
afpvec4 k7 = image3d_ld4(weight_blob, ivec3(wx + 7, z, gz));
152
// Accumulate: each output lane is a dot of the pack4 input with its weight vector.
sum[0].r += dot(v, k0);
153
sum[0].g += dot(v, k1);
154
sum[0].b += dot(v, k2);
155
sum[0].a += dot(v, k3);
156
sum[1].r += dot(v, k4);
157
sum[1].g += dot(v, k5);
158
sum[1].b += dot(v, k6);
159
sum[1].a += dot(v, k7);
166
// ---- buffer path (same math, flat-index addressing) ----
int w_offset_0 = gz * channels_g * kernel_w * kernel_h;
167
int v_offset_0 = gg * channels_g * psc(cstep);
169
for (int y = 0; y < kernel_h; y++)
171
int sys = (gy + y * dilation_h - (kernel_extent_h - 1));
172
if (sys < 0 || sys % stride_h != 0)
175
int sy = sys / stride_h;
179
for (int x = 0; x < kernel_w; x++)
181
int sxs = (gx + x * dilation_w - (kernel_extent_w - 1));
182
if (sxs < 0 || sxs % stride_w != 0)
185
int sx = sxs / stride_w;
189
// Flat offsets: input element and first weight tap for this (x, y).
int v_offset = v_offset_0 + sy * psc(w) + sx;
190
int w_offset = w_offset_0 + y * kernel_w + x;
192
for (int z = 0; z < channels_g; z++)
194
afpvec4 v = buffer_ld4(bottom_blob_data, v_offset);
196
// Eight consecutive sfpvec4 weights per tap, one per output lane.
afpvec4 k0 = buffer_ld4(weight_data, w_offset * 8 + 0);
197
afpvec4 k1 = buffer_ld4(weight_data, w_offset * 8 + 1);
198
afpvec4 k2 = buffer_ld4(weight_data, w_offset * 8 + 2);
199
afpvec4 k3 = buffer_ld4(weight_data, w_offset * 8 + 3);
200
afpvec4 k4 = buffer_ld4(weight_data, w_offset * 8 + 4);
201
afpvec4 k5 = buffer_ld4(weight_data, w_offset * 8 + 5);
202
afpvec4 k6 = buffer_ld4(weight_data, w_offset * 8 + 6);
203
afpvec4 k7 = buffer_ld4(weight_data, w_offset * 8 + 7);
206
sum[0].r += dot(v, k0);
207
sum[0].g += dot(v, k1);
208
sum[0].b += dot(v, k2);
209
sum[0].a += dot(v, k3);
210
sum[1].r += dot(v, k4);
211
sum[1].g += dot(v, k5);
212
sum[1].b += dot(v, k6);
213
sum[1].a += dot(v, k7);
215
// Advance to the next input channel / next weight row for this tap.
v_offset += psc(cstep);
216
w_offset += kernel_w * kernel_h;
222
// Apply the fused activation before storing.
sum = activation_afpvec8(sum, activation_type, activation_param_0, activation_param_1);
225
// Store — texture variant...
image3d_st8(top_blob, ivec3(gx, gy, gz), sum);
227
// ...and buffer variant (flat index into the pack8 output blob).
const int gi = gz * psc(outcstep) + gy * psc(outw) + gx;
229
buffer_st8(top_blob_data, gi, sum);