1
// Tencent is pleased to support the open source community by making ncnn available.
3
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
8
// https://opensource.org/licenses/BSD-3-Clause
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
18
// 16-bit storage lets buffer blocks hold half-precision (f16) data.
#extension GL_EXT_shader_16bit_storage: require
19
// 8-wide half-precision vector stored as two f16vec4 halves (abcd / efgh).
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
21
// NOTE(review): the matching #endif for this conditional is not visible in
// this chunk -- presumably lost in extraction; confirm against the full file.
#if NCNN_fp16_arithmetic
22
// Enables arithmetic (not just storage) on float16 types.
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
25
#extension GL_GOOGLE_include_directive: enable
26
// Shared activation helpers (activation_afpvec8 etc.).
#include "vulkan_activation.comp"
28
// Pipeline specialization constants: layer options (ids 0-3) first, then the
// input/output shape fields starting at shape_constant_id_offset.
layout (constant_id = 0) const int bias_term = 0;
29
layout (constant_id = 1) const int activation_type = 0;
30
layout (constant_id = 2) const float activation_param_0 = 0;
31
layout (constant_id = 3) const float activation_param_1 = 0;
33
// Shape constants occupy ids 4..11: four for the input blob, four for the output.
#define shape_constant_id_offset 4
34
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
35
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
36
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
37
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;
39
layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
40
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;
41
layout (constant_id = shape_constant_id_offset + 6) const int outc = 0;
42
layout (constant_id = shape_constant_id_offset + 7) const int outcstep = 0;
45
// Image-based descriptors: sampled 3D inputs, storage-image output.
// NOTE(review): these reuse bindings 0-3 of the buffer descriptors below, so
// the two sets must be mutually exclusive -- the usual
// #if NCNN_image_shader / #else / #endif guards appear to have been lost in
// extraction; confirm against the full file.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
46
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
47
layout (binding = 2) uniform unfp sampler3D weight_blob;
48
layout (binding = 3) uniform unfp sampler3D bias_blob;
50
// Buffer-based descriptors: arrays of 8-packed sfpvec8 elements.
layout (binding = 0) readonly buffer bottom_blob { sfpvec8 bottom_blob_data[]; };
51
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
52
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
53
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
56
layout (push_constant) uniform parameter
71
// Each invocation computes 4 consecutive output positions (gx..gx+3) for a
// single output channel gy. (The enclosing void main() header is not visible
// in this chunk.)
int gx = int(gl_GlobalInvocationID.x) * 4;
72
int gy = int(gl_GlobalInvocationID.y);
75
// Out-of-range guards -- one per storage path (image / buffer); the early
// returns and the #if guards selecting between them are not visible here.
if (gx >= psc(outw) * psc(outh) || gy >= psc(outc))
78
if (gx >= psc(outcstep) || gy >= psc(outc))
90
// Per-output-channel bias, loaded once. Presumably reached only when
// bias_term != 0 -- the enclosing branch is not visible; confirm against
// the full file.
afpvec8 b = image3d_ld8(bias_blob, ivec3(gy, 0, 0));
92
afpvec8 b = buffer_ld8(bias_data, gy);
101
// Zero the four accumulators (their declarations are outside this chunk).
sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
102
sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
103
sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
104
sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));
108
// Decompose the 4 flat output indices into (x, y) texel coordinates.
ivec4 gx4 = gx + ivec4(0, 1, 2, 3);
110
ivec4 sy4 = gx4 / psc(w);
111
ivec4 sx4 = gx4 % psc(w);
113
// Image path: accumulate over input channels z. Each iteration loads one
// input vector per output position and the 8 kernel vectors for output
// channel gy, then performs 8 dot products per position (each sfpvec8 is
// two vec4 halves, indexed [0]/[1]). Loop-body braces are not visible in
// this chunk -- lost in extraction.
for (int z = 0; z < psc(c); z++)
115
afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(sx4.r, sy4.r, z));
116
afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(sx4.g, sy4.g, z));
117
afpvec8 v2 = image3d_ld8(bottom_blob, ivec3(sx4.b, sy4.b, z));
118
afpvec8 v3 = image3d_ld8(bottom_blob, ivec3(sx4.a, sy4.a, z));
120
// k0..k7: the 8 weight rows for output channel gy at input channel z.
afpvec8 k0 = image3d_ld8(weight_blob, ivec3(0, z, gy));
121
afpvec8 k1 = image3d_ld8(weight_blob, ivec3(1, z, gy));
122
afpvec8 k2 = image3d_ld8(weight_blob, ivec3(2, z, gy));
123
afpvec8 k3 = image3d_ld8(weight_blob, ivec3(3, z, gy));
124
afpvec8 k4 = image3d_ld8(weight_blob, ivec3(4, z, gy));
125
afpvec8 k5 = image3d_ld8(weight_blob, ivec3(5, z, gy));
126
afpvec8 k6 = image3d_ld8(weight_blob, ivec3(6, z, gy));
127
afpvec8 k7 = image3d_ld8(weight_blob, ivec3(7, z, gy));
130
// Output position 0: 8-wide dot-product accumulation into sum0.
sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
131
sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
132
sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
133
sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
134
sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
135
sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
136
sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
137
sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);
139
// Output position 1.
sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
140
sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
141
sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
142
sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
143
sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
144
sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
145
sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
146
sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);
148
// Output position 2.
sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
149
sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
150
sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
151
sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
152
sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
153
sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
154
sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
155
sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);
157
// Output position 3.
sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
158
sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
159
sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
160
sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
161
sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
162
sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
163
sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
164
sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
167
// Buffer path: weights for output channel gy are packed contiguously,
// 8 sfpvec8 per input channel.
int w_offset = gy * psc(c) * 8;
170
// NOTE(review): the v_offset declaration, the per-iteration w_offset
// advance, and the loop-body braces are not visible in this chunk -- lost
// in extraction; confirm against the full file.
for (int z = 0; z < psc(c); z++)
172
afpvec8 v0 = buffer_ld8(bottom_blob_data, v_offset + 0);
173
afpvec8 v1 = buffer_ld8(bottom_blob_data, v_offset + 1);
174
afpvec8 v2 = buffer_ld8(bottom_blob_data, v_offset + 2);
175
afpvec8 v3 = buffer_ld8(bottom_blob_data, v_offset + 3);
177
// k0..k7: the 8 weight rows for output channel gy at input channel z.
afpvec8 k0 = buffer_ld8(weight_data, w_offset + 0);
178
afpvec8 k1 = buffer_ld8(weight_data, w_offset + 1);
179
afpvec8 k2 = buffer_ld8(weight_data, w_offset + 2);
180
afpvec8 k3 = buffer_ld8(weight_data, w_offset + 3);
181
afpvec8 k4 = buffer_ld8(weight_data, w_offset + 4);
182
afpvec8 k5 = buffer_ld8(weight_data, w_offset + 5);
183
afpvec8 k6 = buffer_ld8(weight_data, w_offset + 6);
184
afpvec8 k7 = buffer_ld8(weight_data, w_offset + 7);
187
// Output position 0: 8-wide dot-product accumulation into sum0.
sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
188
sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
189
sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
190
sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
191
sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
192
sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
193
sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
194
sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);
196
// Output position 1.
sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
197
sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
198
sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
199
sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
200
sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
201
sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
202
sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
203
sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);
205
// Output position 2.
sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
206
sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
207
sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
208
sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
209
sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
210
sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
211
sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
212
sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);
214
// Output position 3.
sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
215
sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
216
sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
217
sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
218
sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
219
sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
220
sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
221
sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
224
// Advance input offset by one channel stride.
v_offset += psc(cstep);
228
// Apply the fused activation to all four results.
sum0 = activation_afpvec8(sum0, activation_type, activation_param_0, activation_param_1);
229
sum1 = activation_afpvec8(sum1, activation_type, activation_param_0, activation_param_1);
230
sum2 = activation_afpvec8(sum2, activation_type, activation_param_0, activation_param_1);
231
sum3 = activation_afpvec8(sum3, activation_type, activation_param_0, activation_param_1);
234
// Image path store: one texel per output position on plane gy.
image3d_st8(top_blob, ivec3(sx4.r, sy4.r, gy), sum0);
235
image3d_st8(top_blob, ivec3(sx4.g, sy4.g, gy), sum1);
236
image3d_st8(top_blob, ivec3(sx4.b, sy4.b, gy), sum2);
237
image3d_st8(top_blob, ivec3(sx4.a, sy4.a, gy), sum3);
239
// Buffer path store: flat index into channel gy; the three tail positions
// are bounds-checked since gx..gx+3 may run past the channel step.
int gi = gy * psc(outcstep) + gx;
241
buffer_st8(top_blob_data, gi + 0, sum0);
242
if (gx + 1 < psc(outcstep)) buffer_st8(top_blob_data, gi + 1, sum1);
243
if (gx + 2 < psc(outcstep)) buffer_st8(top_blob_data, gi + 2, sum2);
244
if (gx + 3 < psc(outcstep)) buffer_st8(top_blob_data, gi + 3, sum3);