1
// Tencent is pleased to support the open source community by making ncnn available.
3
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
8
// https://opensource.org/licenses/BSD-3-Clause
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
18
// NOTE(review): this chunk is extraction-garbled — stray original line numbers are
// interleaved between code lines and several structural lines (e.g. #endif guards,
// the push_constant block body) are missing. Code below is kept byte-identical;
// only review comments are added.

// 16-bit storage is required for the sfp/sfpvec4 storage types used in the SSBOs below.
#extension GL_EXT_shader_16bit_storage: require
20
// fp16 arithmetic path: afpvec4 becomes f16vec4 when NCNN_fp16_arithmetic is set.
#if NCNN_fp16_arithmetic
21
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
24
// Input-channel tile depth for the shared-memory (local memory) path.
#define LOCAL_MEMORY_UNROLL_INCH 8
26
// Kernel spatial size (maxk = kernel_w * kernel_h); specialized at pipeline creation.
layout (constant_id = 0) const int maxk = 1;
28
// Blob shape specialization constants follow; 0 means "read from push constants"
// via the psc() macro (ncnn convention) — TODO confirm against the ncnn macro header.
#define shape_constant_id_offset 1
29
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
30
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
31
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
32
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;
34
layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
35
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;
38
// Image/sampler descriptor set: used by the image3d_ld4 / image3d_st1 path below.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
39
layout (binding = 1, imfmtc1) writeonly uniform unfp image3D col_blob;
40
layout (binding = 2) uniform unfp sampler3D weight_blob;
42
// SSBO descriptor set: used by the buffer_ld4 / buffer_st1 path below.
// NOTE(review): these reuse binding slots 0-2 and the names bottom_blob/col_blob/
// weight_blob already declared above; in ncnn shaders the two groups are normally
// mutually exclusive under #if NCNN_image_shader / #else / #endif — those guards
// appear to have been lost in extraction. Confirm against the upstream file.
layout (binding = 0) readonly buffer bottom_blob { sfpvec4 bottom_blob_data[]; };
43
layout (binding = 1) writeonly buffer col_blob { sfp col_blob_data[]; };
44
layout (binding = 2) readonly buffer weight_blob { sfpvec4 weight_data[]; };
47
// Push-constant block carrying the runtime shape (body not visible in this chunk).
layout (push_constant) uniform parameter
58
#if NCNN_shader_local_memory
59
// Shared-memory tiles for the cooperative path:
// tmp_v[local_x][channel-in-tile][column 0..3] holds input activations,
// tmp_k[local_y][channel-in-tile] holds weights — presumably an 8x8 workgroup
// (local_size layout line not visible here; verify).
shared lfpvec4 tmp_v[8][LOCAL_MEMORY_UNROLL_INCH][4];
60
shared lfpvec4 tmp_k[8][LOCAL_MEMORY_UNROLL_INCH];
65
// NOTE(review): interior of main() — the function header, braces, accumulator
// declarations (sum0..sum3), barrier() calls and matching #else/#endif lines are
// missing from this garbled chunk. Code lines are kept byte-identical; comments
// describe the visible structure only.

// Each invocation produces 4 consecutive output columns (gx..gx+3) of one row gy.
int gx = int(gl_GlobalInvocationID.x) * 4;
66
int gy = int(gl_GlobalInvocationID.y);
68
// Non-cooperative path: bounds-check up front (the local-memory path must not
// early-return before barriers, so its check happens after the loops instead).
#if !NCNN_shader_local_memory
69
if (gx >= psc(outw) || gy >= psc(outh))
79
ivec4 gx4 = gx + ivec4(0, 1, 2, 3);
81
// Decompose the 4 flat column indices into (x, y) positions of the input plane.
ivec4 sy4 = gx4 / psc(w);
82
ivec4 sx4 = gx4 % psc(w);
84
// Image path: accumulate over input channels, 4 pixels x 1 weight per step.
for (int z = 0; z < psc(c); z++)
86
afpvec4 v0 = image3d_ld4(bottom_blob, ivec3(sx4.r, sy4.r, z));
87
afpvec4 v1 = image3d_ld4(bottom_blob, ivec3(sx4.g, sy4.g, z));
88
afpvec4 v2 = image3d_ld4(bottom_blob, ivec3(sx4.b, sy4.b, z));
89
afpvec4 v3 = image3d_ld4(bottom_blob, ivec3(sx4.a, sy4.a, z));
91
afpvec4 k = image3d_ld4(weight_blob, ivec3(z, gy, 0));
100
// Buffer path: weight row for output channel gy starts at gy * c.
// (v_offset initialization is not visible in this chunk — TODO confirm.)
int w_offset = gy * psc(c);
102
#if NCNN_shader_local_memory
103
const int lx = int(gl_LocalInvocationID.x);
104
const int ly = int(gl_LocalInvocationID.y);
107
// Cooperative path: march through input channels in tiles of
// LOCAL_MEMORY_UNROLL_INCH, staging activations and weights in shared memory.
// (z declaration and barrier() calls between the stage/consume phases are not
// visible here — presumably present in the original; verify.)
for (; z + (LOCAL_MEMORY_UNROLL_INCH - 1) < psc(c); z += LOCAL_MEMORY_UNROLL_INCH)
111
// Stage: each thread loads one column (ly) of each channel in the tile.
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
113
tmp_v[lx][z4][ly] = sfp2lfpvec4(bottom_blob_data[v_offset + z4 * psc(cstep) + ly]);
119
// Stage: one weight vector per channel in the tile for this output row.
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
121
tmp_k[ly][z4] = sfp2lfpvec4(weight_data[w_offset + z4]);
127
// Consume: multiply-accumulate the staged tile (accumulator updates with
// sum0..sum3 are not visible in this chunk).
for (int z4 = 0; z4 < LOCAL_MEMORY_UNROLL_INCH; z4++)
129
afpvec4 v0 = lfp2afpvec4(tmp_v[lx][z4][0]);
130
afpvec4 v1 = lfp2afpvec4(tmp_v[lx][z4][1]);
131
afpvec4 v2 = lfp2afpvec4(tmp_v[lx][z4][2]);
132
afpvec4 v3 = lfp2afpvec4(tmp_v[lx][z4][3]);
134
afpvec4 k = lfp2afpvec4(tmp_k[ly][z4]);
142
// Advance both cursors past the consumed tile.
v_offset += LOCAL_MEMORY_UNROLL_INCH * psc(cstep);
143
w_offset += LOCAL_MEMORY_UNROLL_INCH;
150
// Remainder: channels left over after the full tiles (same stage/consume shape).
const int remain = psc(c) - z;
154
for (int z4 = 0; z4 < remain; z4++)
156
tmp_v[lx][z4][ly] = sfp2lfpvec4(bottom_blob_data[v_offset + z4 * psc(cstep) + ly]);
162
for (int z4 = 0; z4 < remain; z4++)
164
tmp_k[ly][z4] = sfp2lfpvec4(weight_data[w_offset + z4]);
170
for (int z4 = 0; z4 < remain; z4++)
172
afpvec4 v0 = lfp2afpvec4(tmp_v[lx][z4][0]);
173
afpvec4 v1 = lfp2afpvec4(tmp_v[lx][z4][1]);
174
afpvec4 v2 = lfp2afpvec4(tmp_v[lx][z4][2]);
175
afpvec4 v3 = lfp2afpvec4(tmp_v[lx][z4][3]);
177
afpvec4 k = lfp2afpvec4(tmp_k[ly][z4]);
186
// Plain global-memory fallback (no shared staging): 4 adjacent activations and
// one weight per channel.
for (int z = 0; z < psc(c); z++)
188
afpvec4 v0 = buffer_ld4(bottom_blob_data, v_offset + 0);
189
afpvec4 v1 = buffer_ld4(bottom_blob_data, v_offset + 1);
190
afpvec4 v2 = buffer_ld4(bottom_blob_data, v_offset + 2);
191
afpvec4 v3 = buffer_ld4(bottom_blob_data, v_offset + 3);
193
afpvec4 k = buffer_ld4(weight_data, w_offset);
200
v_offset += psc(cstep);
206
// Cooperative path defers the bounds check to here (after all barriers).
#if NCNN_shader_local_memory
207
if (gx >= psc(outw) || gy >= psc(outh))
212
// Image store path: one scalar result per column.
image3d_st1(col_blob, ivec3(gx4.r, gy, 0), sum0);
213
image3d_st1(col_blob, ivec3(gx4.g, gy, 0), sum1);
214
image3d_st1(col_blob, ivec3(gx4.b, gy, 0), sum2);
215
image3d_st1(col_blob, ivec3(gx4.a, gy, 0), sum3);
217
// Buffer store path: flat index, with per-column tail guards since the
// 4-wide unroll may overrun outw.
const int gi = gy * psc(outw) + gx;
219
buffer_st1(col_blob_data, gi, sum0);
220
if (gx + 1 < psc(outw)) buffer_st1(col_blob_data, gi + 1, sum1);
221
if (gx + 2 < psc(outw)) buffer_st1(col_blob_data, gi + 2, sum2);
222
if (gx + 3 < psc(outw)) buffer_st1(col_blob_data, gi + 3, sum3);