// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// NOTE(review): the bare integers interleaved below are extraction artifacts
// (leftover line numbers), not GLSL -- strip them before compiling.
// 16-bit storage types (sfp / sfpvec*) require this extension.
#extension GL_EXT_shader_16bit_storage: require
19
// Storage vector holding one pack8 element: 8 fp16 lanes as two f16vec4 halves.
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
21
// fp16 arithmetic path: enables f16 types in expressions, not just storage.
// NOTE(review): the matching #endif for this #if is not visible here --
// presumably lost in extraction; confirm against the upstream shader.
#if NCNN_fp16_arithmetic
22
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
25
// Specialization constants baked at pipeline-creation time.
// stride: reorg block size; mode: selects how output channels map to
// (source channel, within-block offset) -- see the mode == 0 / mode == 1
// branches in main().
layout (constant_id = 0) const int stride = 0;
layout (constant_id = 1) const int mode = 0;

// Shape hints; 0 means "unknown at pipeline creation", in which case the
// psc() macro falls back to the push-constant values.
#define shape_constant_id_offset 2
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;
// Input / output blob bindings. The original preprocessor guards were lost in
// extraction, leaving binding 0 declared three times; restore the standard
// ncnn structure: image path vs. storage-buffer path, with the buffer element
// type chosen by fp16 packing.
// NOTE(review): upstream ncnn shaders sometimes use the wider condition
// `NCNN_fp16_packed || (NCNN_fp16_storage && !NCNN_fp16_arithmetic)` for the
// sfpvec2 branch -- confirm against the original shader source.
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
#else
#if NCNN_fp16_packed
layout (binding = 0) readonly buffer bottom_blob { sfpvec2 bottom_blob_data[]; };
#else
layout (binding = 0) readonly buffer bottom_blob { sfp bottom_blob_data[]; };
#endif
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
#endif
// Push-constant fallback for the shape fields (read via the psc() macro when
// the corresponding specialization constants are 0).
// NOTE(review): the block body ('{ int dims; int w; ... int outcstep; } p;')
// is missing from this extraction and must be restored for this to compile.
layout (push_constant) uniform parameter
70
// ---------------------------------------------------------------------------
// Body of main(): reorg (space-to-depth) rearrangement into a pack8 blob.
// NOTE(review): this span is a damaged extraction. Missing from view (and
// required for compilation): the `void main()` header and braces, the
// `return;` of the bounds guard, the declarations
// `ivec4 gzz4 = gz * 8 + ivec4(4, 5, 6, 7);` (referenced below but never
// declared here), `ivec4 z4; ivec4 zi4; ivec4 zz4; ivec4 zii4;`,
// `afpvec8 v;`, the `if (mode == 0)` line preceding the first branch, and
// the #if NCNN_image_shader / NCNN_fp16_packed guards that make the three
// load/store paths below mutually exclusive. The bare integers interleaved
// below are leftover line numbers, not code. Restore all of this from the
// upstream shader before building.
// ---------------------------------------------------------------------------
// One invocation produces one pack8 output element at (gx, gy, gz).
int gx = int(gl_GlobalInvocationID.x);
71
int gy = int(gl_GlobalInvocationID.y);
72
int gz = int(gl_GlobalInvocationID.z);
74
// Bounds guard. NOTE(review): its `return;` statement is missing here.
if (gx >= psc(outw) || gy >= psc(outh) || gz >= psc(outc))
77
// First four of the eight output channel lanes packed into element gz;
// lanes 4..7 come from the lost `gzz4` declaration referenced below.
ivec4 gz4 = gz * 8 + ivec4(0, 1, 2, 3);
86
// mode == 0 branch (its `if` line was lost): the source channel is
// gz4 / stride^2 and the offset inside the stride x stride block is
// gz4 % stride^2.
z4 = gz4 / (stride * stride);
87
zi4 = gz4 % (stride * stride);
88
zz4 = gzz4 / (stride * stride);
89
zii4 = gzz4 % (stride * stride);
91
else // if (mode == 1)
93
// mode == 1: the div/mod roles are swapped, operating on (c * 8) -- the
// total source channel count in pack1 numbering.
z4 = gz4 % (psc(c) * 8);
94
zi4 = gz4 / (psc(c) * 8);
95
zz4 = gzz4 % (psc(c) * 8);
96
zii4 = gzz4 / (psc(c) * 8);
98
// Source spatial coordinates for each of the eight gathered lanes.
ivec4 y4 = gy * stride + zi4 / stride;
99
ivec4 x4 = gx * stride + zi4 % stride;
100
ivec4 yy4 = gy * stride + zii4 / stride;
101
ivec4 xx4 = gx * stride + zii4 % stride;
104
// --- image path (presumably guarded by #if NCNN_image_shader; guard lost) --
// Each source lane lives in pack8 texel z / 8; the lane within it is z % 8.
afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(x4.r, y4.r, z4.r / 8));
105
afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(x4.g, y4.g, z4.g / 8));
106
afpvec8 v2 = image3d_ld8(bottom_blob, ivec3(x4.b, y4.b, z4.b / 8));
107
afpvec8 v3 = image3d_ld8(bottom_blob, ivec3(x4.a, y4.a, z4.a / 8));
108
afpvec8 v4 = image3d_ld8(bottom_blob, ivec3(xx4.r, yy4.r, zz4.r / 8));
109
afpvec8 v5 = image3d_ld8(bottom_blob, ivec3(xx4.g, yy4.g, zz4.g / 8));
110
afpvec8 v6 = image3d_ld8(bottom_blob, ivec3(xx4.b, yy4.b, zz4.b / 8));
111
afpvec8 v7 = image3d_ld8(bottom_blob, ivec3(xx4.a, yy4.a, zz4.a / 8));
114
// Extract the single wanted lane (z % 8) from each loaded pack8 value:
// [(z % 8) / 4] selects the abcd/efgh half, [z % 4] the component.
// NOTE(review): the `afpvec8 v;` declaration this writes into is missing.
v[0].r = v0[(z4.r % 8) / 4][z4.r % 4];
115
v[0].g = v1[(z4.g % 8) / 4][z4.g % 4];
116
v[0].b = v2[(z4.b % 8) / 4][z4.b % 4];
117
v[0].a = v3[(z4.a % 8) / 4][z4.a % 4];
118
v[1].r = v4[(zz4.r % 8) / 4][zz4.r % 4];
119
v[1].g = v5[(zz4.g % 8) / 4][zz4.g % 4];
120
v[1].b = v6[(zz4.b % 8) / 4][zz4.b % 4];
121
v[1].a = v7[(zz4.a % 8) / 4][zz4.a % 4];
123
image3d_st8(top_blob, ivec3(gx, gy, gz), v);
125
// --- buffer paths (guards lost) --------------------------------------------
// Linear index of the destination pack8 element in the output buffer.
int gi = gz * psc(outcstep) + gy * psc(outw) + gx;
128
// fp16-packed storage: lanes are stored two-per-sfpvec2, so the pack8
// element offset is scaled by 4; (z % 8) / 2 picks the pair, z % 2 the half.
ivec4 v_offset = ((z4 / 8) * psc(cstep) + y4 * psc(w) + x4) * 4 + (z4 % 8) / 2;
129
ivec4 lane2 = z4 % 2;
130
ivec4 vv_offset = ((zz4 / 8) * psc(cstep) + yy4 * psc(w) + xx4) * 4 + (zz4 % 8) / 2;
131
ivec4 lane4 = zz4 % 2;
133
afpvec2 vr = buffer_ld2(bottom_blob_data, v_offset.r);
134
afpvec2 vg = buffer_ld2(bottom_blob_data, v_offset.g);
135
afpvec2 vb = buffer_ld2(bottom_blob_data, v_offset.b);
136
afpvec2 va = buffer_ld2(bottom_blob_data, v_offset.a);
138
afpvec2 vvr = buffer_ld2(bottom_blob_data, vv_offset.r);
139
afpvec2 vvg = buffer_ld2(bottom_blob_data, vv_offset.g);
140
afpvec2 vvb = buffer_ld2(bottom_blob_data, vv_offset.b);
141
afpvec2 vva = buffer_ld2(bottom_blob_data, vv_offset.a);
143
// Assemble the 8 gathered lanes into one pack8 value and store it.
afpvec8 v = afpvec8(vr[lane2.r], vg[lane2.g], vb[lane2.b], va[lane2.a], vvr[lane4.r], vvg[lane4.g], vvb[lane4.b], vva[lane4.a]);
145
buffer_st8(top_blob_data, gi, v);
147
// scalar storage: 8 sfp lanes per pack8 element (offset scaled by 8);
// buffer_cp1to8 copies the 8 source scalars into one destination element.
ivec4 v_offset = ((z4 / 8) * psc(cstep) + y4 * psc(w) + x4) * 8 + z4 % 8;
148
ivec4 vv_offset = ((zz4 / 8) * psc(cstep) + yy4 * psc(w) + xx4) * 8 + zz4 % 8;
150
buffer_cp1to8(top_blob_data, gi, bottom_blob_data, v_offset, vv_offset);