// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#version 450

// Optional fp16 storage / arithmetic, enabled per-device by the ncnn shader
// preprocessor (NCNN_fp16_* are injected at shader compile time).
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

// Blob shape as specialization constants: filled in at pipeline creation when
// the shape is known ahead of time, otherwise left 0 and the matching field of
// the push-constant block below is used instead (presumably selected by the
// psc() macro from the ncnn shader preamble — not visible in this file).
#define shape_constant_id_offset 0
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;     // input blob width
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;     // input blob height
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;     // input blob channels
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0; // elements per input channel plane

layout (constant_id = shape_constant_id_offset + 4) const int outcstep = 0; // elements per output coefficient plane

layout (constant_id = shape_constant_id_offset + 5) const int block_x = 0; // winograd tile count along x
layout (constant_id = shape_constant_id_offset + 6) const int block_y = 0; // winograd tile count along y

// Storage bindings: image path or SSBO path, chosen by the preprocessor.
// (unfp / sfp / imfmtc1 are ncnn preamble macros selecting the scalar type
// and image format for the current precision mode.)
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc1) writeonly uniform unfp image3D bottom_tm_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfp bottom_blob_data[]; };
layout (binding = 1) writeonly buffer bottom_tm_blob { sfp bottom_tm_blob_data[]; };
#endif

// Push-constant fallback for the shape fields above; used when the
// corresponding specialization constant was left at 0.
layout (push_constant) uniform parameter
{
    int w;
    int h;
    int c;
    int cstep;

    int outcstep;

    int block_x;
    int block_y;
} p;
55
// Winograd F(2x2,3x3) input transform (winograd23).
// Each invocation handles one 4x4 input tile of one channel: tiles start every
// 2 pixels (the output tile of F(2x2,3x3) is 2x2, and 3x3 kernel taps make
// adjacent 4x4 input tiles overlap by 2). The tile d is transformed to
// Bt * d * B and the 16 resulting coefficients are scattered into 16 separate
// planes of the transformed blob, ready for the per-coefficient GEMM stage.
void main()
{
    int gx = int(gl_GlobalInvocationID.x); // tile index along x, in [0, block_x)
    int gy = int(gl_GlobalInvocationID.y); // tile index along y, in [0, block_y)
    int gz = int(gl_GlobalInvocationID.z); // channel index, in [0, c)

    if (gx >= psc(block_x) || gy >= psc(block_y) || gz >= psc(c))
        return;

    // load 4x4
    // top-left corner of this tile in the input blob (stride 2 between tiles)
    int sx = gx * 2;
    int sy = gy * 2;

#if NCNN_image_shader
    // NOTE(review): unlike the buffer path below, there is no explicit bounds
    // check on sx+1..3 / sy+1..3 here — presumably the sampler's address mode
    // returns zero for out-of-range taps; confirm against the sampler setup.
    afp v00 = image3d_ld1(bottom_blob, ivec3(sx + 0, sy + 0, gz));
    afp v01 = image3d_ld1(bottom_blob, ivec3(sx + 1, sy + 0, gz));
    afp v02 = image3d_ld1(bottom_blob, ivec3(sx + 2, sy + 0, gz));
    afp v03 = image3d_ld1(bottom_blob, ivec3(sx + 3, sy + 0, gz));

    afp v10 = image3d_ld1(bottom_blob, ivec3(sx + 0, sy + 1, gz));
    afp v11 = image3d_ld1(bottom_blob, ivec3(sx + 1, sy + 1, gz));
    afp v12 = image3d_ld1(bottom_blob, ivec3(sx + 2, sy + 1, gz));
    afp v13 = image3d_ld1(bottom_blob, ivec3(sx + 3, sy + 1, gz));

    afp v20 = image3d_ld1(bottom_blob, ivec3(sx + 0, sy + 2, gz));
    afp v21 = image3d_ld1(bottom_blob, ivec3(sx + 1, sy + 2, gz));
    afp v22 = image3d_ld1(bottom_blob, ivec3(sx + 2, sy + 2, gz));
    afp v23 = image3d_ld1(bottom_blob, ivec3(sx + 3, sy + 2, gz));

    afp v30 = image3d_ld1(bottom_blob, ivec3(sx + 0, sy + 3, gz));
    afp v31 = image3d_ld1(bottom_blob, ivec3(sx + 1, sy + 3, gz));
    afp v32 = image3d_ld1(bottom_blob, ivec3(sx + 2, sy + 3, gz));
    afp v33 = image3d_ld1(bottom_blob, ivec3(sx + 3, sy + 3, gz));
#else
    // Base linear offsets of the tile's 4 rows (swizzle r/g/b/a = rows 0..3).
    int v_offset_0 = gz * psc(cstep) + sy * psc(w) + sx;
    ivec4 v_offset = v_offset_0 + ivec4(0, 1, 2, 3) * psc(w);

    // Edge tiles on the right/bottom border are zero-padded: each tap is
    // guarded by the bounds checks it needs (row 0 / column 0 of the tile are
    // always in range once the gx/gy guard above has passed).
    afp v00 = buffer_ld1(bottom_blob_data, v_offset.r + 0);
    afp v01 = sx + 1 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.r + 1) : afp(0.f);
    afp v02 = sx + 2 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.r + 2) : afp(0.f);
    afp v03 = sx + 3 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.r + 3) : afp(0.f);

    afp v10 = sy + 1 < psc(h) ? buffer_ld1(bottom_blob_data, v_offset.g + 0) : afp(0.f);
    afp v11 = sy + 1 < psc(h) && sx + 1 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.g + 1) : afp(0.f);
    afp v12 = sy + 1 < psc(h) && sx + 2 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.g + 2) : afp(0.f);
    afp v13 = sy + 1 < psc(h) && sx + 3 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.g + 3) : afp(0.f);

    afp v20 = sy + 2 < psc(h) ? buffer_ld1(bottom_blob_data, v_offset.b + 0) : afp(0.f);
    afp v21 = sy + 2 < psc(h) && sx + 1 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.b + 1) : afp(0.f);
    afp v22 = sy + 2 < psc(h) && sx + 2 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.b + 2) : afp(0.f);
    afp v23 = sy + 2 < psc(h) && sx + 3 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.b + 3) : afp(0.f);

    afp v30 = sy + 3 < psc(h) ? buffer_ld1(bottom_blob_data, v_offset.a + 0) : afp(0.f);
    afp v31 = sy + 3 < psc(h) && sx + 1 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.a + 1) : afp(0.f);
    afp v32 = sy + 3 < psc(h) && sx + 2 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.a + 2) : afp(0.f);
    afp v33 = sy + 3 < psc(h) && sx + 3 < psc(w) ? buffer_ld1(bottom_blob_data, v_offset.a + 3) : afp(0.f);
#endif

    // Input transform matrix Bt of winograd F(2x2,3x3); the full transform is
    // Bt * d * B, computed as two passes of itm below.
    // const float itm[4][4] = {
    //     {1.0f, 0.0f, -1.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, -1.0f, 1.0f, 0.0f},
    //     {0.0f, -1.0f, 0.0f, 1.0f}
    // };

    // implicit transpose
    // First pass: apply itm along the tile's rows, writing the result with the
    // indices swapped so the second pass can reuse the same expressions.
    afp m00 = v00 - v02;
    afp m01 = v10 - v12;
    afp m02 = v20 - v22;
    afp m03 = v30 - v32;

    afp m10 = v02 + v01;
    afp m11 = v12 + v11;
    afp m12 = v22 + v21;
    afp m13 = v32 + v31;

    afp m20 = v02 - v01;
    afp m21 = v12 - v11;
    afp m22 = v22 - v21;
    afp m23 = v32 - v31;

    afp m30 = v03 - v01;
    afp m31 = v13 - v11;
    afp m32 = v23 - v21;
    afp m33 = v33 - v31;

    // Second pass: apply itm again (same row pattern), completing Bt * d * B.
    v00 = m00 - m02;
    v10 = m10 - m12;
    v20 = m20 - m22;
    v30 = m30 - m32;

    v01 = m02 + m01;
    v11 = m12 + m11;
    v21 = m22 + m21;
    v31 = m32 + m31;

    v02 = m02 - m01;
    v12 = m12 - m11;
    v22 = m22 - m21;
    v32 = m32 - m31;

    v03 = m03 - m01;
    v13 = m13 - m11;
    v23 = m23 - m21;
    v33 = m33 - m31;

    // store 16
#if NCNN_image_shader
    // Image layout: x = tile index within the channel (row-major over tiles),
    // y = channel, z = one slice per transform coefficient (0..15).
    int x = gy * psc(block_x) + gx;

    image3d_st1(bottom_tm_blob, ivec3(x, gz, 0), v00);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 1), v01);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 2), v02);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 3), v03);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 4), v10);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 5), v11);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 6), v12);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 7), v13);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 8), v20);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 9), v21);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 10), v22);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 11), v23);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 12), v30);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 13), v31);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 14), v32);
    image3d_st1(bottom_tm_blob, ivec3(x, gz, 15), v33);
#else
    // Buffer layout: one plane of outcstep elements per coefficient; within a
    // plane the element index is channel-major, then tile-row, then tile.
    int v_tm_offset = gz * psc(block_x) * psc(block_y) + gy * psc(block_x) + gx;

    buffer_st1(bottom_tm_blob_data, v_tm_offset + 0 * psc(outcstep), v00);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 1 * psc(outcstep), v01);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 2 * psc(outcstep), v02);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 3 * psc(outcstep), v03);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 4 * psc(outcstep), v10);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 5 * psc(outcstep), v11);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 6 * psc(outcstep), v12);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 7 * psc(outcstep), v13);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 8 * psc(outcstep), v20);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 9 * psc(outcstep), v21);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 10 * psc(outcstep), v22);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 11 * psc(outcstep), v23);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 12 * psc(outcstep), v30);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 13 * psc(outcstep), v31);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 14 * psc(outcstep), v32);
    buffer_st1(bottom_tm_blob_data, v_tm_offset + 15 * psc(outcstep), v33);
#endif
}
203