ncnn / deconvolution_pack8_gemm.comp

// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

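// Deconvolution pack8, GEMM stage: multiply the packed weight matrix by the
// flattened bottom blob and write the result into col_blob for a subsequent
// col2im pass.
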
#version 450

#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

layout (constant_id = 0) const int maxk = 1;

#define shape_constant_id_offset 1
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;
layout (constant_id = shape_constant_id_offset + 2) const int c = 0;
layout (constant_id = shape_constant_id_offset + 3) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 4) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 5) const int outh = 0;

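// Inputs and outputs are bound either as combined image samplers (NCNN_image_shader
// builds) or as pack8 storage buffers: the bottom blob, the col output and the
// pre-packed weights.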
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D col_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfpvec8 bottom_blob_data[]; };
layout (binding = 1) writeonly buffer col_blob { sfpvec8 col_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
#endif

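// The push constants mirror the shape specialization constants above; ncnn's psc()
// macro reads the specialization constant when it is non-zero and falls back to the
// corresponding push-constant field otherwise.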
layout (push_constant) uniform parameter
{
    int w;
    int h;
    int c;
    int cstep;

    int outw;
    int outh;
} p;

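// Each invocation produces a 1x4 tile of the col matrix: columns gx .. gx+3 of row gy,
// where a column corresponds to one spatial position of the bottom blob and every
// element is an 8-wide pack.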
void main()
{
    int gx = int(gl_GlobalInvocationID.x) * 4;
    int gy = int(gl_GlobalInvocationID.y);

    if (gx >= psc(outw) || gy >= psc(outh))
        return;

    afpvec8 sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));

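    // Accumulate over the input channels: for every channel z the four input vectors
    // v0..v3 are combined with the eight weight vectors k0..k7 of row gy, so each
    // packed output lane receives an 8-element dot product per channel.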
#if NCNN_image_shader
    ivec4 gx4 = gx + ivec4(0, 1, 2, 3);

    ivec4 sy4 = gx4 / psc(w);
    ivec4 sx4 = gx4 % psc(w);

    for (int z = 0; z < psc(c); z++)
    {
        afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(sx4.r, sy4.r, z));
        afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(sx4.g, sy4.g, z));
        afpvec8 v2 = image3d_ld8(bottom_blob, ivec3(sx4.b, sy4.b, z));
        afpvec8 v3 = image3d_ld8(bottom_blob, ivec3(sx4.a, sy4.a, z));

        afpvec8 k0 = image3d_ld8(weight_blob, ivec3(z * 8 + 0, gy, 0));
        afpvec8 k1 = image3d_ld8(weight_blob, ivec3(z * 8 + 1, gy, 0));
        afpvec8 k2 = image3d_ld8(weight_blob, ivec3(z * 8 + 2, gy, 0));
        afpvec8 k3 = image3d_ld8(weight_blob, ivec3(z * 8 + 3, gy, 0));
        afpvec8 k4 = image3d_ld8(weight_blob, ivec3(z * 8 + 4, gy, 0));
        afpvec8 k5 = image3d_ld8(weight_blob, ivec3(z * 8 + 5, gy, 0));
        afpvec8 k6 = image3d_ld8(weight_blob, ivec3(z * 8 + 6, gy, 0));
        afpvec8 k7 = image3d_ld8(weight_blob, ivec3(z * 8 + 7, gy, 0));

        // sum += v * k
        sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
        sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
        sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
        sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
        sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
        sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
        sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
        sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

        sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
        sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
        sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
        sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
        sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
        sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
        sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
        sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

        sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
        sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
        sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
        sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
        sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
        sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
        sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
        sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);

        sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
        sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
        sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
        sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
        sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
        sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
        sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
        sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
    }
#else
    int v_offset = gx;
    int w_offset = gy * psc(c) * 8;

    for (int z = 0; z < psc(c); z++)
    {
        afpvec8 v0 = buffer_ld8(bottom_blob_data, v_offset + 0);
        afpvec8 v1 = buffer_ld8(bottom_blob_data, v_offset + 1);
        afpvec8 v2 = buffer_ld8(bottom_blob_data, v_offset + 2);
        afpvec8 v3 = buffer_ld8(bottom_blob_data, v_offset + 3);

        afpvec8 k0 = buffer_ld8(weight_data, w_offset + 0);
        afpvec8 k1 = buffer_ld8(weight_data, w_offset + 1);
        afpvec8 k2 = buffer_ld8(weight_data, w_offset + 2);
        afpvec8 k3 = buffer_ld8(weight_data, w_offset + 3);
        afpvec8 k4 = buffer_ld8(weight_data, w_offset + 4);
        afpvec8 k5 = buffer_ld8(weight_data, w_offset + 5);
        afpvec8 k6 = buffer_ld8(weight_data, w_offset + 6);
        afpvec8 k7 = buffer_ld8(weight_data, w_offset + 7);

        // sum += v * k
        sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
        sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
        sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
        sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
        sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
        sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
        sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
        sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

        sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
        sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
        sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
        sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
        sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
        sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
        sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
        sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

        sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
        sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
        sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
        sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
        sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
        sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
        sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
        sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);

        sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
        sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
        sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
        sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
        sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
        sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
        sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
        sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);

        v_offset += psc(cstep);
        w_offset += 8;
    }
#endif

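    // Write back the four columns of row gy; in the buffer path the three tail
    // columns are guarded so a partial tile at the right edge never writes past outw.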
#if NCNN_image_shader
    image3d_st8(col_blob, ivec3(gx4.r, gy, 0), sum0);
    image3d_st8(col_blob, ivec3(gx4.g, gy, 0), sum1);
    image3d_st8(col_blob, ivec3(gx4.b, gy, 0), sum2);
    image3d_st8(col_blob, ivec3(gx4.a, gy, 0), sum3);
#else
    const int gi = gy * psc(outw) + gx;

    buffer_st8(col_blob_data, gi, sum0);
    if (gx + 1 < psc(outw)) buffer_st8(col_blob_data, gi + 1, sum1);
    if (gx + 2 < psc(outw)) buffer_st8(col_blob_data, gi + 2, sum2);
    if (gx + 3 < psc(outw)) buffer_st8(col_blob_data, gi + 3, sum3);
#endif
}