ncnn

Форк
0
/
convolution_pack8_3x3s1d1_winograd_gemm.comp 
198 строк · 8.7 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#version 450

// fp16 storage: blobs are kept in half precision in memory; sfpvec8 is the
// 8-wide (two f16vec4) storage element used by the buffer bindings below.
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
// fp16 arithmetic: enables half-precision math types for the afp* compute types.
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

// batch: number of winograd transform tiles processed along gl_GlobalInvocationID.z
// (dispatched per transform-matrix element; exact meaning set by the host side).
layout (constant_id = 0) const int batch = 1;

// Shape specialization constants. When the shapes are known at pipeline-creation
// time the host bakes them in here; otherwise they stay 0 and the push-constant
// copies below are used instead (selection presumably done by the psc() macro
// defined in ncnn's shader preamble — not visible in this file).
#define shape_constant_id_offset 1
layout (constant_id = shape_constant_id_offset + 0) const int c = 0;
layout (constant_id = shape_constant_id_offset + 1) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 2) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 3) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 4) const int outcstep = 0;

// Two storage backends selected at compile time:
//  - image path: 3D textures/images (unfp / imfmtc4 come from the ncnn preamble)
//  - buffer path: flat SSBOs of 8-packed elements
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_tm_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_tm_blob;
layout (binding = 2) uniform unfp sampler3D weight_tm_blob;
#else
layout (binding = 0) readonly buffer bottom_tm_blob { sfpvec8 bottom_tm_blob_data[]; };
layout (binding = 1) writeonly buffer top_tm_blob { sfpvec8 top_tm_blob_data[]; };
layout (binding = 2) readonly buffer weight_tm_blob { sfpvec8 weight_tm_data[]; };
#endif

// Runtime shape parameters; mirror the specialization constants above and are
// used when those were left at 0 (dynamic-shape pipelines).
layout (push_constant) uniform parameter
{
    int c;
    int cstep;

    int outw;
    int outc;
    int outcstep;
} p;
54

55
// GEMM stage of the winograd43 pack8 convolution: for one transform-tile batch
// (gz) it multiplies the transformed input (bottom_tm_blob) by the transformed
// kernel (weight_tm_blob), producing the transformed output (top_tm_blob).
// Each invocation computes 4 consecutive output positions along x (gx..gx+3)
// for one 8-packed output channel group (gy). Channels are packed 8-wide, so
// each accumulation step is an 8x8 matrix-vector product expressed as 8 pairs
// of vec4 dot products.
void main()
{
    int gx = int(gl_GlobalInvocationID.x) * 4; // 4 output columns per invocation
    int gy = int(gl_GlobalInvocationID.y);
    int gz = int(gl_GlobalInvocationID.z);

    if (gx >= psc(outw) || gy >= psc(outc) || gz >= batch)
        return;

    // One 8-wide accumulator per output column.
    afpvec8 sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    afpvec8 sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));

#if NCNN_image_shader
    // Loop over input channel groups (8 channels each).
    for (int z = 0; z < psc(c); z++)
    {
        // 4 input columns for this channel group.
        afpvec8 v0 = image3d_ld8(bottom_tm_blob, ivec3(gx + 0, z, gz));
        afpvec8 v1 = image3d_ld8(bottom_tm_blob, ivec3(gx + 1, z, gz));
        afpvec8 v2 = image3d_ld8(bottom_tm_blob, ivec3(gx + 2, z, gz));
        afpvec8 v3 = image3d_ld8(bottom_tm_blob, ivec3(gx + 3, z, gz));

        // 8 weight rows (one per output channel in this pack8 group).
        afpvec8 k0 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 0, gy, gz));
        afpvec8 k1 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 1, gy, gz));
        afpvec8 k2 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 2, gy, gz));
        afpvec8 k3 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 3, gy, gz));
        afpvec8 k4 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 4, gy, gz));
        afpvec8 k5 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 5, gy, gz));
        afpvec8 k6 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 6, gy, gz));
        afpvec8 k7 = image3d_ld8(weight_tm_blob, ivec3(z * 8 + 7, gy, gz));

        // sum += v * k  (8x8 block: each output lane is an 8-element dot
        // product, split as two vec4 dots over the abcd/efgh halves)
        sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
        sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
        sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
        sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
        sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
        sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
        sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
        sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

        sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
        sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
        sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
        sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
        sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
        sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
        sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
        sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

        sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
        sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
        sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
        sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
        sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
        sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
        sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
        sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);

        sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
        sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
        sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
        sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
        sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
        sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
        sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
        sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
    }
#else
    // Buffer layout: input element (gz, z, gx) at gz*cstep + z*outw + gx
    // (the per-z advance is done at the bottom of the loop); weights for
    // (gz, gy) start at ((gz*outc + gy) * c) * 8 and advance 8 rows per z.
    int v_offset = gz * psc(cstep) + gx;
    int w_offset = (gz * psc(c) * psc(outc) + gy * psc(c)) * 8;

    for (int z = 0; z < psc(c); z++)
    {
        // 4 input columns for this channel group.
        afpvec8 v0 = buffer_ld8(bottom_tm_blob_data, v_offset + 0);
        afpvec8 v1 = buffer_ld8(bottom_tm_blob_data, v_offset + 1);
        afpvec8 v2 = buffer_ld8(bottom_tm_blob_data, v_offset + 2);
        afpvec8 v3 = buffer_ld8(bottom_tm_blob_data, v_offset + 3);

        // 8 weight rows (one per output channel in this pack8 group).
        afpvec8 k0 = buffer_ld8(weight_tm_data, w_offset + 0);
        afpvec8 k1 = buffer_ld8(weight_tm_data, w_offset + 1);
        afpvec8 k2 = buffer_ld8(weight_tm_data, w_offset + 2);
        afpvec8 k3 = buffer_ld8(weight_tm_data, w_offset + 3);
        afpvec8 k4 = buffer_ld8(weight_tm_data, w_offset + 4);
        afpvec8 k5 = buffer_ld8(weight_tm_data, w_offset + 5);
        afpvec8 k6 = buffer_ld8(weight_tm_data, w_offset + 6);
        afpvec8 k7 = buffer_ld8(weight_tm_data, w_offset + 7);

        // sum += v * k  (same 8x8 accumulation as the image path)
        sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
        sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
        sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
        sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
        sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
        sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
        sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
        sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

        sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
        sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
        sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
        sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
        sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
        sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
        sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
        sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

        sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
        sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
        sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
        sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
        sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
        sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
        sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
        sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);

        sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
        sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
        sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
        sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
        sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
        sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
        sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
        sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);

        // Advance to the next 8-channel group: one row of input, 8 weight rows.
        v_offset += psc(outw);
        w_offset += 8;
    }
#endif

#if NCNN_image_shader
    // NOTE(review): image stores are unconditional for gx+1..gx+3 while the
    // buffer path bounds-checks them — presumably the image is allocated with
    // width padded to a multiple of 4, so out-of-range texels are harmless.
    // TODO confirm against the host-side allocation.
    image3d_st8(top_tm_blob, ivec3(gx + 0, gy, gz), sum0);
    image3d_st8(top_tm_blob, ivec3(gx + 1, gy, gz), sum1);
    image3d_st8(top_tm_blob, ivec3(gx + 2, gy, gz), sum2);
    image3d_st8(top_tm_blob, ivec3(gx + 3, gy, gz), sum3);
#else
    int gi = gz * psc(outcstep) + gy * psc(outw) + gx;

    // gx itself is guarded by the early return; the 3 extra columns may run
    // past outw when outw is not a multiple of 4, hence the checks.
    buffer_st8(top_tm_blob_data, gi + 0, sum0);
    if (gx + 1 < psc(outw)) buffer_st8(top_tm_blob_data, gi + 1, sum1);
    if (gx + 2 < psc(outw)) buffer_st8(top_tm_blob_data, gi + 2, sum2);
    if (gx + 3 < psc(outw)) buffer_st8(top_tm_blob_data, gi + 3, sum3);
#endif
}
199

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.