1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#version 450

#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

#extension GL_GOOGLE_include_directive: enable
#include "vulkan_activation.comp"

// Convolution1D hyper-parameters, baked in as specialization constants
// at pipeline-creation time.
layout (constant_id = 0) const int kernel_w = 1;
layout (constant_id = 1) const int dilation_w = 1;
layout (constant_id = 2) const int stride_w = 1;
layout (constant_id = 3) const int bias_term = 0;
layout (constant_id = 4) const int activation_type = 0;
layout (constant_id = 5) const float activation_param_0 = 0;
layout (constant_id = 6) const float activation_param_1 = 0;

#define shape_constant_id_offset 7
// Input blob shape: w = spatial width, h = packed (pack8) input channel count.
layout (constant_id = shape_constant_id_offset + 0) const int w = 0;
layout (constant_id = shape_constant_id_offset + 1) const int h = 0;

// Output blob shape: outw = spatial width, outh = packed output channel count.
layout (constant_id = shape_constant_id_offset + 2) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 3) const int outh = 0;

#if NCNN_image_shader
// Image-backed storage path.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
// SSBO-backed storage path; each element is one pack8 vector (8 values).
layout (binding = 0) readonly buffer bottom_blob { sfpvec8 bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
#endif

// Runtime shape via push constants; the psc() helper (from the ncnn shader
// framework) prefers the non-zero specialization constants above and falls
// back to these fields otherwise.
layout (push_constant) uniform parameter
{
    int w;
    int h;

    int outw;
    int outh;
} p;
63

64
// Convolution1D, pack8 layout: each invocation computes a 2x2 tile of
// outputs — two adjacent output positions (gx, gx+1) by two adjacent packed
// output channels (gy, gy+1) — accumulating 8x8 channel dot products per
// kernel tap.
void main()
{
    // Each invocation covers 2 output columns and 2 packed output rows.
    int gx = int(gl_GlobalInvocationID.x) * 2;
    int gy = int(gl_GlobalInvocationID.y) * 2;

    if (gx >= psc(outw) || gy >= psc(outh))
        return;

    // The two output positions / packed channels handled by this invocation.
    const ivec2 gx2 = gx + ivec2(0, 1);
    const ivec2 gy2 = gy + ivec2(0, 1);

    // sum0 = (gx, gy), sum1 = (gx+1, gy), sum2 = (gx, gy+1), sum3 = (gx+1, gy+1)
    afpvec8 sum0 = afpvec8(afpvec4(0.0f), afpvec4(0.0f));
    afpvec8 sum1 = afpvec8(afpvec4(0.0f), afpvec4(0.0f));
    afpvec8 sum2 = afpvec8(afpvec4(0.0f), afpvec4(0.0f));
    afpvec8 sum3 = afpvec8(afpvec4(0.0f), afpvec4(0.0f));

    if (bias_term == 1)
    {
        // Bias is indexed by packed output channel; both spatial positions of
        // a row share the same bias vector.
#if NCNN_image_shader
        sum0 = image3d_ld8(bias_blob, ivec3(gy2.x, 0, 0));
        sum2 = image3d_ld8(bias_blob, ivec3(gy2.y, 0, 0));
#else
        sum0 = buffer_ld8(bias_data, gy2.x);
        sum2 = buffer_ld8(bias_data, gy2.y);
#endif
        sum1 = sum0;
        sum3 = sum2;
    }

#if NCNN_image_shader

    // Input x coordinate for each of the two output positions.
    ivec2 v_offset = gx2 * stride_w;

    // Loop over packed input channels (y) and kernel taps (x).
    for (int y = 0; y < psc(h); y++)
    {
        int wx = 0;

        for (int x = 0; x < kernel_w; x++)
        {
            // v0/v1: pack8 input vectors for the two output positions.
            afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(v_offset.x + x * dilation_w, y, 0));
            afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(v_offset.y + x * dilation_w, y, 0));
            
            // k0..k7: weights for the 8 output lanes of packed row gy2.x.
            afpvec8 k0 = image3d_ld8(weight_blob, ivec3(wx + 0, y, gy2.x));
            afpvec8 k1 = image3d_ld8(weight_blob, ivec3(wx + 1, y, gy2.x));
            afpvec8 k2 = image3d_ld8(weight_blob, ivec3(wx + 2, y, gy2.x));
            afpvec8 k3 = image3d_ld8(weight_blob, ivec3(wx + 3, y, gy2.x));
            afpvec8 k4 = image3d_ld8(weight_blob, ivec3(wx + 4, y, gy2.x));
            afpvec8 k5 = image3d_ld8(weight_blob, ivec3(wx + 5, y, gy2.x));
            afpvec8 k6 = image3d_ld8(weight_blob, ivec3(wx + 6, y, gy2.x));
            afpvec8 k7 = image3d_ld8(weight_blob, ivec3(wx + 7, y, gy2.x));

            // k8..kf: weights for the 8 output lanes of packed row gy2.y.
            afpvec8 k8 = image3d_ld8(weight_blob, ivec3(wx + 0, y, gy2.y));
            afpvec8 k9 = image3d_ld8(weight_blob, ivec3(wx + 1, y, gy2.y));
            afpvec8 ka = image3d_ld8(weight_blob, ivec3(wx + 2, y, gy2.y));
            afpvec8 kb = image3d_ld8(weight_blob, ivec3(wx + 3, y, gy2.y));
            afpvec8 kc = image3d_ld8(weight_blob, ivec3(wx + 4, y, gy2.y));
            afpvec8 kd = image3d_ld8(weight_blob, ivec3(wx + 5, y, gy2.y));
            afpvec8 ke = image3d_ld8(weight_blob, ivec3(wx + 6, y, gy2.y));
            afpvec8 kf = image3d_ld8(weight_blob, ivec3(wx + 7, y, gy2.y));

            // Each output lane accumulates an 8-wide dot product, computed as
            // two vec4 dots over the (abcd, efgh) halves of the pack8 vector.
            sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
            sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
            sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
            sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
            sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
            sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
            sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
            sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

            sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
            sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
            sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
            sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
            sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
            sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
            sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
            sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

            sum2[0].r += dot(v0[0], k8[0]) + dot(v0[1], k8[1]);
            sum2[0].g += dot(v0[0], k9[0]) + dot(v0[1], k9[1]);
            sum2[0].b += dot(v0[0], ka[0]) + dot(v0[1], ka[1]);
            sum2[0].a += dot(v0[0], kb[0]) + dot(v0[1], kb[1]);
            sum2[1].r += dot(v0[0], kc[0]) + dot(v0[1], kc[1]);
            sum2[1].g += dot(v0[0], kd[0]) + dot(v0[1], kd[1]);
            sum2[1].b += dot(v0[0], ke[0]) + dot(v0[1], ke[1]);
            sum2[1].a += dot(v0[0], kf[0]) + dot(v0[1], kf[1]);

            sum3[0].r += dot(v1[0], k8[0]) + dot(v1[1], k8[1]);
            sum3[0].g += dot(v1[0], k9[0]) + dot(v1[1], k9[1]);
            sum3[0].b += dot(v1[0], ka[0]) + dot(v1[1], ka[1]);
            sum3[0].a += dot(v1[0], kb[0]) + dot(v1[1], kb[1]);
            sum3[1].r += dot(v1[0], kc[0]) + dot(v1[1], kc[1]);
            sum3[1].g += dot(v1[0], kd[0]) + dot(v1[1], kd[1]);
            sum3[1].b += dot(v1[0], ke[0]) + dot(v1[1], ke[1]);
            sum3[1].a += dot(v1[0], kf[0]) + dot(v1[1], kf[1]);

            // 8 weight vectors consumed per tap for this packed output row.
            wx += 8;
        }
    }
    
#else

    // Input element index for each of the two output positions.
    ivec2 v_offset = gx2 * stride_w;
    // Weight element index (in taps) for each packed output row; the weight
    // buffer is laid out as [outch/8][inch/8][kernel_w][8] pack8 vectors.
    ivec2 w_offset = gy2 * psc(h) * kernel_w;
    
    for (int y = 0; y < psc(h); y++)
    {    
        for (int x = 0; x < kernel_w; x++)
        {
            // v0/v1: pack8 input vectors for the two output positions.
            afpvec8 v0 = buffer_ld8(bottom_blob_data, v_offset.x + x * dilation_w);
            afpvec8 v1 = buffer_ld8(bottom_blob_data, v_offset.y + x * dilation_w);
            
            // k0..k7: weights for the 8 output lanes of packed row gy2.x.
            afpvec8 k0 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 0);
            afpvec8 k1 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 1);
            afpvec8 k2 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 2);
            afpvec8 k3 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 3);
            afpvec8 k4 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 4);
            afpvec8 k5 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 5);
            afpvec8 k6 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 6);
            afpvec8 k7 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 7);

            // k8..kf: weights for the 8 output lanes of packed row gy2.y.
            afpvec8 k8 = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 0);
            afpvec8 k9 = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 1);
            afpvec8 ka = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 2);
            afpvec8 kb = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 3);
            afpvec8 kc = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 4);
            afpvec8 kd = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 5);
            afpvec8 ke = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 6);
            afpvec8 kf = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 7);

            // Each output lane accumulates an 8-wide dot product, computed as
            // two vec4 dots over the (abcd, efgh) halves of the pack8 vector.
            sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
            sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
            sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
            sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
            sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
            sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
            sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
            sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);

            sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
            sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
            sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
            sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
            sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
            sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
            sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
            sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);

            sum2[0].r += dot(v0[0], k8[0]) + dot(v0[1], k8[1]);
            sum2[0].g += dot(v0[0], k9[0]) + dot(v0[1], k9[1]);
            sum2[0].b += dot(v0[0], ka[0]) + dot(v0[1], ka[1]);
            sum2[0].a += dot(v0[0], kb[0]) + dot(v0[1], kb[1]);
            sum2[1].r += dot(v0[0], kc[0]) + dot(v0[1], kc[1]);
            sum2[1].g += dot(v0[0], kd[0]) + dot(v0[1], kd[1]);
            sum2[1].b += dot(v0[0], ke[0]) + dot(v0[1], ke[1]);
            sum2[1].a += dot(v0[0], kf[0]) + dot(v0[1], kf[1]);

            sum3[0].r += dot(v1[0], k8[0]) + dot(v1[1], k8[1]);
            sum3[0].g += dot(v1[0], k9[0]) + dot(v1[1], k9[1]);
            sum3[0].b += dot(v1[0], ka[0]) + dot(v1[1], ka[1]);
            sum3[0].a += dot(v1[0], kb[0]) + dot(v1[1], kb[1]);
            sum3[1].r += dot(v1[0], kc[0]) + dot(v1[1], kc[1]);
            sum3[1].g += dot(v1[0], kd[0]) + dot(v1[1], kd[1]);
            sum3[1].b += dot(v1[0], ke[0]) + dot(v1[1], ke[1]);
            sum3[1].a += dot(v1[0], kf[0]) + dot(v1[1], kf[1]);
        }       
        // Advance to the next packed input channel: one row of input,
        // kernel_w taps of weights.
        v_offset += psc(w);
        w_offset += kernel_w;
    }
    
#endif

    // Apply the configured activation (from vulkan_activation.comp) to each tile.
    sum0 = activation_afpvec8(sum0, activation_type, activation_param_0, activation_param_1);
    sum1 = activation_afpvec8(sum1, activation_type, activation_param_0, activation_param_1);
    sum2 = activation_afpvec8(sum2, activation_type, activation_param_0, activation_param_1);
    sum3 = activation_afpvec8(sum3, activation_type, activation_param_0, activation_param_1);
    
#if NCNN_image_shader

    // Image stores: out-of-range coordinates are discarded by the hardware,
    // so no explicit tail guard is needed here.
    image3d_st8(top_blob, ivec3(gx2.x, gy2.x, 0), sum0);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.x, 0), sum1);
    image3d_st8(top_blob, ivec3(gx2.x, gy2.y, 0), sum2);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.y, 0), sum3);
    
#else

    const int gi = gy * psc(outw) + gx;

    // Buffer stores must guard the +1 column/row when outw or outh is odd.
    buffer_st8(top_blob_data, gi, sum0);
    if (gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi + 1, sum1);
    if (gy + 1 < psc(outh)) buffer_st8(top_blob_data, gi + psc(outw), sum2);
    if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi + psc(outw) + 1, sum3);
    
#endif
}
259
