convolution_pack1to8.comp

// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#version 450

#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

#extension GL_GOOGLE_include_directive: enable
#include "vulkan_activation.comp"

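// convolution hyper-parameters, baked into the pipeline as specialization constants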
layout (constant_id = 0) const int kernel_w = 1;
layout (constant_id = 1) const int kernel_h = 1;
layout (constant_id = 2) const int dilation_w = 1;
layout (constant_id = 3) const int dilation_h = 1;
layout (constant_id = 4) const int stride_w = 1;
layout (constant_id = 5) const int stride_h = 1;
layout (constant_id = 6) const int bias_term = 0;
layout (constant_id = 7) const int activation_type = 0;
layout (constant_id = 8) const float activation_param_0 = 0;
layout (constant_id = 9) const float activation_param_1 = 0;

#define shape_constant_id_offset 10
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;

layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;

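// descriptor bindings: input, output, weights and bias, either as images or as storage buffers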
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
layout (binding = 0) readonly buffer bottom_blob { sfp bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
#endif

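// dynamic shape info; psc(x) uses the specialization constant when it is non-zero and falls back to these push constants otherwise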
layout (push_constant) uniform parameter
{
    int dims;
    int w;
    int h;
    int c;
    int cstep;

    int outdims;
    int outw;
    int outh;
    int outc;
    int outcstep;
} p;

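// pack1-to-pack8 convolution: the input stores 1 element per position, the output stores 8 channels per position.
// each invocation produces a 2x2 spatial tile for 2 consecutive output channel groups,
// hence the *2 scaling of gl_GlobalInvocationID and the eight accumulators below.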
void main()
{
    int gx = int(gl_GlobalInvocationID.x) * 2;
    int gy = int(gl_GlobalInvocationID.y) * 2;
    int gz = int(gl_GlobalInvocationID.z) * 2;

    if (gx >= psc(outw) || gy >= psc(outh) || gz >= psc(outc))
        return;

    const ivec2 gx2 = gx + ivec2(0, 1);
    const ivec2 gy2 = gy + ivec2(0, 1);
    const ivec2 gz2 = gz + ivec2(0, 1);

    afpvec8 sum0;
    afpvec8 sum1;
    afpvec8 sum2;
    afpvec8 sum3;
    afpvec8 sum4;
    afpvec8 sum5;
    afpvec8 sum6;
    afpvec8 sum7;

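    // seed the accumulators with the bias of each output channel group (shared by its 2x2 spatial tile), or with zero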
    if (bias_term == 1)
    {
#if NCNN_image_shader
        sum0 = image3d_ld8(bias_blob, ivec3(gz2.x, 0, 0));
        sum4 = image3d_ld8(bias_blob, ivec3(gz2.y, 0, 0));
#else
        sum0 = buffer_ld8(bias_data, gz2.x);
        sum4 = buffer_ld8(bias_data, gz2.y);
#endif
        sum1 = sum0;
        sum2 = sum0;
        sum3 = sum0;
        sum5 = sum4;
        sum6 = sum4;
        sum7 = sum4;
    }
    else
    {
        sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum4 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum5 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum6 = afpvec8(afpvec4(0.f), afpvec4(0.f));
        sum7 = afpvec8(afpvec4(0.f), afpvec4(0.f));
    }

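    // accumulate over every input channel and kernel tap; the image path samples texels, the buffer path computes flat offsets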
#if NCNN_image_shader
    for (int z = 0; z < psc(c); z++)
    {
        ivec2 sy = gy2 * stride_h;
        int wx = 0;

        for (int y = 0; y < kernel_h; y++)
        {
            ivec2 sx = gx2 * stride_w;

            for (int x = 0; x < kernel_w; x++)
            {
                afp v0 = image3d_ld1(bottom_blob, ivec3(sx.x, sy.x, z));
                afp v1 = image3d_ld1(bottom_blob, ivec3(sx.y, sy.x, z));
                afp v2 = image3d_ld1(bottom_blob, ivec3(sx.x, sy.y, z));
                afp v3 = image3d_ld1(bottom_blob, ivec3(sx.y, sy.y, z));

                afpvec8 k0 = image3d_ld8(weight_blob, ivec3(wx, z, gz2.x));
                afpvec8 k1 = image3d_ld8(weight_blob, ivec3(wx, z, gz2.y));

                // sum += v * k;
                sum0[0] += v0 * k0[0];
                sum0[1] += v0 * k0[1];
                sum1[0] += v1 * k0[0];
                sum1[1] += v1 * k0[1];
                sum2[0] += v2 * k0[0];
                sum2[1] += v2 * k0[1];
                sum3[0] += v3 * k0[0];
                sum3[1] += v3 * k0[1];
                sum4[0] += v0 * k1[0];
                sum4[1] += v0 * k1[1];
                sum5[0] += v1 * k1[0];
                sum5[1] += v1 * k1[1];
                sum6[0] += v2 * k1[0];
                sum6[1] += v2 * k1[1];
                sum7[0] += v3 * k1[0];
                sum7[1] += v3 * k1[1];

                sx += dilation_w;
                wx += 1;
            }

            sy += dilation_h;
        }
    }
#else
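    // flat weight indexing: weight_data[((gz * c + z) * kernel_h + y) * kernel_w + x] holds one sfpvec8 (8 output channels) per tap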
    ivec2 w_offset = gz2 * psc(c) * kernel_w * kernel_h;

    for (int z = 0; z < psc(c); z++)
    {
        ivec4 v_offset;
        v_offset.rg = z * psc(cstep) + gy2.x * stride_h * psc(w) + gx2 * stride_w;
        v_offset.ba = z * psc(cstep) + gy2.y * stride_h * psc(w) + gx2 * stride_w;

        for (int y = 0; y < kernel_h; y++)
        {
            for (int x = 0; x < kernel_w; x++)
            {
                afp v0 = buffer_ld1(bottom_blob_data, v_offset.r + x * dilation_w);
                afp v1 = buffer_ld1(bottom_blob_data, v_offset.g + x * dilation_w);
                afp v2 = buffer_ld1(bottom_blob_data, v_offset.b + x * dilation_w);
                afp v3 = buffer_ld1(bottom_blob_data, v_offset.a + x * dilation_w);

                afpvec8 k0 = buffer_ld8(weight_data, w_offset.x + x);
                afpvec8 k1 = buffer_ld8(weight_data, w_offset.y + x);

                // sum += v * k;
                sum0[0] += v0 * k0[0];
                sum0[1] += v0 * k0[1];
                sum1[0] += v1 * k0[0];
                sum1[1] += v1 * k0[1];
                sum2[0] += v2 * k0[0];
                sum2[1] += v2 * k0[1];
                sum3[0] += v3 * k0[0];
                sum3[1] += v3 * k0[1];
                sum4[0] += v0 * k1[0];
                sum4[1] += v0 * k1[1];
                sum5[0] += v1 * k1[0];
                sum5[1] += v1 * k1[1];
                sum6[0] += v2 * k1[0];
                sum6[1] += v2 * k1[1];
                sum7[0] += v3 * k1[0];
                sum7[1] += v3 * k1[1];
            }

            v_offset += dilation_h * psc(w);
            w_offset += kernel_w;
        }
    }
#endif

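    // apply the fused activation selected by activation_type to every accumulated tile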
    sum0 = activation_afpvec8(sum0, activation_type, activation_param_0, activation_param_1);
    sum1 = activation_afpvec8(sum1, activation_type, activation_param_0, activation_param_1);
    sum2 = activation_afpvec8(sum2, activation_type, activation_param_0, activation_param_1);
    sum3 = activation_afpvec8(sum3, activation_type, activation_param_0, activation_param_1);
    sum4 = activation_afpvec8(sum4, activation_type, activation_param_0, activation_param_1);
    sum5 = activation_afpvec8(sum5, activation_type, activation_param_0, activation_param_1);
    sum6 = activation_afpvec8(sum6, activation_type, activation_param_0, activation_param_1);
    sum7 = activation_afpvec8(sum7, activation_type, activation_param_0, activation_param_1);

#if NCNN_image_shader
    image3d_st8(top_blob, ivec3(gx2.x, gy2.x, gz2.x), sum0);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.x, gz2.x), sum1);
    image3d_st8(top_blob, ivec3(gx2.x, gy2.y, gz2.x), sum2);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.y, gz2.x), sum3);
    image3d_st8(top_blob, ivec3(gx2.x, gy2.x, gz2.y), sum4);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.x, gz2.y), sum5);
    image3d_st8(top_blob, ivec3(gx2.x, gy2.y, gz2.y), sum6);
    image3d_st8(top_blob, ivec3(gx2.y, gy2.y, gz2.y), sum7);
#else
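    // buffer path: gi holds the flat output offsets for the two channel groups; the +1 neighbours are stored only when they fall inside the output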
    const ivec2 gi = gz2 * psc(outcstep) + gy * psc(outw) + gx;

    buffer_st8(top_blob_data, gi.x, sum0);
    if (gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.x + 1, sum1);
    if (gy + 1 < psc(outh)) buffer_st8(top_blob_data, gi.x + psc(outw), sum2);
    if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.x + psc(outw) + 1, sum3);
    if (gz + 1 < psc(outc))
    {
        buffer_st8(top_blob_data, gi.y, sum4);
        if (gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.y + 1, sum5);
        if (gy + 1 < psc(outh)) buffer_st8(top_blob_data, gi.y + psc(outw), sum6);
        if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.y + psc(outw) + 1, sum7);
    }
#endif
}
