// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#version 450
16

17
#if NCNN_fp16_storage
18
#extension GL_EXT_shader_16bit_storage: require
19
struct sfpvec8 { f16vec4 abcd; f16vec4 efgh; };
20
#endif
21
#if NCNN_fp16_arithmetic
22
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
23
#endif
24

25
#extension GL_GOOGLE_include_directive: enable
26
#include "vulkan_activation.comp"
27

28
layout (constant_id = 0) const int kernel_w = 1;
29
layout (constant_id = 1) const int kernel_h = 1;
30
layout (constant_id = 2) const int dilation_w = 1;
31
layout (constant_id = 3) const int dilation_h = 1;
32
layout (constant_id = 4) const int stride_w = 1;
33
layout (constant_id = 5) const int stride_h = 1;
34
layout (constant_id = 6) const int bias_term = 0;
35
layout (constant_id = 7) const int activation_type = 0;
36
layout (constant_id = 8) const float activation_param_0 = 0;
37
layout (constant_id = 9) const float activation_param_1 = 0;
38

39
#define shape_constant_id_offset 10
40
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
41
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
42
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
43
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
44
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;
45

46
layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
47
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
48
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
49
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
50
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;
51

52
#if NCNN_image_shader
53
layout (binding = 0) uniform unfp sampler3D bottom_blob;
54
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob;
55
layout (binding = 2) uniform unfp sampler3D weight_blob;
56
layout (binding = 3) uniform unfp sampler3D bias_blob;
57
#else
58
layout (binding = 0) readonly buffer bottom_blob { sfpvec8 bottom_blob_data[]; };
59
layout (binding = 1) writeonly buffer top_blob { sfpvec8 top_blob_data[]; };
60
layout (binding = 2) readonly buffer weight_blob { sfpvec8 weight_data[]; };
61
layout (binding = 3) readonly buffer bias_blob { sfpvec8 bias_data[]; };
62
#endif
63

64
layout (push_constant) uniform parameter
65
{
66
    int dims;
67
    int w;
68
    int h;
69
    int c;
70
    int cstep;
71

72
    int outdims;
73
    int outw;
74
    int outh;
75
    int outc;
76
    int outcstep;
77
} p;
78

79
void main()
80
{
81
    int gx = int(gl_GlobalInvocationID.x) * 2;
82
    int gy = int(gl_GlobalInvocationID.y) * 2;
83
    int gz = int(gl_GlobalInvocationID.z) * 2;
84

85
    if (gx >= psc(outw) || gy >= psc(outh) || gz >= psc(outc))
86
        return;
87

88
    const ivec2 gx2 = gx + ivec2(0, 1);
89
    const ivec2 gy2 = gy + ivec2(0, 1);
90
    const ivec2 gz2 = gz + ivec2(0, 1);
91

92
    afpvec8 sum0;
93
    afpvec8 sum1;
94
    afpvec8 sum2;
95
    afpvec8 sum3;
96
    afpvec8 sum4;
97
    afpvec8 sum5;
98
    afpvec8 sum6;
99
    afpvec8 sum7;
100

101
    if (bias_term == 1)
102
    {
103
#if NCNN_image_shader
104
        sum0 = image3d_ld8(bias_blob, ivec3(gz2.x, 0, 0));
105
        sum4 = image3d_ld8(bias_blob, ivec3(gz2.y, 0, 0));
106
#else
107
        sum0 = buffer_ld8(bias_data, gz2.x);
108
        sum4 = buffer_ld8(bias_data, gz2.y);
109
#endif
110
        sum1 = sum0;
111
        sum2 = sum0;
112
        sum3 = sum0;
113
        sum5 = sum4;
114
        sum6 = sum4;
115
        sum7 = sum4;
116
    }
117
    else
118
    {
119
        sum0 = afpvec8(afpvec4(0.f), afpvec4(0.f));
120
        sum1 = afpvec8(afpvec4(0.f), afpvec4(0.f));
121
        sum2 = afpvec8(afpvec4(0.f), afpvec4(0.f));
122
        sum3 = afpvec8(afpvec4(0.f), afpvec4(0.f));
123
        sum4 = afpvec8(afpvec4(0.f), afpvec4(0.f));
124
        sum5 = afpvec8(afpvec4(0.f), afpvec4(0.f));
125
        sum6 = afpvec8(afpvec4(0.f), afpvec4(0.f));
126
        sum7 = afpvec8(afpvec4(0.f), afpvec4(0.f));
127
    }
128

129
#if NCNN_image_shader
130
    for (int z = 0; z < psc(c); z++)
131
    {
132
        ivec2 sy = gy2 * stride_h;
133
        int wx = 0;
134

135
        for (int y = 0; y < kernel_h; y++)
136
        {
137
            ivec2 sx = gx2 * stride_w;
138

139
            for (int x = 0; x < kernel_w; x++)
140
            {
141
                afpvec8 v0 = image3d_ld8(bottom_blob, ivec3(sx.x, sy.x, z));
142
                afpvec8 v1 = image3d_ld8(bottom_blob, ivec3(sx.y, sy.x, z));
143
                afpvec8 v2 = image3d_ld8(bottom_blob, ivec3(sx.x, sy.y, z));
144
                afpvec8 v3 = image3d_ld8(bottom_blob, ivec3(sx.y, sy.y, z));
145

146
                afpvec8 k0 = image3d_ld8(weight_blob, ivec3(wx + 0, z, gz2.x));
147
                afpvec8 k1 = image3d_ld8(weight_blob, ivec3(wx + 1, z, gz2.x));
148
                afpvec8 k2 = image3d_ld8(weight_blob, ivec3(wx + 2, z, gz2.x));
149
                afpvec8 k3 = image3d_ld8(weight_blob, ivec3(wx + 3, z, gz2.x));
150
                afpvec8 k4 = image3d_ld8(weight_blob, ivec3(wx + 4, z, gz2.x));
151
                afpvec8 k5 = image3d_ld8(weight_blob, ivec3(wx + 5, z, gz2.x));
152
                afpvec8 k6 = image3d_ld8(weight_blob, ivec3(wx + 6, z, gz2.x));
153
                afpvec8 k7 = image3d_ld8(weight_blob, ivec3(wx + 7, z, gz2.x));
154

155
                afpvec8 k8 = image3d_ld8(weight_blob, ivec3(wx + 0, z, gz2.y));
156
                afpvec8 k9 = image3d_ld8(weight_blob, ivec3(wx + 1, z, gz2.y));
157
                afpvec8 ka = image3d_ld8(weight_blob, ivec3(wx + 2, z, gz2.y));
158
                afpvec8 kb = image3d_ld8(weight_blob, ivec3(wx + 3, z, gz2.y));
159
                afpvec8 kc = image3d_ld8(weight_blob, ivec3(wx + 4, z, gz2.y));
160
                afpvec8 kd = image3d_ld8(weight_blob, ivec3(wx + 5, z, gz2.y));
161
                afpvec8 ke = image3d_ld8(weight_blob, ivec3(wx + 6, z, gz2.y));
162
                afpvec8 kf = image3d_ld8(weight_blob, ivec3(wx + 7, z, gz2.y));
163

164
                // sum += v * k;
165
                sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
166
                sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
167
                sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
168
                sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
169
                sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
170
                sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
171
                sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
172
                sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);
173
                sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
174
                sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
175
                sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
176
                sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
177
                sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
178
                sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
179
                sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
180
                sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);
181
                sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
182
                sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
183
                sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
184
                sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
185
                sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
186
                sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
187
                sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
188
                sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);
189
                sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
190
                sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
191
                sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
192
                sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
193
                sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
194
                sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
195
                sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
196
                sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
197

198
                sum4[0].r += dot(v0[0], k8[0]) + dot(v0[1], k8[1]);
199
                sum4[0].g += dot(v0[0], k9[0]) + dot(v0[1], k9[1]);
200
                sum4[0].b += dot(v0[0], ka[0]) + dot(v0[1], ka[1]);
201
                sum4[0].a += dot(v0[0], kb[0]) + dot(v0[1], kb[1]);
202
                sum4[1].r += dot(v0[0], kc[0]) + dot(v0[1], kc[1]);
203
                sum4[1].g += dot(v0[0], kd[0]) + dot(v0[1], kd[1]);
204
                sum4[1].b += dot(v0[0], ke[0]) + dot(v0[1], ke[1]);
205
                sum4[1].a += dot(v0[0], kf[0]) + dot(v0[1], kf[1]);
206
                sum5[0].r += dot(v1[0], k8[0]) + dot(v1[1], k8[1]);
207
                sum5[0].g += dot(v1[0], k9[0]) + dot(v1[1], k9[1]);
208
                sum5[0].b += dot(v1[0], ka[0]) + dot(v1[1], ka[1]);
209
                sum5[0].a += dot(v1[0], kb[0]) + dot(v1[1], kb[1]);
210
                sum5[1].r += dot(v1[0], kc[0]) + dot(v1[1], kc[1]);
211
                sum5[1].g += dot(v1[0], kd[0]) + dot(v1[1], kd[1]);
212
                sum5[1].b += dot(v1[0], ke[0]) + dot(v1[1], ke[1]);
213
                sum5[1].a += dot(v1[0], kf[0]) + dot(v1[1], kf[1]);
214
                sum6[0].r += dot(v2[0], k8[0]) + dot(v2[1], k8[1]);
215
                sum6[0].g += dot(v2[0], k9[0]) + dot(v2[1], k9[1]);
216
                sum6[0].b += dot(v2[0], ka[0]) + dot(v2[1], ka[1]);
217
                sum6[0].a += dot(v2[0], kb[0]) + dot(v2[1], kb[1]);
218
                sum6[1].r += dot(v2[0], kc[0]) + dot(v2[1], kc[1]);
219
                sum6[1].g += dot(v2[0], kd[0]) + dot(v2[1], kd[1]);
220
                sum6[1].b += dot(v2[0], ke[0]) + dot(v2[1], ke[1]);
221
                sum6[1].a += dot(v2[0], kf[0]) + dot(v2[1], kf[1]);
222
                sum7[0].r += dot(v3[0], k8[0]) + dot(v3[1], k8[1]);
223
                sum7[0].g += dot(v3[0], k9[0]) + dot(v3[1], k9[1]);
224
                sum7[0].b += dot(v3[0], ka[0]) + dot(v3[1], ka[1]);
225
                sum7[0].a += dot(v3[0], kb[0]) + dot(v3[1], kb[1]);
226
                sum7[1].r += dot(v3[0], kc[0]) + dot(v3[1], kc[1]);
227
                sum7[1].g += dot(v3[0], kd[0]) + dot(v3[1], kd[1]);
228
                sum7[1].b += dot(v3[0], ke[0]) + dot(v3[1], ke[1]);
229
                sum7[1].a += dot(v3[0], kf[0]) + dot(v3[1], kf[1]);
230

231
                sx += dilation_w;
232
                wx += 8;
233
            }
234

235
            sy += dilation_h;
236
        }
237
    }
238
#else
239
    ivec2 w_offset = gz2 * psc(c) * kernel_w * kernel_h;
240

241
    for (int z = 0; z < psc(c); z++)
242
    {
243
        ivec4 v_offset;
244
        v_offset.rg = z * psc(cstep) + gy2.x * stride_h * psc(w) + gx2 * stride_w;
245
        v_offset.ba = z * psc(cstep) + gy2.y * stride_h * psc(w) + gx2 * stride_w;
246

247
        for (int y = 0; y < kernel_h; y++)
248
        {
249
            for (int x = 0; x < kernel_w; x++)
250
            {
251
                afpvec8 v0 = buffer_ld8(bottom_blob_data, v_offset.r + x * dilation_w);
252
                afpvec8 v1 = buffer_ld8(bottom_blob_data, v_offset.g + x * dilation_w);
253
                afpvec8 v2 = buffer_ld8(bottom_blob_data, v_offset.b + x * dilation_w);
254
                afpvec8 v3 = buffer_ld8(bottom_blob_data, v_offset.a + x * dilation_w);
255

256
                afpvec8 k0 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 0);
257
                afpvec8 k1 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 1);
258
                afpvec8 k2 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 2);
259
                afpvec8 k3 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 3);
260
                afpvec8 k4 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 4);
261
                afpvec8 k5 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 5);
262
                afpvec8 k6 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 6);
263
                afpvec8 k7 = buffer_ld8(weight_data, (w_offset.x + x) * 8 + 7);
264

265
                afpvec8 k8 = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 0);
266
                afpvec8 k9 = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 1);
267
                afpvec8 ka = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 2);
268
                afpvec8 kb = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 3);
269
                afpvec8 kc = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 4);
270
                afpvec8 kd = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 5);
271
                afpvec8 ke = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 6);
272
                afpvec8 kf = buffer_ld8(weight_data, (w_offset.y + x) * 8 + 7);
273

274
                // sum += v * k
275
                sum0[0].r += dot(v0[0], k0[0]) + dot(v0[1], k0[1]);
276
                sum0[0].g += dot(v0[0], k1[0]) + dot(v0[1], k1[1]);
277
                sum0[0].b += dot(v0[0], k2[0]) + dot(v0[1], k2[1]);
278
                sum0[0].a += dot(v0[0], k3[0]) + dot(v0[1], k3[1]);
279
                sum0[1].r += dot(v0[0], k4[0]) + dot(v0[1], k4[1]);
280
                sum0[1].g += dot(v0[0], k5[0]) + dot(v0[1], k5[1]);
281
                sum0[1].b += dot(v0[0], k6[0]) + dot(v0[1], k6[1]);
282
                sum0[1].a += dot(v0[0], k7[0]) + dot(v0[1], k7[1]);
283
                sum1[0].r += dot(v1[0], k0[0]) + dot(v1[1], k0[1]);
284
                sum1[0].g += dot(v1[0], k1[0]) + dot(v1[1], k1[1]);
285
                sum1[0].b += dot(v1[0], k2[0]) + dot(v1[1], k2[1]);
286
                sum1[0].a += dot(v1[0], k3[0]) + dot(v1[1], k3[1]);
287
                sum1[1].r += dot(v1[0], k4[0]) + dot(v1[1], k4[1]);
288
                sum1[1].g += dot(v1[0], k5[0]) + dot(v1[1], k5[1]);
289
                sum1[1].b += dot(v1[0], k6[0]) + dot(v1[1], k6[1]);
290
                sum1[1].a += dot(v1[0], k7[0]) + dot(v1[1], k7[1]);
291
                sum2[0].r += dot(v2[0], k0[0]) + dot(v2[1], k0[1]);
292
                sum2[0].g += dot(v2[0], k1[0]) + dot(v2[1], k1[1]);
293
                sum2[0].b += dot(v2[0], k2[0]) + dot(v2[1], k2[1]);
294
                sum2[0].a += dot(v2[0], k3[0]) + dot(v2[1], k3[1]);
295
                sum2[1].r += dot(v2[0], k4[0]) + dot(v2[1], k4[1]);
296
                sum2[1].g += dot(v2[0], k5[0]) + dot(v2[1], k5[1]);
297
                sum2[1].b += dot(v2[0], k6[0]) + dot(v2[1], k6[1]);
298
                sum2[1].a += dot(v2[0], k7[0]) + dot(v2[1], k7[1]);
299
                sum3[0].r += dot(v3[0], k0[0]) + dot(v3[1], k0[1]);
300
                sum3[0].g += dot(v3[0], k1[0]) + dot(v3[1], k1[1]);
301
                sum3[0].b += dot(v3[0], k2[0]) + dot(v3[1], k2[1]);
302
                sum3[0].a += dot(v3[0], k3[0]) + dot(v3[1], k3[1]);
303
                sum3[1].r += dot(v3[0], k4[0]) + dot(v3[1], k4[1]);
304
                sum3[1].g += dot(v3[0], k5[0]) + dot(v3[1], k5[1]);
305
                sum3[1].b += dot(v3[0], k6[0]) + dot(v3[1], k6[1]);
306
                sum3[1].a += dot(v3[0], k7[0]) + dot(v3[1], k7[1]);
307

308
                sum4[0].r += dot(v0[0], k8[0]) + dot(v0[1], k8[1]);
309
                sum4[0].g += dot(v0[0], k9[0]) + dot(v0[1], k9[1]);
310
                sum4[0].b += dot(v0[0], ka[0]) + dot(v0[1], ka[1]);
311
                sum4[0].a += dot(v0[0], kb[0]) + dot(v0[1], kb[1]);
312
                sum4[1].r += dot(v0[0], kc[0]) + dot(v0[1], kc[1]);
313
                sum4[1].g += dot(v0[0], kd[0]) + dot(v0[1], kd[1]);
314
                sum4[1].b += dot(v0[0], ke[0]) + dot(v0[1], ke[1]);
315
                sum4[1].a += dot(v0[0], kf[0]) + dot(v0[1], kf[1]);
316
                sum5[0].r += dot(v1[0], k8[0]) + dot(v1[1], k8[1]);
317
                sum5[0].g += dot(v1[0], k9[0]) + dot(v1[1], k9[1]);
318
                sum5[0].b += dot(v1[0], ka[0]) + dot(v1[1], ka[1]);
319
                sum5[0].a += dot(v1[0], kb[0]) + dot(v1[1], kb[1]);
320
                sum5[1].r += dot(v1[0], kc[0]) + dot(v1[1], kc[1]);
321
                sum5[1].g += dot(v1[0], kd[0]) + dot(v1[1], kd[1]);
322
                sum5[1].b += dot(v1[0], ke[0]) + dot(v1[1], ke[1]);
323
                sum5[1].a += dot(v1[0], kf[0]) + dot(v1[1], kf[1]);
324
                sum6[0].r += dot(v2[0], k8[0]) + dot(v2[1], k8[1]);
325
                sum6[0].g += dot(v2[0], k9[0]) + dot(v2[1], k9[1]);
326
                sum6[0].b += dot(v2[0], ka[0]) + dot(v2[1], ka[1]);
327
                sum6[0].a += dot(v2[0], kb[0]) + dot(v2[1], kb[1]);
328
                sum6[1].r += dot(v2[0], kc[0]) + dot(v2[1], kc[1]);
329
                sum6[1].g += dot(v2[0], kd[0]) + dot(v2[1], kd[1]);
330
                sum6[1].b += dot(v2[0], ke[0]) + dot(v2[1], ke[1]);
331
                sum6[1].a += dot(v2[0], kf[0]) + dot(v2[1], kf[1]);
332
                sum7[0].r += dot(v3[0], k8[0]) + dot(v3[1], k8[1]);
333
                sum7[0].g += dot(v3[0], k9[0]) + dot(v3[1], k9[1]);
334
                sum7[0].b += dot(v3[0], ka[0]) + dot(v3[1], ka[1]);
335
                sum7[0].a += dot(v3[0], kb[0]) + dot(v3[1], kb[1]);
336
                sum7[1].r += dot(v3[0], kc[0]) + dot(v3[1], kc[1]);
337
                sum7[1].g += dot(v3[0], kd[0]) + dot(v3[1], kd[1]);
338
                sum7[1].b += dot(v3[0], ke[0]) + dot(v3[1], ke[1]);
339
                sum7[1].a += dot(v3[0], kf[0]) + dot(v3[1], kf[1]);
340
            }
341

342
            v_offset += dilation_h * psc(w);
343
            w_offset += kernel_w;
344
        }
345
    }
346
#endif
347

348
    sum0 = activation_afpvec8(sum0, activation_type, activation_param_0, activation_param_1);
349
    sum1 = activation_afpvec8(sum1, activation_type, activation_param_0, activation_param_1);
350
    sum2 = activation_afpvec8(sum2, activation_type, activation_param_0, activation_param_1);
351
    sum3 = activation_afpvec8(sum3, activation_type, activation_param_0, activation_param_1);
352
    sum4 = activation_afpvec8(sum4, activation_type, activation_param_0, activation_param_1);
353
    sum5 = activation_afpvec8(sum5, activation_type, activation_param_0, activation_param_1);
354
    sum6 = activation_afpvec8(sum6, activation_type, activation_param_0, activation_param_1);
355
    sum7 = activation_afpvec8(sum7, activation_type, activation_param_0, activation_param_1);
356

357
#if NCNN_image_shader
358
    image3d_st8(top_blob, ivec3(gx2.x, gy2.x, gz2.x), sum0);
359
    image3d_st8(top_blob, ivec3(gx2.y, gy2.x, gz2.x), sum1);
360
    image3d_st8(top_blob, ivec3(gx2.x, gy2.y, gz2.x), sum2);
361
    image3d_st8(top_blob, ivec3(gx2.y, gy2.y, gz2.x), sum3);
362
    image3d_st8(top_blob, ivec3(gx2.x, gy2.x, gz2.y), sum4);
363
    image3d_st8(top_blob, ivec3(gx2.y, gy2.x, gz2.y), sum5);
364
    image3d_st8(top_blob, ivec3(gx2.x, gy2.y, gz2.y), sum6);
365
    image3d_st8(top_blob, ivec3(gx2.y, gy2.y, gz2.y), sum7);
366
#else
367
    const ivec2 gi = gz2 * psc(outcstep) + gy * psc(outw) + gx;
368

369
    buffer_st8(top_blob_data, gi.x, sum0);
370
    if (gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.x + 1, sum1);
371
    if (gy + 1 < psc(outh)) buffer_st8(top_blob_data, gi.x + psc(outw), sum2);
372
    if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.x + psc(outw) + 1, sum3);
373
    if (gz + 1 < psc(outc))
374
    {
375
        buffer_st8(top_blob_data, gi.y, sum4);
376
        if (gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.y + 1, sum5);
377
        if (gy + 1 < psc(outh)) buffer_st8(top_blob_data, gi.y + psc(outw), sum6);
378
        if (gy + 1 < psc(outh) && gx + 1 < psc(outw)) buffer_st8(top_blob_data, gi.y + psc(outw) + 1, sum7);
379
    }
380
#endif
381
}