ncnn / convolutiondepthwise3d.cpp
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "convolutiondepthwise3d.h"

#include "fused_activation.h"

namespace ncnn {

ConvolutionDepthWise3D::ConvolutionDepthWise3D()
{
    one_blob_only = true;
    support_inplace = false;
}

int ConvolutionDepthWise3D::load_param(const ParamDict& pd)
{
    num_output = pd.get(0, 0);
    kernel_w = pd.get(1, 0);
    kernel_h = pd.get(11, kernel_w);
    kernel_d = pd.get(21, kernel_w);
    dilation_w = pd.get(2, 1);
    dilation_h = pd.get(12, dilation_w);
    dilation_d = pd.get(22, dilation_w);
    stride_w = pd.get(3, 1);
    stride_h = pd.get(13, stride_w);
    stride_d = pd.get(23, stride_w);
    pad_left = pd.get(4, 0);
    pad_right = pd.get(15, pad_left);
    pad_top = pd.get(14, pad_left);
    pad_bottom = pd.get(16, pad_top);
    pad_front = pd.get(24, pad_left);
    pad_behind = pd.get(17, pad_front);
    pad_value = pd.get(18, 0.f);
    bias_term = pd.get(5, 0);
    weight_data_size = pd.get(6, 0);
    group = pd.get(7, 1);
    activation_type = pd.get(9, 0);
    activation_params = pd.get(10, Mat());

    return 0;
}

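// Illustrative note on the keys read by load_param() above. A hypothetical .param
// entry (made up for this example, not taken from a real model) for a 3x3x3
// depth-wise convolution over 16 channels could look like
//   ConvolutionDepthWise3D conv_dw3d 1 1 in out 0=16 1=3 6=432 7=16
// where 0=num_output, 1=kernel_w (kernel_h, key 11, and kernel_d, key 21, default
// to kernel_w), 6=weight_data_size (3*3*3*16 = 432 floats here) and 7=group.
// Setting 5=1 would additionally request a bias of num_output floats, and key 9
// selects a fused activation (e.g. 1 = ReLU, 2 = leaky ReLU with the slope in
// activation_params); see fused_activation.h for the supported types.
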
int ConvolutionDepthWise3D::load_model(const ModelBin& mb)
{
    weight_data = mb.load(weight_data_size, 0);
    if (weight_data.empty())
        return -100;

    if (bias_term)
    {
        bias_data = mb.load(num_output, 1);
        if (bias_data.empty())
            return -100;
    }

    return 0;
}

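// Note on the weight layout, inferred from the indexing in forward() below and
// stated here as a reading aid: weight_data holds group blocks of
// num_output_g * channels_g filters, each filter being
// maxk = kernel_w * kernel_h * kernel_d contiguous floats, so the element for
// (g, p, q, k) lives at ((g * num_output_g + p) * channels_g + q) * maxk + k.
// In the pure depth-wise case (channels == group == num_output) this collapses
// to one maxk-sized filter per channel.
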
int ConvolutionDepthWise3D::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;

    const int kernel_extend_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extend_h = dilation_h * (kernel_h - 1) + 1;
    const int kernel_extend_d = dilation_d * (kernel_d - 1) + 1;

    Mat bottom_blob_bordered;
    Option opt_pad = opt;
    opt_pad.use_packing_layout = false;
    make_padding(bottom_blob, bottom_blob_bordered, opt_pad);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
    h = bottom_blob_bordered.h;
    d = bottom_blob_bordered.d;

    int outw = (w - kernel_extend_w) / stride_w + 1;
    int outh = (h - kernel_extend_h) / stride_h + 1;
    int outd = (d - kernel_extend_d) / stride_d + 1;

    const int maxk = kernel_w * kernel_h * kernel_d;

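    // How the flattened offsets below are derived (explanatory note added here, not
    // an upstream comment): within a channel the padded volume is stored row-major as
    // w*h per depth slice, so kernel tap (z, i, j) sits
    // (z * dilation_d * h + i * dilation_h) * w + j * dilation_w elements from the
    // window origin. Walking the taps in z/i/j order, each step in j advances by
    // dilation_w; moving to the next row must undo the kernel_w * dilation_w already
    // walked and jump dilation_h rows, giving gap0 = w * dilation_h - kernel_w * dilation_w;
    // moving to the next depth slice likewise gives
    // gap1 = h * w * dilation_d - w * kernel_h * dilation_h.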
    // kernel offsets
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap0 = w * dilation_h - kernel_w * dilation_w;
        int gap1 = h * w * dilation_d - w * kernel_h * dilation_h;
        for (int z = 0; z < kernel_d; z++)
        {
            for (int i = 0; i < kernel_h; i++)
            {
                for (int j = 0; j < kernel_w; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2 += dilation_w;
                }
                p2 += gap0;
            }
            p2 += gap1;
        }
    }

    top_blob.create(outw, outh, outd, num_output, elemsize, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

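    // Two execution paths follow. When channels == group == num_output, every input
    // channel is convolved with its own single maxk-tap filter (the true depth-wise
    // case); otherwise the general grouped-convolution path runs, where each of the
    // num_output/group output channels of a group accumulates over that group's
    // channels/group input channels.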
    // depth-wise
    if (channels == group && group == num_output)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int g = 0; g < group; g++)
        {
            float* outptr = top_blob.channel(g);
            const float* kptr = (const float*)weight_data + maxk * g;
            const Mat m = bottom_blob_bordered.channel(g);

            for (int z = 0; z < outd; z++)
            {
                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        float sum = 0.f;

                        if (bias_term)
                            sum = bias_data[g];

                        const float* sptr = m.depth(z * stride_d).row(i * stride_h) + j * stride_w;

                        for (int k = 0; k < maxk; k++)
                        {
                            float val = sptr[space_ofs[k]];
                            float w = kptr[k];
                            sum += val * w;
                        }

                        outptr[j] = activation_ss(sum, activation_type, activation_params);
                    }

                    outptr += outw;
                }
            }
        }
    }
    else
    {
        // group convolution
        const int channels_g = channels / group;
        const int num_output_g = num_output / group;

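        // The _WIN32 split below is presumably there because MSVC's bundled OpenMP
        // (version 2.0) does not accept the collapse clause; on other platforms the
        // two outer loops are collapsed to expose group * num_output_g parallel tasks.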
#ifdef _WIN32
        #pragma omp parallel for num_threads(opt.num_threads)
#else
        #pragma omp parallel for collapse(2) num_threads(opt.num_threads)
#endif
        for (int g = 0; g < group; g++)
        {
            for (int p = 0; p < num_output_g; p++)
            {
                float* outptr = top_blob.channel(g * num_output_g + p);
                const float* weight_data_ptr = (const float*)weight_data + maxk * channels_g * num_output_g * g;

                // shadowed variable for less openmp task args
                const int outw = top_blob.w;
                const int outh = top_blob.h;
                const int outd = top_blob.d;

                for (int z = 0; z < outd; z++)
                {
                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            float sum = 0.f;

                            if (bias_term)
                                sum = bias_data[num_output_g * g + p];

                            const float* kptr = weight_data_ptr + maxk * channels_g * p;

                            for (int q = 0; q < channels_g; q++)
                            {
                                const Mat m = bottom_blob_bordered.channel(channels_g * g + q);
                                const float* sptr = m.depth(z * stride_d).row(i * stride_h) + j * stride_w;

                                for (int l = 0; l < maxk; l++)
                                {
                                    float val = sptr[space_ofs[l]];

                                    float wt = kptr[l];
                                    sum += val * wt;
                                }

                                kptr += maxk;
                            }

                            outptr[j] = activation_ss(sum, activation_type, activation_params);
                        }

                        outptr += outw;
                    }
                }
            }
        }
    }

    return 0;
}

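// Explanatory note on the padding modes handled below (wording added here, behaviour
// taken from the code): explicit non-negative pads apply a constant border of
// pad_value; the magic value -233 on every pad parameter requests dynamic
// "SAME"-style padding (TensorFlow SAME / ONNX SAME_UPPER), and -234 requests
// ONNX SAME_LOWER. In the dynamic modes the total pad per axis is
//   pad = kernel_extent + ((size - 1) / stride) * stride - size
// split between the two sides: for SAME_UPPER the larger half goes to the trailing
// side (right/bottom/behind), while SAME_LOWER flips this for the height and width axes.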
void ConvolutionDepthWise3D::make_padding(const Mat& bottom_blob, Mat& bottom_blob_bordered, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int kernel_extent_d = dilation_d * (kernel_d - 1) + 1;

    bottom_blob_bordered = bottom_blob;
    if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || pad_front > 0 || pad_behind > 0)
    {
        Option opt_b = opt;
        opt_b.blob_allocator = opt.workspace_allocator;
        copy_make_border_3d(bottom_blob, bottom_blob_bordered, pad_top, pad_bottom, pad_left, pad_right, pad_front, pad_behind, BORDER_CONSTANT, pad_value, opt_b);
    }
    else if (pad_left == -233 && pad_right == -233 && pad_top == -233 && pad_bottom == -233 && pad_front == -233 && pad_behind == -233)
    {
        // tensorflow padding=SAME or onnx padding=SAME_UPPER
        int wpad = kernel_extent_w + (w - 1) / stride_w * stride_w - w;
        int hpad = kernel_extent_h + (h - 1) / stride_h * stride_h - h;
        int dpad = kernel_extent_d + (d - 1) / stride_d * stride_d - d;
        if (wpad > 0 || hpad > 0 || dpad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            copy_make_border_3d(bottom_blob, bottom_blob_bordered, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, dpad / 2, dpad - dpad / 2, BORDER_CONSTANT, pad_value, opt_b);
        }
    }
    else if (pad_left == -234 && pad_right == -234 && pad_top == -234 && pad_bottom == -234 && pad_front == -234 && pad_behind == -234)
    {
        // onnx padding=SAME_LOWER
        int wpad = kernel_extent_w + (w - 1) / stride_w * stride_w - w;
        int hpad = kernel_extent_h + (h - 1) / stride_h * stride_h - h;
        int dpad = kernel_extent_d + (d - 1) / stride_d * stride_d - d;
        if (wpad > 0 || hpad > 0 || dpad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            copy_make_border_3d(bottom_blob, bottom_blob_bordered, hpad - hpad / 2, hpad / 2, wpad - wpad / 2, wpad / 2, dpad / 2, dpad - dpad / 2, BORDER_CONSTANT, pad_value, opt_b);
        }
    }
}

} // namespace ncnn

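/*
Minimal usage sketch (illustrative only, not part of this file): running this layer
standalone through ncnn's layer factory. The shapes, fill values and variable names
below are assumptions made up for the example.

    ncnn::Layer* layer = ncnn::create_layer("ConvolutionDepthWise3D");

    ncnn::ParamDict pd;
    pd.set(0, 4);       // num_output
    pd.set(1, 3);       // kernel_w (kernel_h/kernel_d default to it)
    pd.set(4, 1);       // pad_left (other pads default to it)
    pd.set(6, 4 * 27);  // weight_data_size for a 3x3x3 depth-wise kernel, 4 channels
    pd.set(7, 4);       // group == channels == num_output -> depth-wise path
    layer->load_param(pd);

    ncnn::Mat weights[1];
    weights[0] = ncnn::Mat(4 * 27);
    weights[0].fill(0.1f); // fill with real filter values in practice
    layer->load_model(ncnn::ModelBinFromMatArray(weights));

    ncnn::Option opt;
    opt.num_threads = 1;
    layer->create_pipeline(opt);

    ncnn::Mat bottom(16, 16, 16, 4); // w, h, d, channels
    bottom.fill(1.f);
    ncnn::Mat top;
    layer->forward(bottom, top, opt); // pad 1, stride 1 -> top stays 16x16x16, 4 channels

    layer->destroy_pipeline(opt);
    delete layer;
*/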