ncnn / deconvolutiondepthwise3d.cpp

// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "deconvolutiondepthwise3d.h"

#include "fused_activation.h"

namespace ncnn {

DeconvolutionDepthWise3D::DeconvolutionDepthWise3D()
{
    one_blob_only = true;
    support_inplace = false;
}

int DeconvolutionDepthWise3D::load_param(const ParamDict& pd)
{
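    // per ncnn convention the h/d variants of each parameter default to the w (or left) value
    // when they are absent from the param file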
    num_output = pd.get(0, 0);
    kernel_w = pd.get(1, 0);
    kernel_h = pd.get(11, kernel_w);
    kernel_d = pd.get(21, kernel_w);
    dilation_w = pd.get(2, 1);
    dilation_h = pd.get(12, dilation_w);
    dilation_d = pd.get(22, dilation_w);
    stride_w = pd.get(3, 1);
    stride_h = pd.get(13, stride_w);
    stride_d = pd.get(23, stride_w);
    pad_left = pd.get(4, 0);
    pad_right = pd.get(15, pad_left);
    pad_top = pd.get(14, pad_left);
    pad_bottom = pd.get(16, pad_top);
    pad_front = pd.get(24, pad_left);
    pad_behind = pd.get(17, pad_front);
    output_pad_right = pd.get(18, 0);
    output_pad_bottom = pd.get(19, output_pad_right);
    output_pad_behind = pd.get(20, output_pad_right);
    output_w = pd.get(25, 0);
    output_h = pd.get(26, output_w);
    output_d = pd.get(27, output_w);
    bias_term = pd.get(5, 0);
    weight_data_size = pd.get(6, 0);
    group = pd.get(7, 1);
    activation_type = pd.get(9, 0);
    activation_params = pd.get(10, Mat());

    return 0;
}

int DeconvolutionDepthWise3D::load_model(const ModelBin& mb)
{
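    // a single contiguous weight blob covers all groups; the grouped branch of the kernel
    // below indexes it as [group][outch_g][inch_g][maxk]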
    weight_data = mb.load(weight_data_size, 0);
    if (weight_data.empty())
        return -100;

    if (bias_term)
    {
        bias_data = mb.load(num_output, 1);
        if (bias_data.empty())
            return -100;
    }

    return 0;
}

static int deconvolutiondepthwise3d(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data, const Mat& bias_data, int kernel_w, int kernel_h, int kernel_d, int stride_w, int stride_h, int stride_d, int dilation_w, int dilation_h, int dilation_d, int group, int activation_type, const Mat& activation_params, const Option& opt)
{
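    // deconvolution as scatter-add: each input voxel is multiplied by all maxk kernel weights
    // and accumulated into a dilated kernel_w x kernel_h x kernel_d window of the output whose
    // origin advances by the stride; the fused activation is applied once at the end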
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h * kernel_d;

    // kernel offsets
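    // space_ofs[k] is the linear offset (in output elements) of kernel tap k relative to the
    // window origin, with dilation applied; gap0 jumps to the next kernel row and gap1 to the
    // next kernel slice of the output volume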
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap0 = outw * dilation_h - kernel_w * dilation_w;
        int gap1 = outh * outw * dilation_d - outw * kernel_h * dilation_h;
        for (int z = 0; z < kernel_d; z++)
        {
            for (int i = 0; i < kernel_h; i++)
            {
                for (int j = 0; j < kernel_w; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2 += dilation_w;
                }
                p2 += gap0;
            }
            p2 += gap1;
        }
    }

    // depth-wise
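    // taken when every group holds exactly one input and one output channel, so each channel
    // is deconvolved independently with its own maxk weights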
    if (inch == group && group == outch)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int g = 0; g < group; g++)
        {
            const float* inptr = bottom_blob.channel(g);
            const float* kptr = (const float*)weight_data + maxk * g;
            Mat out = top_blob.channel(g);

            const float bias = bias_data.empty() ? 0.f : bias_data[g];

            out.fill(bias);

            // shadowed variable for less openmp task args
            const int w = bottom_blob.w;
            const int h = bottom_blob.h;
            const int d = bottom_blob.d;
            const int outw = top_blob.w;
            const int outh = top_blob.h;
            const int outd = top_blob.d;

            for (int z = 0; z < d; z++)
            {
                for (int i = 0; i < h; i++)
                {
                    for (int j = 0; j < w; j++)
                    {
                        float* outptr = out.depth(z * stride_d).row(i * stride_h) + j * stride_w;

                        const float val = inptr[z * w * h + i * w + j];

                        for (int k = 0; k < maxk; k++)
                        {
                            float w = kptr[k];
                            outptr[space_ofs[k]] += val * w;
                        }
                    }
                }
            }

            {
                float* outptr = out;
                int size = outw * outh * outd;

                for (int i = 0; i < size; i++)
                {
                    outptr[i] = activation_ss(outptr[i], activation_type, activation_params);
                }
            }
        }
    }
    else
    {
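        // general grouped path: each of the outch_g output channels of group g accumulates
        // contributions from the group's inch_g input channels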
        const int inch_g = inch / group;
        const int outch_g = outch / group;

#ifdef _WIN32
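        // the collapse clause needs OpenMP 3.0, which MSVC's OpenMP mode does not provide,
        // so only the outer group loop is parallelized on Windows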
        #pragma omp parallel for num_threads(opt.num_threads)
#else
        #pragma omp parallel for collapse(2) num_threads(opt.num_threads)
#endif
        for (int g = 0; g < group; g++)
        {
            for (int p = 0; p < outch_g; p++)
            {
                Mat out = top_blob.channel(g * outch_g + p);

                const float* weight_data_ptr = (const float*)weight_data + maxk * inch_g * outch_g * g;

                const float bias = bias_data.empty() ? 0.f : bias_data[g * outch_g + p];

                out.fill(bias);

                // shadowed variable for less openmp task args
                const int w = bottom_blob.w;
                const int h = bottom_blob.h;
                const int d = bottom_blob.d;
                const int outw = top_blob.w;
                const int outh = top_blob.h;
                const int outd = top_blob.d;

                for (int z = 0; z < d; z++)
                {
                    for (int i = 0; i < h; i++)
                    {
                        for (int j = 0; j < w; j++)
                        {
                            float* outptr = out.depth(z * stride_d).row(i * stride_h) + j * stride_w;

                            const float* kptr = weight_data_ptr + maxk * inch_g * p;

                            for (int q = 0; q < inch_g; q++)
                            {
                                const float val = bottom_blob.channel(inch_g * g + q).depth(z).row(i)[j];

                                for (int k = 0; k < maxk; k++)
                                {
                                    outptr[space_ofs[k]] += val * kptr[k];
                                }

                                kptr += maxk;
                            }
                        }
                    }
                }

                {
                    float* outptr = out;
                    int size = outw * outh * outd;

                    for (int i = 0; i < size; i++)
                    {
                        outptr[i] = activation_ss(outptr[i], activation_type, activation_params);
                    }
                }
            }
        }
    }

    return 0;
}

int DeconvolutionDepthWise3D::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    size_t elemsize = bottom_blob.elemsize;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int kernel_extent_d = dilation_d * (kernel_d - 1) + 1;

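    // transposed-convolution output size before any cropping:
    // out = (in - 1) * stride + dilation * (kernel - 1) + 1 + output_padding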
    int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
    int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
    int outd = (d - 1) * stride_d + kernel_extent_d + output_pad_behind;

    Mat top_blob_bordered;
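    // when padding or a fixed output size is requested, compute into a temporary blob that
    // cut_padding() will crop into top_blob; otherwise the uncropped result becomes top_blob as-is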
    if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || pad_front > 0 || pad_behind > 0 || (output_w > 0 && output_h > 0 && output_d > 0))
    {
        top_blob_bordered.create(outw, outh, outd, num_output, elemsize, opt.workspace_allocator);
    }
    else
    {
        top_blob_bordered = top_blob;
        top_blob_bordered.create(outw, outh, outd, num_output, elemsize, opt.blob_allocator);
    }
    if (top_blob_bordered.empty())
        return -100;

    int ret = deconvolutiondepthwise3d(bottom_blob, top_blob_bordered, weight_data, bias_data, kernel_w, kernel_h, kernel_d, stride_w, stride_h, stride_d, dilation_w, dilation_h, dilation_d, group, activation_type, activation_params, opt);
    if (ret != 0)
        return ret;

    cut_padding(top_blob_bordered, top_blob, opt);
    if (top_blob.empty())
        return -100;

    return 0;
}

void DeconvolutionDepthWise3D::cut_padding(const Mat& top_blob_bordered, Mat& top_blob, const Option& opt) const
{
    if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || pad_front > 0 || pad_behind > 0)
    {
        copy_cut_border_3d(top_blob_bordered, top_blob, pad_top, pad_bottom, pad_left, pad_right, pad_front, pad_behind, opt);
    }
    else if (output_w > 0 && output_h > 0 && output_d > 0)
    {
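        // a fixed output_w/h/d was requested: wcut/hcut/dcut is the surplus to trim from the
        // bordered result, split according to the onnx auto_pad mode encoded in the pad sentinels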
        int wcut = top_blob_bordered.w - output_w;
        int hcut = top_blob_bordered.h - output_h;
        int dcut = top_blob_bordered.d - output_d;

        if (pad_left == -233 || pad_right == -233 || pad_top == -233 || pad_bottom == -233 || pad_front == -233 || pad_behind == -233)
        {
            // onnx padding=SAME_UPPER
            copy_cut_border_3d(top_blob_bordered, top_blob, hcut / 2, hcut - hcut / 2, wcut / 2, wcut - wcut / 2, dcut / 2, dcut - dcut / 2, opt);
        }
        else if (pad_left == -234 || pad_right == -234 || pad_top == -234 || pad_bottom == -234 || pad_front == -234 || pad_behind == -234)
        {
            // onnx padding=SAME_LOWER
            copy_cut_border_3d(top_blob_bordered, top_blob, hcut - hcut / 2, hcut / 2, wcut - wcut / 2, wcut / 2, dcut - dcut / 2, dcut / 2, opt);
        }
    }
    else
    {
        top_blob = top_blob_bordered;
    }
}

} // namespace ncnn