ncnn

Форк
0
/
quantize.cpp 
137 строк · 3.5 Кб
1
// Tencent is pleased to support the open source community by making ncnn available.
2
//
3
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4
//
5
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6
// in compliance with the License. You may obtain a copy of the License at
7
//
8
// https://opensource.org/licenses/BSD-3-Clause
9
//
10
// Unless required by applicable law or agreed to in writing, software distributed
11
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
13
// specific language governing permissions and limitations under the License.
14

15
#include "quantize.h"
16

17
namespace ncnn {
18

19
Quantize::Quantize()
{
    // Quantize consumes exactly one input blob and writes a fresh output
    // blob (float32 -> int8 changes the element size, so no in-place).
    support_inplace = false;
    one_blob_only = true;
}
24

25
int Quantize::load_param(const ParamDict& pd)
26
{
27
    scale_data_size = pd.get(0, 1);
28

29
    return 0;
30
}
31

32
int Quantize::load_model(const ModelBin& mb)
33
{
34
    scale_data = mb.load(scale_data_size, 1);
35
    if (scale_data.empty())
36
        return -100;
37

38
    return 0;
39
}
40

41
// Saturating float -> int8 conversion: round to nearest, halfway cases
// away from zero, then clamp to [-127, 127]. The lower bound is -127
// (not -128) so the quantized range stays symmetric around zero.
static inline signed char float2int8(float v)
{
    // roundf keeps the computation in float; round(v) would silently
    // promote the argument to double (performance-type-promotion-in-math-fn).
    int int32 = static_cast<int>(roundf(v));
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}
48

49
int Quantize::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
50
{
51
    int dims = bottom_blob.dims;
52

53
    if (dims == 1)
54
    {
55
        int w = bottom_blob.w;
56

57
        top_blob.create(w, (size_t)1u, opt.blob_allocator);
58
        if (top_blob.empty())
59
            return -100;
60

61
        const float* ptr = bottom_blob;
62
        signed char* outptr = top_blob;
63

64
        if (scale_data_size == 1)
65
        {
66
            const float scale = scale_data[0];
67

68
            #pragma omp parallel for num_threads(opt.num_threads)
69
            for (int i = 0; i < w; i++)
70
            {
71
                outptr[i] = float2int8(ptr[i] * scale);
72
            }
73
        }
74
        else
75
        {
76
            #pragma omp parallel for num_threads(opt.num_threads)
77
            for (int i = 0; i < w; i++)
78
            {
79
                outptr[i] = float2int8(ptr[i] * scale_data[i]);
80
            }
81
        }
82
    }
83

84
    if (dims == 2)
85
    {
86
        int w = bottom_blob.w;
87
        int h = bottom_blob.h;
88

89
        top_blob.create(w, h, (size_t)1u, opt.blob_allocator);
90
        if (top_blob.empty())
91
            return -100;
92

93
        #pragma omp parallel for num_threads(opt.num_threads)
94
        for (int i = 0; i < h; i++)
95
        {
96
            const float* ptr0 = bottom_blob.row(i);
97
            signed char* outptr0 = top_blob.row<signed char>(i);
98

99
            const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[i];
100

101
            for (int j = 0; j < w; j++)
102
            {
103
                outptr0[j] = float2int8(ptr0[j] * scale);
104
            }
105
        }
106
    }
107

108
    if (dims == 3)
109
    {
110
        int w = bottom_blob.w;
111
        int h = bottom_blob.h;
112
        int channels = bottom_blob.c;
113
        int size = w * h;
114

115
        top_blob.create(w, h, channels, (size_t)1u, opt.blob_allocator);
116
        if (top_blob.empty())
117
            return -100;
118

119
        #pragma omp parallel for num_threads(opt.num_threads)
120
        for (int q = 0; q < channels; q++)
121
        {
122
            const float* ptr = bottom_blob.channel(q);
123
            signed char* outptr = top_blob.channel(q);
124

125
            const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[q];
126

127
            for (int i = 0; i < size; i++)
128
            {
129
                outptr[i] = float2int8(ptr[i] * scale);
130
            }
131
        }
132
    }
133

134
    return 0;
135
}
136

137
} // namespace ncnn
138

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.