/* Copyright (C) 1995-2011, 2016 Mark Adler
 * Copyright (C) 2017 ARM Holdings Inc.
 * Authors:
 *   Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
 *   Adam Stylinski <kungfujesus06@gmail.com>
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
#ifdef ARM_NEON
#include "neon_intrins.h"
#include "../../zbuild.h"
#include "../../adler32_p.h"

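/* Vectorized accumulation of the two Adler-32 sums. On entry and exit,
 * s[0] holds the running adler sum and s[1] the running sum2; len is the
 * number of 16-byte blocks to process (the caller passes n >> 4). No
 * modulo is performed here, so the caller must reduce both sums by BASE. */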
static void NEON_accum32(uint32_t *s, const uint8_t *buf, size_t len) {
    static const uint16_t ALIGNED_(16) taps[64] = {
        64, 63, 62, 61, 60, 59, 58, 57,
        56, 55, 54, 53, 52, 51, 50, 49,
        48, 47, 46, 45, 44, 43, 42, 41,
        40, 39, 38, 37, 36, 35, 34, 33,
        32, 31, 30, 29, 28, 27, 26, 25,
        24, 23, 22, 21, 20, 19, 18, 17,
        16, 15, 14, 13, 12, 11, 10, 9,
        8, 7, 6, 5, 4, 3, 2, 1 };

    uint32x4_t adacc = vdupq_n_u32(0);
    uint32x4_t s2acc = vdupq_n_u32(0);
    uint32x4_t s2acc_0 = vdupq_n_u32(0);
    uint32x4_t s2acc_1 = vdupq_n_u32(0);
    uint32x4_t s2acc_2 = vdupq_n_u32(0);

    adacc = vsetq_lane_u32(s[0], adacc, 0);
    s2acc = vsetq_lane_u32(s[1], s2acc, 0);

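    /* s3acc accumulates the previous adler total once per iteration; scaled
     * by the block size after the loop, it becomes the "length * old adler"
     * term of sum2 without needing a multiply in the hot loop. */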
    uint32x4_t s3acc = vdupq_n_u32(0);
    uint32x4_t adacc_prev = adacc;

    uint16x8_t s2_0, s2_1, s2_2, s2_3;
    s2_0 = s2_1 = s2_2 = s2_3 = vdupq_n_u16(0);

    uint16x8_t s2_4, s2_5, s2_6, s2_7;
    s2_4 = s2_5 = s2_6 = s2_7 = vdupq_n_u16(0);

    size_t num_iter = len >> 2;
    int rem = len & 3;

    for (size_t i = 0; i < num_iter; ++i) {
        uint8x16x4_t d0_d3 = vld1q_u8_x4(buf);

        /* Unfortunately it doesn't look like there's a direct sum 8-bit to
         * 32-bit instruction, so we'll have to make do, summing to 16 bits first */
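        /* vpaddlq_u8 sums adjacent byte pairs into eight uint16 lanes, and
         * vpadalq_u8 does the same while also accumulating into an existing
         * vector, so four 16-byte vectors fold into two uint16x8 partials. */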
50
        uint16x8x2_t hsum, hsum_fold;
51
        hsum.val[0] = vpaddlq_u8(d0_d3.val[0]);
52
        hsum.val[1] = vpaddlq_u8(d0_d3.val[1]);
53

54
        hsum_fold.val[0] = vpadalq_u8(hsum.val[0], d0_d3.val[2]);
55
        hsum_fold.val[1] = vpadalq_u8(hsum.val[1], d0_d3.val[3]);
56

57
        adacc = vpadalq_u16(adacc, hsum_fold.val[0]);
58
        s3acc = vaddq_u32(s3acc, adacc_prev);
59
        adacc = vpadalq_u16(adacc, hsum_fold.val[1]);
60

61
        /* If we do straight widening additions to the 16 bit values, we don't incur
62
         * the usual penalties of a pairwise add. We can defer the multiplications
63
         * until the very end. These will not overflow because we are incurring at
64
         * most 408 loop iterations (NMAX / 64), and a given lane is only going to be
65
         * summed into once. This means for the maximum input size, the largest value
66
         * we will see is 255 * 102 = 26010, safely under uint16 max */
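        /* Each s2_k lane just accumulates raw bytes here; the per-position
         * weights (64 down to 1) are applied in one pass at the end via the
         * taps[] multiply-accumulates below. */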
        s2_0 = vaddw_u8(s2_0, vget_low_u8(d0_d3.val[0]));
        s2_1 = vaddw_high_u8(s2_1, d0_d3.val[0]);
        s2_2 = vaddw_u8(s2_2, vget_low_u8(d0_d3.val[1]));
        s2_3 = vaddw_high_u8(s2_3, d0_d3.val[1]);
        s2_4 = vaddw_u8(s2_4, vget_low_u8(d0_d3.val[2]));
        s2_5 = vaddw_high_u8(s2_5, d0_d3.val[2]);
        s2_6 = vaddw_u8(s2_6, vget_low_u8(d0_d3.val[3]));
        s2_7 = vaddw_high_u8(s2_7, d0_d3.val[3]);

        adacc_prev = adacc;
        buf += 64;
    }

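    /* Each main-loop iteration covered 64 bytes, so scale the deferred adler
     * totals by 64 (shift left by 6) to get their sum2 contribution. */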
    s3acc = vshlq_n_u32(s3acc, 6);

    if (rem) {
        uint32x4_t s3acc_0 = vdupq_n_u32(0);
        while (rem--) {
            uint8x16_t d0 = vld1q_u8(buf);
            uint16x8_t adler;
            adler = vpaddlq_u8(d0);
            s2_6 = vaddw_u8(s2_6, vget_low_u8(d0));
            s2_7 = vaddw_high_u8(s2_7, d0);
            adacc = vpadalq_u16(adacc, adler);
            s3acc_0 = vaddq_u32(s3acc_0, adacc_prev);
            adacc_prev = adacc;
            buf += 16;
        }

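        /* The remainder loop works 16 bytes at a time, so its deferred adler
         * totals scale by 16 (shift left by 4) instead of 64. */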
        s3acc_0 = vshlq_n_u32(s3acc_0, 4);
        s3acc = vaddq_u32(s3acc_0, s3acc);
    }

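    /* Apply the deferred per-position weights: multiply each accumulated
     * byte lane by its tap (64 down to 1) and fold the widened products
     * into the four s2 accumulators. */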
    uint16x8x4_t t0_t3 = vld1q_u16_x4(taps);
    uint16x8x4_t t4_t7 = vld1q_u16_x4(taps + 32);

    s2acc = vmlal_high_u16(s2acc, t0_t3.val[0], s2_0);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[0]), vget_low_u16(s2_0));
    s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[1], s2_1);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[1]), vget_low_u16(s2_1));

    s2acc = vmlal_high_u16(s2acc, t0_t3.val[2], s2_2);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[2]), vget_low_u16(s2_2));
    s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[3], s2_3);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[3]), vget_low_u16(s2_3));

    s2acc = vmlal_high_u16(s2acc, t4_t7.val[0], s2_4);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[0]), vget_low_u16(s2_4));
    s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[1], s2_5);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[1]), vget_low_u16(s2_5));

    s2acc = vmlal_high_u16(s2acc, t4_t7.val[2], s2_6);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[2]), vget_low_u16(s2_6));
    s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[3], s2_7);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[3]), vget_low_u16(s2_7));

    s2acc = vaddq_u32(s2acc_0, s2acc);
    s2acc_2 = vaddq_u32(s2acc_1, s2acc_2);
    s2acc = vaddq_u32(s2acc, s2acc_2);

    uint32x2_t adacc2, s2acc2, as;
    s2acc = vaddq_u32(s2acc, s3acc);
    adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
    s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
    as = vpadd_u32(adacc2, s2acc2);
    s[0] = vget_lane_u32(as, 0);
    s[1] = vget_lane_u32(as, 1);
}

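/* Plain scalar Adler-32 update for short or unaligned stretches; pair[0] is
 * the adler sum and pair[1] is sum2. No modulo is applied here, so the
 * caller is responsible for reducing both sums by BASE. */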
static void NEON_handle_tail(uint32_t *pair, const uint8_t *buf, size_t len) {
    size_t i;
    for (i = 0; i < len; ++i) {
        pair[0] += buf[i];
        pair[1] += pair[0];
    }
}

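/* Illustrative usage (a sketch only; in zlib-ng this entry point is normally
 * selected through the function table rather than called directly):
 *
 *     uint32_t adler = 1;                          // initial Adler-32 seed
 *     adler = adler32_neon(adler, data, data_len); // checksum one buffer
 *
 * where data/data_len are assumed to name the caller's buffer. */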
Z_INTERNAL uint32_t adler32_neon(uint32_t adler, const uint8_t *buf, size_t len) {
    /* split Adler-32 into component sums */
    uint32_t sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;

    /* in case user likes doing a byte at a time, keep it fast */
    if (len == 1)
        return adler32_len_1(adler, buf, sum2);

    /* initial Adler-32 value (deferred check for len == 1 speed) */
    if (buf == NULL)
        return 1L;

    /* in case short lengths are provided, keep it somewhat fast */
    if (len < 16)
        return adler32_len_16(adler, buf, len, sum2);

    uint32_t pair[2];
    int n = NMAX;
    unsigned int done = 0;

    /* Seed the working pair with the component sums; a running checksum may
     * be supplied by the caller (e.g. when checksumming a PNG file in pieces).
     */
    pair[0] = adler;
    pair[1] = sum2;

    /* If memory is not SIMD aligned, do scalar sums up to an aligned
     * offset, provided that doing so doesn't completely eliminate the
     * SIMD work. Aligned loads are still faster on ARM, even though
     * there's no explicit aligned load instruction */
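    /* For example, with buf at an address ending in 0x5, align_offset below
     * is 5 and align_adj is 11: eleven scalar bytes bring us to a 16-byte
     * boundary. */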
175
    unsigned int align_offset = ((uintptr_t)buf & 15);
176
    unsigned int align_adj = (align_offset) ? 16 - align_offset : 0;
177

178
    if (align_offset && len >= (16 + align_adj)) {
179
        NEON_handle_tail(pair, buf, align_adj);
180
        n -= align_adj;
181
        done += align_adj;
182

183
    } else {
184
        /* If here, we failed the len criteria test, it wouldn't be
185
         * worthwhile to do scalar aligning sums */
186
        align_adj = 0;
187
    }
188

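    /* Run the SIMD accumulator over chunks of at most NMAX bytes; NMAX is
     * the largest byte count whose sums still fit in uint32, so both sums
     * need reducing modulo BASE only once per chunk. */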
    while (done < len) {
        int remaining = (int)(len - done);
        n = MIN(remaining, (done == align_adj) ? n : NMAX);

        if (n < 16)
            break;

        NEON_accum32(pair, buf + done, n >> 4);
        pair[0] %= BASE;
        pair[1] %= BASE;

        int actual_nsums = (n >> 4) << 4;
        done += actual_nsums;
    }

    /* Handle the tail elements. */
    if (done < len) {
        NEON_handle_tail(pair, (buf + done), len - done);
        pair[0] %= BASE;
        pair[1] %= BASE;
    }

    /* D = B * 65536 + A, see: https://en.wikipedia.org/wiki/Adler-32. */
    return (pair[1] << 16) | pair[0];
}

#endif