opencv

Форк
0
/
chunkset_avx2.c 
133 строки · 5.1 Кб
1
/* chunkset_avx2.c -- AVX2 inline functions to copy small data chunks.
2
 * For conditions of distribution and use, see copyright notice in zlib.h
3
 */
4
#include "zbuild.h"
5

6
#ifdef X86_AVX2
7
#include <immintrin.h>
8
#include "../generic/chunk_permute_table.h"
9

10
typedef __m256i chunk_t;
11

12
#define CHUNK_SIZE 32
13

14
#define HAVE_CHUNKMEMSET_2
15
#define HAVE_CHUNKMEMSET_4
16
#define HAVE_CHUNKMEMSET_8
17
#define HAVE_CHUNK_MAG
18

19
/* Populate don't cares so that this is a direct lookup (with some indirection into the permute table), because dist can
20
 * never be 0 - 2, we'll start with an offset, subtracting 3 from the input */
21
/* Lookup keyed by (dist - 3): distances 0-2 never reach this table, so the
 * caller subtracts 3 before indexing.  Slots for the power-of-two distances
 * that are handled elsewhere (4, 8, 16) are populated with don't-care zeros
 * purely to keep this a direct, branch-free index. */
static const lut_rem_pair perm_idx_lut[29] = {
    { 0, 2},                /* 3 */
    { 0, 0},                /* don't care */
    { 1 * 32, 2},           /* 5 */
    { 2 * 32, 2},           /* 6 */
    { 3 * 32, 4},           /* 7 */
    { 0 * 32, 0},           /* don't care */
    { 4 * 32, 5},           /* 9 */
    { 5 * 32, 22},          /* 10 */
    { 6 * 32, 21},          /* 11 */
    { 7 * 32, 20},          /* 12 */
    { 8 * 32, 6},           /* 13 */
    { 9 * 32, 4},           /* 14 */
    {10 * 32, 2},           /* 15 */
    { 0 * 32, 0},           /* don't care */
    {11 * 32, 15},          /* 17 */
    {11 * 32 + 16, 14},     /* 18 */
    {11 * 32 + 16 * 2, 13}, /* 19 */
    {11 * 32 + 16 * 3, 12}, /* 20 */
    {11 * 32 + 16 * 4, 11}, /* 21 */
    {11 * 32 + 16 * 5, 10}, /* 22 */
    {11 * 32 + 16 * 6,  9}, /* 23 */
    {11 * 32 + 16 * 7,  8}, /* 24 */
    {11 * 32 + 16 * 8,  7}, /* 25 */
    {11 * 32 + 16 * 9,  6}, /* 26 */
    {11 * 32 + 16 * 10, 5}, /* 27 */
    {11 * 32 + 16 * 11, 4}, /* 28 */
    {11 * 32 + 16 * 12, 3}, /* 29 */
    {11 * 32 + 16 * 13, 2}, /* 30 */
    {11 * 32 + 16 * 14, 1}  /* 31 */
};
52

53
/* Broadcast the 2-byte pattern at `from` across the whole 32-byte chunk.
 * memcpy (not a pointer cast) is used to read the possibly unaligned,
 * possibly aliasing source without undefined behavior. */
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    int16_t pattern;
    memcpy(&pattern, from, sizeof(pattern));
    *chunk = _mm256_set1_epi16(pattern);
}
58

59
/* Broadcast the 4-byte pattern at `from` across the whole 32-byte chunk.
 * memcpy avoids strict-aliasing and alignment issues on the byte source. */
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    int32_t pattern;
    memcpy(&pattern, from, sizeof(pattern));
    *chunk = _mm256_set1_epi32(pattern);
}
64

65
/* Broadcast the 8-byte pattern at `from` across the whole 32-byte chunk.
 * memcpy avoids strict-aliasing and alignment issues on the byte source. */
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    int64_t pattern;
    memcpy(&pattern, from, sizeof(pattern));
    *chunk = _mm256_set1_epi64x(pattern);
}
70

71
/* Load 32 bytes from `s` (no alignment requirement) into *chunk.
 * The cast keeps the const qualifier: the original (__m256i *) cast silently
 * stripped const from `s`, which _mm256_loadu_si256 never needed. */
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    *chunk = _mm256_loadu_si256((const __m256i *)s);
}
74

75
/* Store the 32-byte chunk to `out`; unaligned destinations are fine. */
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    _mm256_storeu_si256((__m256i *)out, *chunk);
}
78

79
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
80
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
81
    __m256i ret_vec;
82
    /* While technically we only need to read 4 or 8 bytes into this vector register for a lot of cases, GCC is
83
     * compiling this to a shared load for all branches, preferring the simpler code.  Given that the buf value isn't in
84
     * GPRs to begin with the 256 bit load is _probably_ just as inexpensive */
85
    *chunk_rem = lut_rem.remval;
86

87
    /* See note in chunkset_ssse3.c for why this is ok */
88
    __msan_unpoison(buf + dist, 32 - dist);
89

90
    if (dist < 16) {
91
        /* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after
92
         * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate
93
         * shuffles and combining the halves later */
94
        const __m256i permute_xform =
95
            _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
96
                             16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
97
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx));
98
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
99
        perm_vec = _mm256_add_epi8(perm_vec, permute_xform);
100
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
101
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
102
    } else if (dist == 16) {
103
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
104
        return _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
105
    } else {
106
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
107
        __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16));
108
        /* Take advantage of the fact that only the latter half of the 256 bit vector will actually differ */
109
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
110
        __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1);
111
        __m128i xlane_res  = _mm_shuffle_epi8(ret_vec0, perm_vec1);
112
        /* Since we can't wrap twice, we can simply keep the later half exactly how it is instead of having to _also_
113
         * shuffle those values */
114
        __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes);
115
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
116
    }
117

118
    return ret_vec;
119
}
120

121
#define CHUNKSIZE        chunksize_avx2
122
#define CHUNKCOPY        chunkcopy_avx2
123
#define CHUNKUNROLL      chunkunroll_avx2
124
#define CHUNKMEMSET      chunkmemset_avx2
125
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx2
126

127
#include "chunkset_tpl.h"
128

129
#define INFLATE_FAST     inflate_fast_avx2
130

131
#include "inffast_tpl.h"
132

133
#endif
134

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.