/*
 * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/byteswap.hpp"
#include "utilities/copy.hpp"


// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) {
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;

  // (Note:  We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}
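
// A minimal illustration (not part of the original file) of how the dispatch
// above behaves for a hypothetical caller; the buffers are made up. All
// three of from, to and size must share the alignment for a unit to be
// chosen, because their bits are OR-ed together:
//
//   alignas(8) char src[16], dst[16];
//   Copy::conjoint_memory_atomic(src, dst, 16);      // all 8-aligned -> jlong units
//   Copy::conjoint_memory_atomic(src + 4, dst, 12);  // only 4-aligned -> jint units
//   Copy::conjoint_memory_atomic(src + 1, dst, 15);  // odd -> plain byte copy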

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != nullptr, "address must not be null");
    assert(dst != nullptr, "address must not be null");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: " SIZE_FORMAT, elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size);

    address src_end = (address)src + byte_count;

    if (dst <= src || dst >= src_end) {
      do_conjoint_swap<RIGHT,swap>(src, dst, byte_count, elem_size);
    } else {
      do_conjoint_swap<LEFT,swap>(src, dst, byte_count, elem_size);
    }
  }
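
  // Direction sketch (not in the original file), with hypothetical addresses:
  // if src = 0x1000 and byte_count = 16, then src_end = 0x1010. A dst of
  // 0x1008 lies inside [src, src_end), so copying proceeds LEFT (from the
  // high end) and no source element is overwritten before it has been read;
  // any dst outside that range can safely copy RIGHT.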

private:
  enum CopyDirection {
    RIGHT, // lower -> higher address
    LEFT   // higher -> lower address
  };

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   * <is_src_aligned> - true if src argument is aligned to element size
   * <is_dst_aligned> - true if dst argument is aligned to element size
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool swap, bool is_src_aligned, bool is_dst_aligned>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    const char* cur_src;
    char* cur_dst;

    switch (D) {
    case RIGHT:
      cur_src = (const char*)src;
      cur_dst = (char*)dst;
      break;
    case LEFT:
      cur_src = (const char*)src + byte_count - sizeof(T);
      cur_dst = (char*)dst + byte_count - sizeof(T);
      break;
    }

    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
      T tmp;

      if (is_src_aligned) {
        tmp = *(T*)cur_src;
      } else {
        memcpy(&tmp, cur_src, sizeof(T));
      }

      if (swap) {
        tmp = byteswap(tmp);
      }

      if (is_dst_aligned) {
        *(T*)cur_dst = tmp;
      } else {
        memcpy(cur_dst, &tmp, sizeof(T));
      }

      switch (D) {
      case RIGHT:
        cur_src += sizeof(T);
        cur_dst += sizeof(T);
        break;
      case LEFT:
        cur_src -= sizeof(T);
        cur_dst -= sizeof(T);
        break;
      }
    }
  }
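
  // Descriptive note (not in the original file): the overloads below peel
  // off the element-size and alignment decisions at compile time, so this
  // innermost loop is instantiated once per combination -- 2 swap modes
  // x 2 directions x 3 element types x 4 alignment pairings = 48 variants,
  // each reducing to a tight load/(swap)/store loop with no runtime checks.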

  /**
   * Copy and byte swap elements
   *
   * <T>    - type of element to copy
   * <D>    - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    if (is_aligned(src, sizeof(T))) {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,D,swap,true,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,D,swap,true,false>(src, dst, byte_count);
      }
    } else {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,D,swap,false,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,D,swap,false,false>(src, dst, byte_count);
      }
    }
  }


  /**
   * Copy and byte swap elements
   *
   * <D>    - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template <CopyDirection D, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    switch (elem_size) {
    case 2: do_conjoint_swap<uint16_t,D,swap>(src, dst, byte_count); break;
    case 4: do_conjoint_swap<uint32_t,D,swap>(src, dst, byte_count); break;
    case 8: do_conjoint_swap<uint64_t,D,swap>(src, dst, byte_count); break;
    default: guarantee(false, "do_conjoint_swap: Invalid elem_size " SIZE_FORMAT "\n", elem_size);
    }
  }
};

void Copy::conjoint_copy(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<false>(src, dst, byte_count, elem_size);
}

void Copy::conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<true>(src, dst, byte_count, elem_size);
}
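
// A minimal usage sketch (not part of the original file); the buffer and
// values are hypothetical. On a little-endian host this swaps a buffer of
// big-endian 16-bit values in place:
//
//   uint16_t codes[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
//   Copy::conjoint_swap(codes, codes, sizeof(codes), sizeof(uint16_t));
//   // codes[0] == 0x3412, and so on; conjoint_copy with the same arguments
//   // would move the bytes unchanged.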

// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address)to;
  uintptr_t bits = (uintptr_t)to | (uintptr_t)size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)((jubyte)value);  // zero-extend
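    // Byte replication (descriptive note added here): each shift-and-add
    // copies the pattern into the next byte lanes, e.g. 0xAB -> 0xABAB ->
    // 0xABABABAB -> 0xABABABABABABABAB; the adds cannot carry because the
    // occupied lanes are disjoint at every step.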
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    // Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)((jubyte)value);  // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    // Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)((jubyte)value);  // zero-extend
    fill += (jshort)(fill << 8);
    // Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
#ifdef MUSL_LIBC
    // This code is used by Unsafe and may hit the next page after truncation
    // of mapped memory. Therefore, we use volatile to prevent compilers from
    // replacing the loop by memset which may not trigger SIGBUS as needed
    // (observed on Alpine Linux x86_64)
    jbyte fill = value;
    for (uintptr_t off = 0; off < size; off += sizeof(jbyte)) {
      *(volatile jbyte*)(dst + off) = fill;
    }
#else
    Copy::fill_to_bytes(dst, size, value);
#endif
  }
}
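
// A minimal dispatch illustration (not part of the original file); the
// buffer is hypothetical:
//
//   alignas(8) char buf[16];
//   Copy::fill_to_memory_atomic(buf, sizeof(buf), 0xAB);
//   // Two 8-byte stores of 0xABABABABABABABAB. An unaligned address or an
//   // odd size would fall through to a smaller unit or the byte loop.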