/*
 * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "memory/arena.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "unittest.hpp"
#include "testutils.hpp"

#define ASSERT_CONTAINS(ar, p) ASSERT_TRUE(ar.contains(p))

// Note:
// - Amalloc returns a 64-bit aligned pointer (also on 32-bit)
// - AmallocWords returns a word-aligned pointer
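// In other words, on a 32-bit VM Amalloc still hands out 64-bit (8-byte)
//  aligned addresses, while AmallocWords only guarantees word (4-byte)
//  alignment; on 64-bit the two alignments coincide.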
#define ASSERT_ALIGN_AMALLOC(p)       ASSERT_ALIGN(p, ARENA_AMALLOC_ALIGNMENT)
#define ASSERT_ALIGN_AMALLOCWORDS(p)  ASSERT_ALIGN(p, BytesPerWord)

// Do a couple of checks on the return of a successful Amalloc.
#define ASSERT_AMALLOC(ar, p) \
  ASSERT_NOT_NULL(p); \
  ASSERT_CONTAINS(ar, p); \
  ASSERT_ALIGN_AMALLOC(p);

// #define LOG(s) tty->print_cr s;
#define LOG(s)
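// Note that LOG takes its whole printf-style argument list in an extra pair of
// parentheses at the call site, e.g. LOG(("[%d]: " PTR_FORMAT, i, p2i(p))),
// so that the disabled no-op variant above can swallow the arguments.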

// Test behavior for Amalloc(0).
// Amalloc just ignores Amalloc(0) and returns the current hwm without increasing it.
// Therefore, the returned pointer should be non-null and aligned, but not (!) contained
// in the arena, since hwm points beyond the arena.
TEST_VM(Arena, alloc_size_0) {
  Arena ar(mtTest);
  void* p = ar.Amalloc(0);
  ASSERT_NOT_NULL(p);
  ASSERT_ALIGN_AMALLOC(p);

  ASSERT_FALSE(ar.contains(p));
  // Allocate again. The new allocation should have the same position as the 0-sized
  // first one.
  void* p2 = ar.Amalloc(1);
  ASSERT_AMALLOC(ar, p2);
  ASSERT_EQ(p2, p);
}

// Test behavior for Arealloc(p, 0).
TEST_VM(Arena, realloc_size_0) {
  // Arealloc(p, 0) behaves like Afree(p). It should release the memory
  // and, if in top position, roll back the hwm.
  Arena ar(mtTest);
  void* p1 = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p1);
  void* p2 = ar.Arealloc(p1, 0x10, 0);
  ASSERT_NULL(p2);

  // A subsequent allocation should get the same pointer.
  void* p3 = ar.Amalloc(0x20);
  ASSERT_EQ(p3, p1);
}

// Arealloc with equal sizes is a no-op.
TEST_VM(Arena, realloc_same_size) {
  Arena ar(mtTest);
  void* p1 = ar.Amalloc(0x200);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 0x200);

  void* p2 = ar.Arealloc(p1, 0x200, 0x200);

  ASSERT_EQ(p2, p1);
  ASSERT_RANGE_IS_MARKED(p2, 0x200);
}

// Test behavior for Afree(nullptr) and Arealloc(nullptr, x).
TEST_VM(Arena, free_null) {
  Arena ar(mtTest);
  ar.Afree(nullptr, 10); // should just be ignored
}

TEST_VM(Arena, realloc_null) {
  Arena ar(mtTest);
  void* p = ar.Arealloc(nullptr, 0, 20); // equivalent to Amalloc(20)
  ASSERT_AMALLOC(ar, p);
}

// Check Arena::Afree in a non-top position.
// The freed allocation should be zapped (debug only);
// surrounding blocks should be unaffected.
TEST_VM(Arena, free_nontop) {
  Arena ar(mtTest);

  void* p_before = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p_before);
  GtestUtils::mark_range(p_before, 0x10);

  void* p = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p);
  GtestUtils::mark_range_with(p, 0x10, 'Z');

  void* p_after = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p_after);
  GtestUtils::mark_range(p_after, 0x10);

  ASSERT_RANGE_IS_MARKED(p_before, 0x10);
  ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, 'Z');
  ASSERT_RANGE_IS_MARKED(p_after, 0x10);

  ar.Afree(p, 0x10);

  ASSERT_RANGE_IS_MARKED(p_before, 0x10);
  DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
  ASSERT_RANGE_IS_MARKED(p_after, 0x10);
}

// Check Arena::Afree in a top position.
// The freed allocation should be zapped (debug only),
// and the hwm should have been rolled back.
TEST_VM(Arena, free_top) {
  Arena ar(mtTest);

  void* p = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p);
  GtestUtils::mark_range_with(p, 0x10, 'Z');

  ar.Afree(p, 0x10);
  DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)

  // A subsequent allocation should get the same pointer.
  void* p2 = ar.Amalloc(0x20);
  ASSERT_EQ(p2, p);
}


// In-place shrinking.
TEST_VM(Arena, realloc_top_shrink) {
  Arena ar(mtTest);

  void* p1 = ar.Amalloc(0x200);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 0x200);

  void* p2 = ar.Arealloc(p1, 0x200, 0x100);
  ASSERT_EQ(p1, p2);
  ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content

  // A subsequent allocation should be placed right after the end of the first, shrunk, allocation.
  void* p3 = ar.Amalloc(1);
  ASSERT_EQ(p3, ((char*)p1) + 0x100);
}

// Not-in-place shrinking.
TEST_VM(Arena, realloc_nontop_shrink) {
  Arena ar(mtTest);

  void* p1 = ar.Amalloc(200);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 200);

  void* p_other = ar.Amalloc(20); // new top; p1 is not top anymore

  void* p2 = ar.Arealloc(p1, 200, 100);
  ASSERT_EQ(p1, p2); // should still shrink in place
  ASSERT_RANGE_IS_MARKED(p2, 100); // realloc should preserve old content
}

// In-place growing.
TEST_VM(Arena, realloc_top_grow) {
  Arena ar(mtTest); // initial chunk size is large enough for the allocation below to grow in place

  void* p1 = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 0x10);

  void* p2 = ar.Arealloc(p1, 0x10, 0x20);
  ASSERT_EQ(p1, p2);
  ASSERT_RANGE_IS_MARKED(p2, 0x10); // realloc should preserve old content
}

// Not-in-place growing.
TEST_VM(Arena, realloc_nontop_grow) {
  Arena ar(mtTest);

  void* p1 = ar.Amalloc(10);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 10);

  void* p_other = ar.Amalloc(20); // new top; p1 is not top anymore

  void* p2 = ar.Arealloc(p1, 10, 20);
  ASSERT_AMALLOC(ar, p2);
  ASSERT_RANGE_IS_MARKED(p2, 10); // realloc should preserve old content
}

// -------- random alloc test -------------

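// Each allocation is marked with a canary byte derived from its index; the
// value cycles through 'A'..'Z', so neighboring allocations carry different
// patterns and overwrites across allocation boundaries become visible.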
static uint8_t canary(int i) {
  return (uint8_t)('A' + i % 26);
}

// Randomly allocate and reallocate with random sizes and differing alignments;
//  check alignment; check for overwriters.
// We do this a number of times, to give chunk pool handling a good workout too.
TEST_VM(Arena, random_allocs) {

  const int num_allocs = 250 * 1000;
  const int avg_alloc_size = 64;

  void** ptrs = NEW_C_HEAP_ARRAY(void*, num_allocs, mtTest);
  size_t* sizes = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);
  size_t* alignments = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);

  Arena ar(mtTest);

  // Allocate
  for (int i = 0; i < num_allocs; i ++) {
    size_t size = os::random() % (avg_alloc_size * 2); // Note: size==0 is okay; we want to test that too
    size_t alignment = 0;
    void* p = nullptr;
    if (os::random() % 2) { // randomly switch between Amalloc and AmallocWords
      p = ar.Amalloc(size);
      alignment = BytesPerLong;
    } else {
      // Inconsistency: AmallocWords wants its input size word aligned, whereas Amalloc takes
      //  care of alignment itself. We may want to clean this up, but for now just go with it.
      size = align_up(size, BytesPerWord);
      p = ar.AmallocWords(size);
      alignment = BytesPerWord;
    }
    LOG(("[%d]: " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
         i, p2i(p), size, alignment));
    ASSERT_NOT_NULL(p);
    ASSERT_ALIGN(p, alignment);
    if (size > 0) {
      ASSERT_CONTAINS(ar, p);
    }
    GtestUtils::mark_range_with(p, size, canary(i));
    ptrs[i] = p; sizes[i] = size; alignments[i] = alignment;
  }

  // Check pattern in allocations for overwriters.
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Realloc all of them.
  for (int i = 0; i < num_allocs; i ++) {
    size_t new_size = os::random() % (avg_alloc_size * 2);  // Note: 0 is possible and should work
    void* p2 = ar.Arealloc(ptrs[i], sizes[i], new_size);
    if (new_size > 0) {
      ASSERT_NOT_NULL(p2);
      ASSERT_CONTAINS(ar, p2);
      ASSERT_ALIGN(p2, alignments[i]); // Realloc guarantees at least the original alignment
      ASSERT_RANGE_IS_MARKED_WITH(p2, MIN2(sizes[i], new_size), canary(i)); // old content should have been preserved

      GtestUtils::mark_range_with(p2, new_size, canary(i)); // mark new range with canary
    } else {
      ASSERT_NULL(p2);
    }
    ptrs[i] = p2; sizes[i] = new_size;
    LOG(("[%d]: realloc " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
         i, p2i(p2), new_size, alignments[i]));
  }

  // Check test pattern again.
  //  Note that we no longer check the gap pattern, since allocations may have been
  //  shrunk in place, which makes that difficult.
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Randomly free a bunch of allocations.
  for (int i = 0; i < num_allocs; i ++) {
    if (os::random() % 10 == 0) {
      ar.Afree(ptrs[i], sizes[i]);
      // In debug builds the freed space should be filled with badResourceValue
      DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], badResourceValue));
      ptrs[i] = nullptr;
    }
  }

  // Check test pattern again.
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Free temp data.
  FREE_C_HEAP_ARRAY(void*, ptrs);
  FREE_C_HEAP_ARRAY(size_t, sizes);
  FREE_C_HEAP_ARRAY(size_t, alignments);
}

#ifndef LP64
// The tests below are about alignment issues when mixing Amalloc and AmallocWords.
// Since on 64-bit these APIs offer the same alignment, they only matter for 32-bit.

TEST_VM(Arena, mixed_alignment_allocation) {
  // Test that mixed-alignment allocations work and provide allocations with the
  // correct alignment.
  Arena ar(mtTest);
  void* p1 = ar.AmallocWords(BytesPerWord);
  void* p2 = ar.Amalloc(BytesPerLong);
  ASSERT_TRUE(is_aligned(p1, BytesPerWord));
  ASSERT_TRUE(is_aligned(p2, ARENA_AMALLOC_ALIGNMENT));
}

TEST_VM(Arena, Arena_with_crooked_initial_size) {
  // Test that an arena with a crooked initial size (one that is not 64-bit
  // aligned) works.
  Arena ar(mtTest, Arena::Tag::tag_other, 4097);
  void* p1 = ar.AmallocWords(BytesPerWord);
  void* p2 = ar.Amalloc(BytesPerLong);
  ASSERT_TRUE(is_aligned(p1, BytesPerWord));
  ASSERT_TRUE(is_aligned(p2, ARENA_AMALLOC_ALIGNMENT));
}

TEST_VM(Arena, Arena_grows_large_unaligned) {
  // Test that if the arena grows with a large unaligned value, nothing bad happens.
  // We trigger allocation of a new, large, unaligned chunk with a non-standard size
  // (only possible on 32-bit when allocating with word alignment).
  // Then we alloc some more. If Arena::grow() does not align correctly, on 32-bit
  // something should assert at some point.
  Arena ar(mtTest, Arena::Tag::tag_other, 100); // first chunk is small
  void* p = ar.AmallocWords(Chunk::size + BytesPerWord); // if Arena::grow() misaligns, this asserts
  // some more allocations for good measure
  for (int i = 0; i < 100; i ++) {
    ar.Amalloc(1);
  }
}

#endif // !LP64

static size_t random_arena_chunk_size() {
  // Return a standard size at a 50% rate; otherwise, some random size.
  if (os::random() % 10 < 5) {
    static const size_t standard_sizes[4] = {
        Chunk::tiny_size, Chunk::init_size, Chunk::size, Chunk::medium_size
    };
    return standard_sizes[os::random() % 4];
  }
  return ARENA_ALIGN(os::random() % 1024);
}

TEST_VM(Arena, different_chunk_sizes) {
  // Test the creation/pooling of chunks; since ChunkPool is hidden, the
  //  only way to test this is to create/destroy arenas with different init sizes,
  //  which determine the initial chunk size.
  // Note that since the chunk pools are global and get cleaned out periodically,
  //  there is no safe way to actually test their occupancy here.
  for (int i = 0; i < 1000; i ++) {
    // Unfortunately, Arenas cannot be newed,
    // so we are left with awkwardly placing a few on the stack.
    Arena ar0(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar1(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar2(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar3(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar4(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar5(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar6(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
    Arena ar7(mtTest, Arena::Tag::tag_other, random_arena_chunk_size());
  }
}