/*
* Copyright (c) 2020, 2023 SAP SE. All rights reserved.
3
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
26
#include "precompiled.hpp"
27
#include "memory/metaspace/chunkManager.hpp"
28
#include "memory/metaspace/freeChunkList.hpp"
29
#include "memory/metaspace/metachunk.hpp"
30
#include "memory/metaspace/metaspaceSettings.hpp"
31
#include "memory/metaspace/virtualSpaceNode.hpp"
32
#include "metaspaceGtestCommon.hpp"
33
#include "metaspaceGtestContexts.hpp"
34
#include "runtime/mutexLocker.hpp"
36
using metaspace::ChunkManager;
37
using metaspace::FreeChunkListVector;
38
using metaspace::Metachunk;
39
using metaspace::Settings;
40
using metaspace::VirtualSpaceNode;
41
using namespace metaspace::chunklevel;
43
// Test ChunkManager::get_chunk
44
TEST_VM(metaspace, get_chunk) {
46
ChunkGtestContext context(8 * M);
47
Metachunk* c = nullptr;
49
for (chunklevel_t pref_lvl = LOWEST_CHUNK_LEVEL; pref_lvl <= HIGHEST_CHUNK_LEVEL; pref_lvl++) {
51
for (chunklevel_t max_lvl = pref_lvl; max_lvl <= HIGHEST_CHUNK_LEVEL; max_lvl++) {
53
for (size_t min_committed_words = Settings::commit_granule_words();
54
min_committed_words <= word_size_for_level(max_lvl); min_committed_words *= 2) {
55
context.alloc_chunk_expect_success(&c, pref_lvl, max_lvl, min_committed_words);
56
context.return_chunk(c);
62
// Test ChunkManager::get_chunk, but with a commit limit.
63
TEST_VM(metaspace, get_chunk_with_commit_limit) {
65
// A commit limit that is smaller than the largest possible chunk size.
67
// Here we test different combinations of commit limit, preferred and highest chunk level, and min_committed_size.
69
for (size_t commit_limit_words = Settings::commit_granule_words();
70
commit_limit_words < MAX_CHUNK_WORD_SIZE * 2; commit_limit_words *= 2) {
72
ChunkGtestContext context(commit_limit_words);
73
Metachunk* c = nullptr;
75
for (chunklevel_t pref_lvl = LOWEST_CHUNK_LEVEL; pref_lvl <= HIGHEST_CHUNK_LEVEL; pref_lvl++) {
77
for (chunklevel_t max_lvl = pref_lvl; max_lvl <= HIGHEST_CHUNK_LEVEL; max_lvl++) {
79
for (size_t min_committed_words = Settings::commit_granule_words();
80
min_committed_words <= word_size_for_level(max_lvl); min_committed_words *= 2) {
82
// When should commit work? As long as min_committed_words is smaller than commit_limit_words.
83
bool commit_should_work = min_committed_words <= commit_limit_words;
85
// printf("commit_limit: " SIZE_FORMAT ", min_committed_words: " SIZE_FORMAT
86
// ", max chunk level: " CHKLVL_FORMAT ", preferred chunk level: " CHKLVL_FORMAT ", should work: %d\n",
87
// commit_limit_words, min_committed_words, max_lvl, pref_lvl, commit_should_work);
90
if (commit_should_work) {
91
context.alloc_chunk_expect_success(&c, pref_lvl, max_lvl, min_committed_words);
92
context.return_chunk(c);
94
context.alloc_chunk_expect_failure(pref_lvl, max_lvl, min_committed_words);
102
// Test that recommitting the used portion of a chunk will preserve the original content.
103
TEST_VM(metaspace, get_chunk_recommit) {
105
ChunkGtestContext context;
106
Metachunk* c = nullptr;
107
context.alloc_chunk_expect_success(&c, ROOT_CHUNK_LEVEL, ROOT_CHUNK_LEVEL, 0);
108
context.uncommit_chunk_with_test(c);
110
context.commit_chunk_with_test(c, Settings::commit_granule_words());
111
context.allocate_from_chunk(c, Settings::commit_granule_words());
113
c->ensure_committed(Settings::commit_granule_words());
114
check_range_for_pattern(c->base(), c->used_words(), (uintx)c);
116
c->ensure_committed(Settings::commit_granule_words() * 2);
117
check_range_for_pattern(c->base(), c->used_words(), (uintx)c);
119
context.return_chunk(c);
123
// Test ChunkManager::get_chunk, but with a reserve limit.
124
// (meaning, the underlying VirtualSpaceList cannot expand, like compressed class space).
125
TEST_VM(metaspace, get_chunk_with_reserve_limit) {
127
const size_t reserve_limit_words = word_size_for_level(ROOT_CHUNK_LEVEL);
128
const size_t commit_limit_words = 1024 * M; // just very high
129
ChunkGtestContext context(commit_limit_words, reserve_limit_words);
131
// Reserve limit works at root chunk size granularity: if the chunk manager cannot satisfy
132
// a request for a chunk from its freelists, it will acquire a new root chunk from the
133
// underlying virtual space list. If that list is full and cannot be expanded (think ccs)
134
// we should get an error.
135
// Testing this is simply testing a chunk allocation which should cause allocation of a new
138
// Cause allocation of the firstone root chunk, should still work:
139
Metachunk* c = nullptr;
140
context.alloc_chunk_expect_success(&c, HIGHEST_CHUNK_LEVEL);
142
// and this should need a new root chunk and hence fail:
143
context.alloc_chunk_expect_failure(ROOT_CHUNK_LEVEL);
145
context.return_chunk(c);
149
// Test MetaChunk::allocate
150
TEST_VM(metaspace, chunk_allocate_full) {
152
ChunkGtestContext context;
154
for (chunklevel_t lvl = LOWEST_CHUNK_LEVEL; lvl <= HIGHEST_CHUNK_LEVEL; lvl++) {
155
Metachunk* c = nullptr;
156
context.alloc_chunk_expect_success(&c, lvl);
157
context.allocate_from_chunk(c, c->word_size());
158
context.return_chunk(c);
163
// Test MetaChunk::allocate
164
TEST_VM(metaspace, chunk_allocate_random) {
166
ChunkGtestContext context;
168
for (chunklevel_t lvl = LOWEST_CHUNK_LEVEL; lvl <= HIGHEST_CHUNK_LEVEL; lvl++) {
170
Metachunk* c = nullptr;
171
context.alloc_chunk_expect_success(&c, lvl);
172
context.uncommit_chunk_with_test(c); // start out fully uncommitted
174
RandSizeGenerator rgen(1, c->word_size() / 30);
178
const size_t s = rgen.get();
179
if (s <= c->free_words()) {
180
context.commit_chunk_with_test(c, s);
181
context.allocate_from_chunk(c, s);
187
context.return_chunk(c);
193
TEST_VM(metaspace, chunk_buddy_stuff) {
195
for (chunklevel_t l = ROOT_CHUNK_LEVEL + 1; l <= HIGHEST_CHUNK_LEVEL; l++) {
197
ChunkGtestContext context;
199
// Allocate two chunks; since we know the first chunk is the first in its area,
200
// it has to be a leader, and the next one of the same size its buddy.
202
// (Note: strictly speaking the ChunkManager does not promise any placement but
203
// we know how the placement works so these tests make sense).
205
Metachunk* c1 = nullptr;
206
context.alloc_chunk(&c1, CHUNK_LEVEL_1K);
207
EXPECT_TRUE(c1->is_leader());
209
Metachunk* c2 = nullptr;
210
context.alloc_chunk(&c2, CHUNK_LEVEL_1K);
211
EXPECT_FALSE(c2->is_leader());
213
// buddies are adjacent in memory
214
// (next/prev_in_vs needs lock)
216
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
217
EXPECT_EQ(c1->next_in_vs(), c2);
218
EXPECT_EQ(c1->end(), c2->base());
219
EXPECT_NULL(c1->prev_in_vs()); // since we know this is the first in the area
220
EXPECT_EQ(c2->prev_in_vs(), c1);
223
context.return_chunk(c1);
224
context.return_chunk(c2);
230
TEST_VM(metaspace, chunk_allocate_with_commit_limit) {
232
const size_t granule_sz = Settings::commit_granule_words();
233
const size_t commit_limit = granule_sz * 3;
234
ChunkGtestContext context(commit_limit);
236
// A big chunk, but uncommitted.
237
Metachunk* c = nullptr;
238
context.alloc_chunk_expect_success(&c, ROOT_CHUNK_LEVEL, ROOT_CHUNK_LEVEL, 0);
239
context.uncommit_chunk_with_test(c); // ... just to make sure.
242
context.commit_chunk_with_test(c, granule_sz);
243
context.allocate_from_chunk(c, granule_sz);
246
context.commit_chunk_with_test(c, granule_sz);
247
context.allocate_from_chunk(c, granule_sz);
250
context.commit_chunk_with_test(c, granule_sz);
251
context.allocate_from_chunk(c, granule_sz);
253
// This should fail now.
254
context.commit_chunk_expect_failure(c, granule_sz);
256
context.return_chunk(c);
260
// Test splitting a chunk
261
TEST_VM(metaspace, chunk_split_and_merge) {
263
// Split works like this:
265
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
267
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
269
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
270
// | A' | b | c | d | e |
271
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
273
// A original chunk (A) is split to form a target chunk (A') and as a result splinter
274
// chunks form (b..e). A' is the leader of the (A',b) pair, which is the leader of the
275
// ((A',b), c) pair and so on. In other words, A' will be a leader chunk, all splinter
276
// chunks are follower chunks.
278
// Merging reverses this operation:
280
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
281
// | A | b | c | d | e |
282
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
284
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
286
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
288
// (A) will be merged with its buddy b, (A+b) with its buddy c and so on. The result
290
// Note that merging also works, of course, if we were to start the merge at (b) (so,
291
// with a follower chunk, not a leader). Also, at any point in the merge
292
// process we may arrive at a follower chunk. So, the fact that in this test
293
// we only expect a leader merge is a feature of the test, and of the fact that we
294
// start each split test with a fresh ChunkTestsContext.
296
// Note: Splitting and merging chunks is usually done from within the ChunkManager and
297
// subject to a lot of assumptions and hence asserts. Here, we have to explicitly use
298
// VirtualSpaceNode::split/::merge and therefore have to observe rules:
299
// - both split and merge expect free chunks, so state has to be "free"
300
// - but that would trigger the "ideally merged" assertion in the RootChunkArea, so the
301
// original chunk has to be a root chunk, we cannot just split any chunk manually.
302
// - Also, after the split we have to completely re-merge to avoid triggering asserts
303
// in ~RootChunkArea()
304
// - finally we have to lock manually
306
ChunkGtestContext context;
308
const chunklevel_t orig_lvl = ROOT_CHUNK_LEVEL;
309
for (chunklevel_t target_lvl = orig_lvl + 1; target_lvl <= HIGHEST_CHUNK_LEVEL; target_lvl++) {
311
// Split a fully committed chunk. The resulting chunk should be fully
312
// committed as well, and have its content preserved.
313
Metachunk* c = nullptr;
314
context.alloc_chunk_expect_success(&c, orig_lvl);
316
// We allocate from this chunk to be able to completely paint the payload.
317
context.allocate_from_chunk(c, c->word_size());
319
const uintx canary = os::random();
320
fill_range_with_pattern(c->base(), c->word_size(), canary);
322
FreeChunkListVector splinters;
325
// Splitting/Merging chunks is usually done by the chunkmanager, and no explicit
326
// outside API exists. So we split/merge chunks via the underlying vs node, directly.
327
// This means that we have to go through some extra hoops to not trigger any asserts.
328
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
329
c->reset_used_words();
331
c->vsnode()->split(target_lvl, c, &splinters);
334
DEBUG_ONLY(context.verify();)
336
EXPECT_EQ(c->level(), target_lvl);
337
EXPECT_TRUE(c->is_fully_committed());
338
EXPECT_FALSE(c->is_root_chunk());
339
EXPECT_TRUE(c->is_leader());
341
check_range_for_pattern(c->base(), c->word_size(), canary);
343
// I expect splinter chunks (one for each splinter level:
344
// e.g. splitting a 1M chunk to get a 64K chunk should yield splinters: [512K, 256K, 128K, 64K]
345
for (chunklevel_t l = LOWEST_CHUNK_LEVEL; l < HIGHEST_CHUNK_LEVEL; l++) {
346
const Metachunk* c2 = splinters.first_at_level(l);
347
if (l > orig_lvl && l <= target_lvl) {
349
EXPECT_EQ(c2->level(), l);
350
EXPECT_TRUE(c2->is_free());
351
EXPECT_TRUE(!c2->is_leader());
352
DEBUG_ONLY(c2->verify());
353
check_range_for_pattern(c2->base(), c2->word_size(), canary);
359
// Revert the split by using merge. This should result in all splinters coalescing
362
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
363
Metachunk* merged = c->vsnode()->merge(c, &splinters);
365
// the merged chunk should occupy the same address as the splinter
366
// since it should have been the leader in the split.
367
EXPECT_EQ(merged, c);
368
EXPECT_TRUE(merged->is_root_chunk() || merged->is_leader());
370
// Splitting should have arrived at the original chunk since none of the splinters are in use.
371
EXPECT_EQ(c->level(), orig_lvl);
373
// All splinters should have been removed from the list
374
EXPECT_EQ(splinters.num_chunks(), 0);
377
context.return_chunk(c);
383
TEST_VM(metaspace, chunk_enlarge_in_place) {
385
ChunkGtestContext context;
387
// Starting with the smallest chunk size, attempt to enlarge the chunk in place until we arrive
388
// at root chunk size. Since the state is clean, this should work.
390
Metachunk* c = nullptr;
391
context.alloc_chunk_expect_success(&c, HIGHEST_CHUNK_LEVEL);
393
chunklevel_t l = c->level();
395
while (l != ROOT_CHUNK_LEVEL) {
397
// commit and allocate from chunk to pattern it...
398
const size_t original_chunk_size = c->word_size();
399
context.commit_chunk_with_test(c, c->free_words());
400
context.allocate_from_chunk(c, c->free_words());
402
size_t used_before = c->used_words();
403
size_t free_before = c->free_words();
404
size_t free_below_committed_before = c->free_below_committed_words();
405
const MetaWord* top_before = c->top();
407
EXPECT_TRUE(context.cm().attempt_enlarge_chunk(c));
408
EXPECT_EQ(l - 1, c->level());
409
EXPECT_EQ(c->word_size(), original_chunk_size * 2);
411
// Used words should not have changed
412
EXPECT_EQ(c->used_words(), used_before);
413
EXPECT_EQ(c->top(), top_before);
415
// free words should be expanded by the old size (since old chunk is doubled in size)
416
EXPECT_EQ(c->free_words(), free_before + original_chunk_size);
418
// free below committed can be larger but never smaller
419
EXPECT_GE(c->free_below_committed_words(), free_below_committed_before);
421
// Old content should be preserved
422
check_range_for_pattern(c->base(), original_chunk_size, (uintx)c);
427
context.return_chunk(c);