/*
 * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nativeCallStackPrinter.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;
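
// Lock-free update of the historic peak: retry the compare-and-swap until this
// thread either installs the new peak or observes one at least as large.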
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}
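
// Record a newly committed sub-range of this reserved region. Adjacent
// committed ranges carrying identical call stacks are coalesced to keep
// the committed-region list small.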
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
                                                     address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}
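
// Carve [addr, addr + sz) out of the committed-region list, trimming or
// splitting any committed range it intersects and updating the summary
// counters accordingly.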
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue; // don't update head or prev
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true; // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}
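
// Changing the flag moves this region's reserved and committed counters from
// the old MEMFLAGS bucket to the new one in the summary.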
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}
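
// Register a new reserved range. Re-reservation of the same range, overlapping
// thread-stack reservations from undetached JNI threads, and CDS mappings into
// an existing reservation are all tolerated; any other overlap is a fatal error.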
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
                                               const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                 rgn.flag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                       reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                       reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}
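
// Commits are recorded against the reserved region that contains them; the
// caller must have registered the reservation first.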
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
                                                const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                   rgn.flag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                 reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* flag_name = reserved_rgn->flag_name(); // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                 flag_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}
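
// Remove a whole reserved region from tracking: first uncommit everything it
// contains, then drop the region itself from the reserved-region list.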
bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                 backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s",
                 backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                   p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // This is from releasing the whole region spanning from archive space to class space,
      // so we release them altogether.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->flag() == mtClass, "Must be class type");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
                                  *reserved_rgn->call_stack(), reserved_rgn->flag());

    // use original region for lower region
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory flags of the original section.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();
  MEMFLAGS original_flags = reserved_rgn->flag();

  const char* name = reserved_rgn->flag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
                 name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, flag);
  add_reserved_region(addr + split, size - split, original_stack, split_flag);

  return true;
}

// Iterate the range, find committed region within its bound.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // return true if committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};
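
// Probe the OS for the next committed subrange at or after _current_start,
// returning its bounds and advancing the cursor past it.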
bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t  stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}
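
// Visit every tracked reserved region under ThreadCritical; stops early (and
// returns false) if the walker asks to stop.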
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}
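
// Walker used by print_containing_region: prints the region containing _p,
// plus its allocation stack at detail level, then stops the walk.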
class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address          _p;
  outputStream*          _st;
  NativeCallStackPrinter _stackprinter;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st), _stackprinter(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
                    p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        _stackprinter.print_stack(rgn->call_stack());
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}