/*
 * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
25
#include "cds/filemap.hpp"
26
#include "memory/metaspace.hpp"
27
#include "memory/metaspaceUtils.hpp"
28
#include "nmt/mallocTracker.hpp"
29
#include "nmt/memflags.hpp"
30
#include "nmt/memReporter.hpp"
31
#include "nmt/memoryFileTracker.hpp"
32
#include "nmt/threadStackTracker.hpp"
33
#include "nmt/virtualMemoryTracker.hpp"
34
#include "utilities/debug.hpp"
35
#include "utilities/globalDefinitions.hpp"
36
#include "utilities/ostream.hpp"
38
// Runs CODE with the output stream 'out' temporarily indented by num_chars.
// The streamIndentor un-indents again when it goes out of scope.
// NOTE(review): macro tail was lost in extraction and restored from context — verify against upstream.
#define INDENT_BY(num_chars, CODE) { \
  streamIndentor si(out, num_chars); \
  { CODE }                           \
}
43
// Diff two counters, express them as signed, with range checks
44
static ssize_t counter_diff(size_t c1, size_t c2) {
45
assert(c1 <= SSIZE_MAX, "counter out of range: " SIZE_FORMAT ".", c1);
46
assert(c2 <= SSIZE_MAX, "counter out of range: " SIZE_FORMAT ".", c2);
47
if (c1 > SSIZE_MAX || c2 > SSIZE_MAX) {
53
// Remembers scale and output stream; the auto indentor restores the stream's
// indentation level when the reporter is destroyed.
MemReporterBase::MemReporterBase(outputStream* out, size_t scale) :
  _scale(scale), _output(out), _auto_indentor(out) {}
56
// Total reserved memory of a category = malloc'd + arena + reserved virtual memory.
size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) {
  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
}
60
// Total committed memory of a category = malloc'd + arena + committed virtual memory.
size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) {
  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
}
64
// Prints "reserved=..., committed=..." in the current scale; appends ", peak=..."
// only when a nonzero peak was passed in.
void MemReporterBase::print_total(size_t reserved, size_t committed, size_t peak) const {
  const char* scale = current_scale();
  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
                  amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
  if (peak != 0) {
    output()->print(", peak=" SIZE_FORMAT "%s", amount_in_current_scale(peak), scale);
  }
}
73
void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const {
74
const char* scale = current_scale();
75
outputStream* out = output();
76
const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";
78
const size_t amount = c->size();
79
const size_t count = c->count();
82
out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
83
amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
85
out->print("(%s" SIZE_FORMAT "%s", alloc_type,
86
amount_in_current_scale(amount), scale);
89
// blends out mtChunk count number
91
out->print(" #" SIZE_FORMAT "", count);
96
size_t pk_amount = c->peak_size();
97
if (pk_amount == amount) {
98
out->print_raw(" (at peak)");
99
} else if (pk_amount > amount) {
100
size_t pk_count = c->peak_count();
101
out->print(" (peak=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
102
amount_in_current_scale(pk_amount), scale, pk_count);
106
// Prints "(mmap: reserved=..., committed=..., <peak info>)". If committed equals the
// historic peak we print "at peak", otherwise the explicit peak value.
void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed, size_t peak) const {
  outputStream* out = output();
  const char* scale = current_scale();
  out->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s, ",
             amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
  if (peak == committed) {
    out->print_raw("at peak)");
  } else {
    out->print("peak=" SIZE_FORMAT "%s)", amount_in_current_scale(peak), scale);
  }
}
118
void MemReporterBase::print_arena(const MemoryCounter* c) const {
119
const char* scale = current_scale();
120
outputStream* out = output();
122
const size_t amount = c->size();
123
const size_t count = c->count();
125
out->print("(arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
126
amount_in_current_scale(amount), scale, count);
128
size_t pk_amount = c->peak_size();
129
if (pk_amount == amount) {
130
out->print_raw(" (at peak)");
131
} else if (pk_amount > amount) {
132
size_t pk_count = c->peak_count();
133
out->print(" (peak=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
134
amount_in_current_scale(pk_amount), scale, pk_count);
138
void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
139
const char* scale = current_scale();
140
output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
141
p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
145
void MemSummaryReporter::report() {
146
outputStream* out = output();
147
const size_t total_malloced_bytes = _malloc_snapshot->total();
148
const size_t total_mmap_reserved_bytes = _vm_snapshot->total_reserved();
149
const size_t total_mmap_committed_bytes = _vm_snapshot->total_committed();
151
size_t total_reserved_amount = total_malloced_bytes + total_mmap_reserved_bytes;
152
size_t total_committed_amount = total_malloced_bytes + total_mmap_committed_bytes;
156
out->print_cr("Native Memory Tracking:");
160
out->print_cr("(Omitting categories weighting less than 1%s)", current_scale());
164
out->print("Total: ");
165
print_total(total_reserved_amount, total_committed_amount);
168
out->print_cr("malloc: " SIZE_FORMAT "%s #" SIZE_FORMAT ", peak=" SIZE_FORMAT "%s #" SIZE_FORMAT,
169
amount_in_current_scale(total_malloced_bytes), current_scale(),
170
_malloc_snapshot->total_count(),
171
amount_in_current_scale(_malloc_snapshot->total_peak()),
172
current_scale(), _malloc_snapshot->total_peak_count());
173
out->print("mmap: ");
174
print_total(total_mmap_reserved_bytes, total_mmap_committed_bytes);
179
// Summary by memory type
180
for (int index = 0; index < mt_number_of_types; index ++) {
181
MEMFLAGS flag = NMTUtil::index_to_flag(index);
182
// thread stack is reported as part of thread category
183
if (flag == mtThreadStack) continue;
184
MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
185
VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
187
report_summary_of_type(flag, malloc_memory, virtual_memory);
191
void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
192
MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {
194
size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
195
size_t committed_amount = committed_total(malloc_memory, virtual_memory);
197
// Count thread's native stack in "Thread" category
198
if (flag == mtThread) {
199
const VirtualMemory* thread_stack_usage =
200
(const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
201
reserved_amount += thread_stack_usage->reserved();
202
committed_amount += thread_stack_usage->committed();
203
} else if (flag == mtNMT) {
204
// Count malloc headers in "NMT" category
205
reserved_amount += _malloc_snapshot->malloc_overhead();
206
committed_amount += _malloc_snapshot->malloc_overhead();
209
// Omit printing if the current reserved value as well as all historical peaks (malloc, mmap committed, arena)
210
// fall below scale threshold
211
const size_t pk_vm = virtual_memory->peak_size();
212
const size_t pk_malloc = malloc_memory->malloc_peak_size();
213
const size_t pk_arena = malloc_memory->arena_peak_size();
215
if (amount_in_current_scale(MAX4(reserved_amount, pk_vm, pk_malloc, pk_arena)) == 0) {
219
outputStream* out = output();
220
const char* scale = current_scale();
221
constexpr int indent = 28;
222
out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
223
print_total(reserved_amount, committed_amount);
225
if (flag == mtClassShared) {
226
size_t read_only_bytes = FileMapInfo::readonly_total();
227
output()->print(", readonly=" SIZE_FORMAT "%s",
228
amount_in_current_scale(read_only_bytes), scale);
233
streamIndentor si(out, indent);
235
if (flag == mtClass) {
236
// report class count
237
out->print_cr("(classes #" SIZE_FORMAT ")", (_instance_class_count + _array_class_count));
238
out->print_cr("( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
239
_instance_class_count, _array_class_count);
240
} else if (flag == mtThread) {
241
const VirtualMemory* thread_stack_usage =
242
_vm_snapshot->by_type(mtThreadStack);
243
// report thread count
244
out->print_cr("(threads #" SIZE_FORMAT ")", ThreadStackTracker::thread_count());
245
out->print("(stack: ");
246
print_total(thread_stack_usage->reserved(), thread_stack_usage->committed(), thread_stack_usage->peak_size());
250
// report malloc'd memory
251
if (amount_in_current_scale(MAX2(malloc_memory->malloc_size(), pk_malloc)) > 0) {
252
print_malloc(malloc_memory->malloc_counter());
256
if (amount_in_current_scale(MAX2(virtual_memory->reserved(), pk_vm)) > 0) {
257
print_virtual_memory(virtual_memory->reserved(), virtual_memory->committed(), virtual_memory->peak_size());
261
if (amount_in_current_scale(MAX2(malloc_memory->arena_size(), pk_arena)) > 0) {
262
print_arena(malloc_memory->arena_counter());
267
amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
268
out->print_cr("(tracking overhead=" SIZE_FORMAT "%s)",
269
amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
270
} else if (flag == mtClass) {
271
// Metadata information
272
report_metadata(Metaspace::NonClassType);
273
if (Metaspace::using_class_space()) {
274
report_metadata(Metaspace::ClassType);
280
// Prints metaspace usage details (reserved/committed/used/waste) for the given
// metadata type. No-op when Metaspace is not yet initialized.
// NOTE(review): early-return and print closers were lost in extraction and restored
// from context — verify against upstream.
void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {

  // NMT reports may be triggered (as part of error handling) very early. Make sure
  // Metaspace is already initialized.
  if (!Metaspace::initialized()) {
    return;
  }

  assert(type == Metaspace::NonClassType || type == Metaspace::ClassType,
    "Invalid metadata type");
  const char* name = (type == Metaspace::NonClassType) ?
    "Metadata: " : "Class space:";

  outputStream* out = output();
  const char* scale = current_scale();
  const MetaspaceStats stats = MetaspaceUtils::get_statistics(type);

  // Waste = committed but unused metaspace; percentage is relative to committed.
  size_t waste = stats.committed() - stats.used();
  float waste_percentage = stats.committed() > 0 ? (((float)waste * 100)/(float)stats.committed()) : 0.0f;

  out->print_cr("( %s)", name);
  out->print("( ");
  print_total(stats.reserved(), stats.committed());
  out->print_cr(")");
  out->print_cr("( used=" SIZE_FORMAT "%s)", amount_in_current_scale(stats.used()), scale);
  out->print_cr("( waste=" SIZE_FORMAT "%s =%2.2f%%)", amount_in_current_scale(waste),
                scale, waste_percentage);
}
309
void MemDetailReporter::report_detail() {
310
// Start detail report
311
outputStream* out = output();
312
out->print_cr("Details:\n");
315
report_malloc_sites() +
316
report_virtual_memory_allocation_sites();
317
if (num_omitted > 0) {
318
assert(scale() > 1, "sanity");
319
out->print_cr("(%d call sites weighting less than 1%s each omitted.)",
320
num_omitted, current_scale());
325
int MemDetailReporter::report_malloc_sites() {
326
MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
327
if (malloc_itr.is_empty()) return 0;
329
outputStream* out = output();
331
const MallocSite* malloc_site;
333
while ((malloc_site = malloc_itr.next()) != nullptr) {
334
// Omit printing if the current value and the historic peak value both fall below the reporting scale threshold
335
if (amount_in_current_scale(MAX2(malloc_site->size(), malloc_site->peak_size())) == 0) {
339
const NativeCallStack* stack = malloc_site->call_stack();
340
_stackprinter.print_stack(stack);
341
MEMFLAGS flag = malloc_site->flag();
342
assert(NMTUtil::flag_is_valid(flag) && flag != mtNone,
343
"Must have a valid memory type");
346
print_malloc(malloc_site->counter(), flag);
354
int MemDetailReporter::report_virtual_memory_allocation_sites() {
355
VirtualMemorySiteIterator virtual_memory_itr =
356
_baseline.virtual_memory_sites(MemBaseline::by_size);
358
if (virtual_memory_itr.is_empty()) return 0;
360
outputStream* out = output();
362
const VirtualMemoryAllocationSite* virtual_memory_site;
364
while ((virtual_memory_site = virtual_memory_itr.next()) != nullptr) {
365
// Don't report free sites; does not count toward omitted count.
366
if (virtual_memory_site->reserved() == 0) {
369
// Omit printing if the current value and the historic peak value both fall below the
370
// reporting scale threshold
371
if (amount_in_current_scale(MAX2(virtual_memory_site->reserved(),
372
virtual_memory_site->peak_size())) == 0) {
376
const NativeCallStack* stack = virtual_memory_site->call_stack();
377
_stackprinter.print_stack(stack);
380
print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
381
const MEMFLAGS flag = virtual_memory_site->flag();
382
if (flag != mtNone) {
383
out->print(" Type=%s", NMTUtil::flag_to_name(flag));
393
void MemDetailReporter::report_virtual_memory_map() {
394
// Virtual memory map always in base address order
395
VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
396
const ReservedMemoryRegion* rgn;
398
output()->print_cr("Virtual memory map:");
399
while ((rgn = itr.next()) != nullptr) {
400
report_virtual_memory_region(rgn);
404
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
405
assert(reserved_rgn != nullptr, "null pointer");
407
// We don't bother about reporting peaks here.
408
// That is because peaks - in the context of virtual memory, peak of committed areas - make little sense
409
// when we report *by region*, which are identified by their location in memory. There is a philosophical
410
// question about identity here: e.g. a committed region that has been split into three regions by
411
// uncommitting a middle section of it, should that still count as "having peaked" before the split? If
412
// yes, which of the three new regions would be the spiritual successor? Rather than introducing more
413
// complexity, we avoid printing peaks altogether. Note that peaks should still be printed when reporting
414
// usage *by callsite*.
416
// Don't report if size is too small.
417
if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
419
outputStream* out = output();
420
const char* scale = current_scale();
421
const NativeCallStack* stack = reserved_rgn->call_stack();
422
bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size();
423
const char* region_type = (all_committed ? "reserved and committed" : "reserved");
425
print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
426
out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
427
if (stack->is_empty()) {
430
out->print_cr(" from");
431
INDENT_BY(4, _stackprinter.print_stack(stack);)
435
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
436
const CommittedMemoryRegion* committed_rgn = itr.next();
437
if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
438
// One region spanning the entire reserved region, with the same stack trace.
439
// Don't print this regions because the "reserved and committed" line above
440
// already indicates that the region is committed.
441
assert(itr.next() == nullptr, "Unexpectedly more than one regions");
446
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
447
const CommittedMemoryRegion* committed_rgn;
448
while ((committed_rgn = itr.next()) != nullptr) {
449
// Don't report if size is too small
450
if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
451
stack = committed_rgn->call_stack();
454
print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
455
if (stack->is_empty()) {
458
out->print_cr(" from");
459
INDENT_BY(4, stack->print_on(out);)
465
void MemDetailReporter::report_memory_file_allocations() {
468
MemoryFileTracker::Instance::Locker lock;
469
MemoryFileTracker::Instance::print_all_reports_on(&st, scale());
471
output()->print_raw(st.freeze());
474
void MemSummaryDiffReporter::report_diff() {
475
outputStream* out = output();
477
out->print_cr("Native Memory Tracking:");
481
out->print_cr("(Omitting categories weighting less than 1%s)", current_scale());
486
out->print("Total: ");
487
print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
488
_current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
489
_early_baseline.total_committed_memory());
494
const size_t early_malloced_bytes =
495
_early_baseline.malloc_memory_snapshot()->total();
496
const size_t early_count =
497
_early_baseline.malloc_memory_snapshot()->total_count();
498
const size_t current_malloced_bytes =
499
_current_baseline.malloc_memory_snapshot()->total();
500
const size_t current_count =
501
_current_baseline.malloc_memory_snapshot()->total_count();
502
print_malloc_diff(current_malloced_bytes, current_count, early_malloced_bytes,
503
early_count, mtNone);
508
out->print("mmap: ");
509
const size_t early_reserved =
510
_early_baseline.virtual_memory_snapshot()->total_reserved();
511
const size_t early_committed =
512
_early_baseline.virtual_memory_snapshot()->total_committed();
513
const size_t current_reserved =
514
_current_baseline.virtual_memory_snapshot()->total_reserved();
515
const size_t current_committed =
516
_current_baseline.virtual_memory_snapshot()->total_committed();
517
print_virtual_memory_diff(current_reserved, current_committed, early_reserved,
522
// Summary diff by memory type
523
for (int index = 0; index < mt_number_of_types; index ++) {
524
MEMFLAGS flag = NMTUtil::index_to_flag(index);
525
// thread stack is reported as part of thread category
526
if (flag == mtThreadStack) continue;
527
diff_summary_of_type(flag,
528
_early_baseline.malloc_memory(flag),
529
_early_baseline.virtual_memory(flag),
530
_early_baseline.metaspace_stats(),
531
_current_baseline.malloc_memory(flag),
532
_current_baseline.virtual_memory(flag),
533
_current_baseline.metaspace_stats());
537
// Prints "malloc=<current> [type=...] [+/-diff] [#count [+/-count-diff]]".
// The "malloc=" prefix and type are suppressed under the "thread" category because
// the surrounding report already labels those lines.
void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count, MEMFLAGS flags) const {
  const char* scale = current_scale();
  outputStream* out = output();
  const char* alloc_type = (flags == mtThread) ? "" : "malloc=";

  out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
  // Report type only if it is valid and not under "thread" category
  if (flags != mtNone && flags != mtThread) {
    out->print(" type=%s", NMTUtil::flag_to_name(flags));
  }

  int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
  if (amount_diff != 0) {
    out->print(" " INT64_PLUS_FORMAT "%s", amount_diff, scale);
  }
  if (current_count > 0) {
    out->print(" #" SIZE_FORMAT "", current_count);
    const ssize_t delta_count = counter_diff(current_count, early_count);
    if (delta_count != 0) {
      out->print(" " SSIZE_PLUS_FORMAT, delta_count);
    }
  }
}
562
// Prints "arena=<current> [+/-diff] #count [+/-count-diff]".
void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
  size_t early_amount, size_t early_count) const {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
  if (amount_diff != 0) {
    out->print(" " INT64_PLUS_FORMAT "%s", amount_diff, scale);
  }

  out->print(" #" SIZE_FORMAT "", current_count);
  const ssize_t delta_count = counter_diff(current_count, early_count);
  if (delta_count != 0) {
    out->print(" " SSIZE_PLUS_FORMAT, delta_count);
  }
}
579
// Prints "reserved=<current> [+/-diff], committed=<current> [+/-diff]".
void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
    size_t early_reserved, size_t early_committed) const {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
  int64_t reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
  if (reserved_diff != 0) {
    out->print(" " INT64_PLUS_FORMAT "%s", reserved_diff, scale);
  }

  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
  int64_t committed_diff = diff_in_current_scale(current_committed, early_committed);
  if (committed_diff != 0) {
    out->print(" " INT64_PLUS_FORMAT "%s", committed_diff, scale);
  }
}
597
void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
598
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
599
const MetaspaceCombinedStats& early_ms,
600
const MallocMemory* current_malloc, const VirtualMemory* current_vm,
601
const MetaspaceCombinedStats& current_ms) const {
603
outputStream* out = output();
604
const char* scale = current_scale();
605
constexpr int indent = 28;
607
// Total reserved and committed memory in current baseline
608
size_t current_reserved_amount = reserved_total (current_malloc, current_vm);
609
size_t current_committed_amount = committed_total(current_malloc, current_vm);
611
// Total reserved and committed memory in early baseline
612
size_t early_reserved_amount = reserved_total(early_malloc, early_vm);
613
size_t early_committed_amount = committed_total(early_malloc, early_vm);
615
// Adjust virtual memory total
616
if (flag == mtThread) {
617
const VirtualMemory* early_thread_stack_usage =
618
_early_baseline.virtual_memory(mtThreadStack);
619
const VirtualMemory* current_thread_stack_usage =
620
_current_baseline.virtual_memory(mtThreadStack);
622
early_reserved_amount += early_thread_stack_usage->reserved();
623
early_committed_amount += early_thread_stack_usage->committed();
625
current_reserved_amount += current_thread_stack_usage->reserved();
626
current_committed_amount += current_thread_stack_usage->committed();
627
} else if (flag == mtNMT) {
628
early_reserved_amount += _early_baseline.malloc_tracking_overhead();
629
early_committed_amount += _early_baseline.malloc_tracking_overhead();
631
current_reserved_amount += _current_baseline.malloc_tracking_overhead();
632
current_committed_amount += _current_baseline.malloc_tracking_overhead();
635
if (amount_in_current_scale(current_reserved_amount) > 0 ||
636
diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
638
// print summary line
639
out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
640
print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
641
early_reserved_amount, early_committed_amount);
644
streamIndentor si(out, indent);
647
if (flag == mtClass) {
648
// report class count
649
out->print("(classes #" SIZE_FORMAT, _current_baseline.class_count());
650
const ssize_t class_count_diff =
651
counter_diff(_current_baseline.class_count(), _early_baseline.class_count());
652
if (class_count_diff != 0) {
653
out->print(" " SSIZE_PLUS_FORMAT, class_count_diff);
657
out->print("( instance classes #" SIZE_FORMAT, _current_baseline.instance_class_count());
658
const ssize_t instance_class_count_diff =
659
counter_diff(_current_baseline.instance_class_count(), _early_baseline.instance_class_count());
660
if (instance_class_count_diff != 0) {
661
out->print(" " SSIZE_PLUS_FORMAT, instance_class_count_diff);
663
out->print(", array classes #" SIZE_FORMAT, _current_baseline.array_class_count());
664
const ssize_t array_class_count_diff =
665
counter_diff(_current_baseline.array_class_count(), _early_baseline.array_class_count());
666
if (array_class_count_diff != 0) {
667
out->print(" " SSIZE_PLUS_FORMAT, array_class_count_diff);
671
} else if (flag == mtThread) {
672
// report thread count
673
out->print("(threads #" SIZE_FORMAT, _current_baseline.thread_count());
674
const ssize_t thread_count_diff = counter_diff(_current_baseline.thread_count(), _early_baseline.thread_count());
675
if (thread_count_diff != 0) {
676
out->print(" " SSIZE_PLUS_FORMAT, thread_count_diff);
680
out->print("(stack: ");
681
// report thread stack
682
const VirtualMemory* current_thread_stack =
683
_current_baseline.virtual_memory(mtThreadStack);
684
const VirtualMemory* early_thread_stack =
685
_early_baseline.virtual_memory(mtThreadStack);
687
print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
688
early_thread_stack->reserved(), early_thread_stack->committed());
693
// Report malloc'd memory
694
size_t current_malloc_amount = current_malloc->malloc_size();
695
size_t early_malloc_amount = early_malloc->malloc_size();
696
if (amount_in_current_scale(current_malloc_amount) > 0 ||
697
diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
699
print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
700
early_malloc_amount, early_malloc->malloc_count(), mtNone);
704
// Report virtual memory
705
if (amount_in_current_scale(current_vm->reserved()) > 0 ||
706
diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
707
out->print("(mmap: ");
708
print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
709
early_vm->reserved(), early_vm->committed());
713
// Report arena memory
714
if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
715
diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
717
print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
718
early_malloc->arena_size(), early_malloc->arena_count());
722
// Report native memory tracking overhead
724
size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
725
size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead());
727
out->print("(tracking overhead=" SIZE_FORMAT "%s",
728
amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale);
730
int64_t overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
731
_early_baseline.malloc_tracking_overhead());
732
if (overhead_diff != 0) {
733
out->print(" " INT64_PLUS_FORMAT "%s", overhead_diff, scale);
736
} else if (flag == mtClass) {
737
print_metaspace_diff(current_ms, early_ms);
743
void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceCombinedStats& current_ms,
744
const MetaspaceCombinedStats& early_ms) const {
745
print_metaspace_diff("Metadata", current_ms.non_class_space_stats(), early_ms.non_class_space_stats());
746
if (Metaspace::using_class_space()) {
747
print_metaspace_diff("Class space", current_ms.class_space_stats(), early_ms.class_space_stats());
751
void MemSummaryDiffReporter::print_metaspace_diff(const char* header,
752
const MetaspaceStats& current_stats,
753
const MetaspaceStats& early_stats) const {
754
outputStream* out = output();
755
const char* scale = current_scale();
757
out->print_cr("( %s)", header);
759
print_virtual_memory_diff(current_stats.reserved(),
760
current_stats.committed(),
761
early_stats.reserved(),
762
early_stats.committed());
765
int64_t diff_used = diff_in_current_scale(current_stats.used(),
768
size_t current_waste = current_stats.committed() - current_stats.used();
769
size_t early_waste = early_stats.committed() - early_stats.used();
770
int64_t diff_waste = diff_in_current_scale(current_waste, early_waste);
773
out->print("( used=" SIZE_FORMAT "%s",
774
amount_in_current_scale(current_stats.used()), scale);
775
if (diff_used != 0) {
776
out->print(" " INT64_PLUS_FORMAT "%s", diff_used, scale);
781
const float waste_percentage = current_stats.committed() == 0 ? 0.0f :
782
((float)current_waste * 100.0f) / (float)current_stats.committed();
783
out->print("( waste=" SIZE_FORMAT "%s =%2.2f%%",
784
amount_in_current_scale(current_waste), scale, waste_percentage);
785
if (diff_waste != 0) {
786
out->print(" " INT64_PLUS_FORMAT "%s", diff_waste, scale);
791
void MemDetailDiffReporter::report_diff() {
792
MemSummaryDiffReporter::report_diff();
794
diff_virtual_memory_sites();
797
void MemDetailDiffReporter::diff_malloc_sites() const {
798
MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type);
799
MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type);
801
const MallocSite* early_site = early_itr.next();
802
const MallocSite* current_site = current_itr.next();
804
while (early_site != nullptr || current_site != nullptr) {
805
if (early_site == nullptr) {
806
new_malloc_site(current_site);
807
current_site = current_itr.next();
808
} else if (current_site == nullptr) {
809
old_malloc_site(early_site);
810
early_site = early_itr.next();
812
int compVal = current_site->call_stack()->compare(*early_site->call_stack());
814
new_malloc_site(current_site);
815
current_site = current_itr.next();
816
} else if (compVal > 0) {
817
old_malloc_site(early_site);
818
early_site = early_itr.next();
820
diff_malloc_site(early_site, current_site);
821
early_site = early_itr.next();
822
current_site = current_itr.next();
828
void MemDetailDiffReporter::diff_virtual_memory_sites() const {
829
VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
830
VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);
832
const VirtualMemoryAllocationSite* early_site = early_itr.next();
833
const VirtualMemoryAllocationSite* current_site = current_itr.next();
835
while (early_site != nullptr || current_site != nullptr) {
836
if (early_site == nullptr) {
837
new_virtual_memory_site(current_site);
838
current_site = current_itr.next();
839
} else if (current_site == nullptr) {
840
old_virtual_memory_site(early_site);
841
early_site = early_itr.next();
843
int compVal = current_site->call_stack()->compare(*early_site->call_stack());
845
new_virtual_memory_site(current_site);
846
current_site = current_itr.next();
847
} else if (compVal > 0) {
848
old_virtual_memory_site(early_site);
849
early_site = early_itr.next();
850
} else if (early_site->flag() != current_site->flag()) {
851
// This site was originally allocated with one flag, then released,
852
// then re-allocated at the same site (as far as we can tell) with a different flag.
853
old_virtual_memory_site(early_site);
854
early_site = early_itr.next();
855
new_virtual_memory_site(current_site);
856
current_site = current_itr.next();
858
diff_virtual_memory_site(early_site, current_site);
859
early_site = early_itr.next();
860
current_site = current_itr.next();
867
void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
868
diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
869
0, 0, malloc_site->flag());
872
void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
873
diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
874
malloc_site->count(), malloc_site->flag());
877
void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
878
const MallocSite* current) const {
879
if (early->flag() != current->flag()) {
880
// If malloc site type changed, treat it as deallocation of old type and
881
// allocation of new type.
882
old_malloc_site(early);
883
new_malloc_site(current);
885
diff_malloc_site(current->call_stack(), current->size(), current->count(),
886
early->size(), early->count(), early->flag());
890
void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
891
size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
892
outputStream* out = output();
894
assert(stack != nullptr, "null stack");
896
if (diff_in_current_scale(current_size, early_size) == 0) {
900
_stackprinter.print_stack(stack);
903
print_malloc_diff(current_size, current_count, early_size, early_count, flags);
911
void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
912
diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag());
915
void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
916
diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag());
919
void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
920
const VirtualMemoryAllocationSite* current) const {
921
diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
922
early->reserved(), early->committed(), current->flag());
925
void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
926
size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const {
927
outputStream* out = output();
930
if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
931
diff_in_current_scale(current_committed, early_committed) == 0) {
935
_stackprinter.print_stack(stack);
937
out->print("(mmap: ");
938
print_virtual_memory_diff(current_reserved, current_committed, early_reserved, early_committed);
939
if (flag != mtNone) {
940
out->print(" Type=%s", NMTUtil::flag_to_name(flag));