// memReporter.cpp — fork of the OpenJDK "jdk" repository (945 lines, 35.8 KB).
/*
 * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "cds/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memReporter.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

38
#define INDENT_BY(num_chars, CODE) { \
39
  streamIndentor si(out, num_chars); \
40
  { CODE }                           \
41
}
42

43
// Diff two counters, express them as signed, with range checks
44
static ssize_t counter_diff(size_t c1, size_t c2) {
45
  assert(c1 <= SSIZE_MAX, "counter out of range: " SIZE_FORMAT ".", c1);
46
  assert(c2 <= SSIZE_MAX, "counter out of range: " SIZE_FORMAT ".", c2);
47
  if (c1 > SSIZE_MAX || c2 > SSIZE_MAX) {
48
    return 0;
49
  }
50
  return c1 - c2;
51
}
52

53
MemReporterBase::MemReporterBase(outputStream* out, size_t scale) :
54
  _scale(scale), _output(out), _auto_indentor(out) {}
55

56
size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) {
57
  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
58
}
59

60
size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) {
61
  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
62
}
63

64
void MemReporterBase::print_total(size_t reserved, size_t committed, size_t peak) const {
65
  const char* scale = current_scale();
66
  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
67
    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
68
  if (peak != 0) {
69
    output()->print(", peak=" SIZE_FORMAT "%s", amount_in_current_scale(peak), scale);
70
  }
71
}
72

73
void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const {
74
  const char* scale = current_scale();
75
  outputStream* out = output();
76
  const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";
77

78
  const size_t amount = c->size();
79
  const size_t count = c->count();
80

81
  if (flag != mtNone) {
82
    out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
83
      amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
84
  } else {
85
    out->print("(%s" SIZE_FORMAT "%s", alloc_type,
86
      amount_in_current_scale(amount), scale);
87
  }
88

89
  // blends out mtChunk count number
90
  if (count > 0) {
91
    out->print(" #" SIZE_FORMAT "", count);
92
  }
93

94
  out->print(")");
95

96
  size_t pk_amount = c->peak_size();
97
  if (pk_amount == amount) {
98
    out->print_raw(" (at peak)");
99
  } else if (pk_amount > amount) {
100
    size_t pk_count = c->peak_count();
101
    out->print(" (peak=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
102
        amount_in_current_scale(pk_amount), scale, pk_count);
103
  }
104
}
105

106
void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed, size_t peak) const {
107
  outputStream* out = output();
108
  const char* scale = current_scale();
109
  out->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s, ",
110
    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
111
  if (peak == committed) {
112
    out->print_raw("at peak)");
113
  } else {
114
    out->print("peak=" SIZE_FORMAT "%s)", amount_in_current_scale(peak), scale);
115
  }
116
}
117

118
void MemReporterBase::print_arena(const MemoryCounter* c) const {
119
  const char* scale = current_scale();
120
  outputStream* out = output();
121

122
  const size_t amount = c->size();
123
  const size_t count = c->count();
124

125
  out->print("(arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
126
             amount_in_current_scale(amount), scale, count);
127

128
  size_t pk_amount = c->peak_size();
129
  if (pk_amount == amount) {
130
    out->print_raw(" (at peak)");
131
  } else if (pk_amount > amount) {
132
    size_t pk_count = c->peak_count();
133
    out->print(" (peak=" SIZE_FORMAT "%s #" SIZE_FORMAT ")",
134
        amount_in_current_scale(pk_amount), scale, pk_count);
135
  }
136
}
137

138
void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
139
  const char* scale = current_scale();
140
  output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
141
    p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
142
}
143

144

145
void MemSummaryReporter::report() {
146
  outputStream* out = output();
147
  const size_t total_malloced_bytes = _malloc_snapshot->total();
148
  const size_t total_mmap_reserved_bytes = _vm_snapshot->total_reserved();
149
  const size_t total_mmap_committed_bytes = _vm_snapshot->total_committed();
150

151
  size_t total_reserved_amount = total_malloced_bytes + total_mmap_reserved_bytes;
152
  size_t total_committed_amount = total_malloced_bytes + total_mmap_committed_bytes;
153

154
  // Overall total
155
  out->cr();
156
  out->print_cr("Native Memory Tracking:");
157
  out->cr();
158

159
  if (scale() > 1) {
160
    out->print_cr("(Omitting categories weighting less than 1%s)", current_scale());
161
    out->cr();
162
  }
163

164
  out->print("Total: ");
165
  print_total(total_reserved_amount, total_committed_amount);
166
  out->cr();
167
  INDENT_BY(7,
168
    out->print_cr("malloc: " SIZE_FORMAT "%s #" SIZE_FORMAT ", peak=" SIZE_FORMAT "%s #" SIZE_FORMAT,
169
                  amount_in_current_scale(total_malloced_bytes), current_scale(),
170
                  _malloc_snapshot->total_count(),
171
                  amount_in_current_scale(_malloc_snapshot->total_peak()),
172
                  current_scale(), _malloc_snapshot->total_peak_count());
173
    out->print("mmap:   ");
174
    print_total(total_mmap_reserved_bytes, total_mmap_committed_bytes);
175
  )
176
  out->cr();
177
  out->cr();
178

179
  // Summary by memory type
180
  for (int index = 0; index < mt_number_of_types; index ++) {
181
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
182
    // thread stack is reported as part of thread category
183
    if (flag == mtThreadStack) continue;
184
    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
185
    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
186

187
    report_summary_of_type(flag, malloc_memory, virtual_memory);
188
  }
189
}
190

191
void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
192
  MallocMemory*  malloc_memory, VirtualMemory* virtual_memory) {
193

194
  size_t reserved_amount  = reserved_total (malloc_memory, virtual_memory);
195
  size_t committed_amount = committed_total(malloc_memory, virtual_memory);
196

197
  // Count thread's native stack in "Thread" category
198
  if (flag == mtThread) {
199
    const VirtualMemory* thread_stack_usage =
200
      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
201
    reserved_amount  += thread_stack_usage->reserved();
202
    committed_amount += thread_stack_usage->committed();
203
  } else if (flag == mtNMT) {
204
    // Count malloc headers in "NMT" category
205
    reserved_amount  += _malloc_snapshot->malloc_overhead();
206
    committed_amount += _malloc_snapshot->malloc_overhead();
207
  }
208

209
  // Omit printing if the current reserved value as well as all historical peaks (malloc, mmap committed, arena)
210
  // fall below scale threshold
211
  const size_t pk_vm = virtual_memory->peak_size();
212
  const size_t pk_malloc = malloc_memory->malloc_peak_size();
213
  const size_t pk_arena = malloc_memory->arena_peak_size();
214

215
  if (amount_in_current_scale(MAX4(reserved_amount, pk_vm, pk_malloc, pk_arena)) == 0) {
216
    return;
217
  }
218

219
  outputStream* out   = output();
220
  const char*   scale = current_scale();
221
  constexpr int indent = 28;
222
  out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
223
  print_total(reserved_amount, committed_amount);
224
#if INCLUDE_CDS
225
  if (flag == mtClassShared) {
226
      size_t read_only_bytes = FileMapInfo::readonly_total();
227
    output()->print(", readonly=" SIZE_FORMAT "%s",
228
                    amount_in_current_scale(read_only_bytes), scale);
229
  }
230
#endif
231
  out->print_cr(")");
232

233
  streamIndentor si(out, indent);
234

235
  if (flag == mtClass) {
236
    // report class count
237
    out->print_cr("(classes #" SIZE_FORMAT ")", (_instance_class_count + _array_class_count));
238
    out->print_cr("(  instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
239
                  _instance_class_count, _array_class_count);
240
  } else if (flag == mtThread) {
241
    const VirtualMemory* thread_stack_usage =
242
     _vm_snapshot->by_type(mtThreadStack);
243
    // report thread count
244
    out->print_cr("(threads #" SIZE_FORMAT ")", ThreadStackTracker::thread_count());
245
    out->print("(stack: ");
246
    print_total(thread_stack_usage->reserved(), thread_stack_usage->committed(), thread_stack_usage->peak_size());
247
    out->print_cr(")");
248
  }
249

250
   // report malloc'd memory
251
  if (amount_in_current_scale(MAX2(malloc_memory->malloc_size(), pk_malloc)) > 0) {
252
    print_malloc(malloc_memory->malloc_counter());
253
    out->cr();
254
  }
255

256
  if (amount_in_current_scale(MAX2(virtual_memory->reserved(), pk_vm)) > 0) {
257
    print_virtual_memory(virtual_memory->reserved(), virtual_memory->committed(), virtual_memory->peak_size());
258
    out->cr();
259
  }
260

261
  if (amount_in_current_scale(MAX2(malloc_memory->arena_size(), pk_arena)) > 0) {
262
    print_arena(malloc_memory->arena_counter());
263
    out->cr();
264
  }
265

266
  if (flag == mtNMT &&
267
    amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
268
    out->print_cr("(tracking overhead=" SIZE_FORMAT "%s)",
269
                   amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
270
  } else if (flag == mtClass) {
271
    // Metadata information
272
    report_metadata(Metaspace::NonClassType);
273
    if (Metaspace::using_class_space()) {
274
      report_metadata(Metaspace::ClassType);
275
    }
276
  }
277
  out->cr();
278
}
279

280
void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
281

282
  // NMT reports may be triggered (as part of error handling) very early. Make sure
283
  // Metaspace is already initialized.
284
  if (!Metaspace::initialized()) {
285
    return;
286
  }
287

288
  assert(type == Metaspace::NonClassType || type == Metaspace::ClassType,
289
    "Invalid metadata type");
290
  const char* name = (type == Metaspace::NonClassType) ?
291
    "Metadata:   " : "Class space:";
292

293
  outputStream* out = output();
294
  const char* scale = current_scale();
295
  const MetaspaceStats stats = MetaspaceUtils::get_statistics(type);
296

297
  size_t waste = stats.committed() - stats.used();
298
  float waste_percentage = stats.committed() > 0 ? (((float)waste * 100)/(float)stats.committed()) : 0.0f;
299

300
  out->print_cr("(  %s)", name);
301
  out->print("(    ");
302
  print_total(stats.reserved(), stats.committed());
303
  out->print_cr(")");
304
  out->print_cr("(    used=" SIZE_FORMAT "%s)", amount_in_current_scale(stats.used()), scale);
305
  out->print_cr("(    waste=" SIZE_FORMAT "%s =%2.2f%%)", amount_in_current_scale(waste),
306
                scale, waste_percentage);
307
}
308

309
void MemDetailReporter::report_detail() {
310
  // Start detail report
311
  outputStream* out = output();
312
  out->print_cr("Details:\n");
313

314
  int num_omitted =
315
      report_malloc_sites() +
316
      report_virtual_memory_allocation_sites();
317
  if (num_omitted > 0) {
318
    assert(scale() > 1, "sanity");
319
    out->print_cr("(%d call sites weighting less than 1%s each omitted.)",
320
                   num_omitted, current_scale());
321
    out->cr();
322
  }
323
}
324

325
int MemDetailReporter::report_malloc_sites() {
326
  MallocSiteIterator         malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
327
  if (malloc_itr.is_empty()) return 0;
328

329
  outputStream* out = output();
330

331
  const MallocSite* malloc_site;
332
  int num_omitted = 0;
333
  while ((malloc_site = malloc_itr.next()) != nullptr) {
334
    // Omit printing if the current value and the historic peak value both fall below the reporting scale threshold
335
    if (amount_in_current_scale(MAX2(malloc_site->size(), malloc_site->peak_size())) == 0) {
336
      num_omitted ++;
337
      continue;
338
    }
339
    const NativeCallStack* stack = malloc_site->call_stack();
340
    _stackprinter.print_stack(stack);
341
    MEMFLAGS flag = malloc_site->flag();
342
    assert(NMTUtil::flag_is_valid(flag) && flag != mtNone,
343
      "Must have a valid memory type");
344
    INDENT_BY(29,
345
      out->print("(");
346
      print_malloc(malloc_site->counter(), flag);
347
      out->print_cr(")");
348
    )
349
    out->cr();
350
  }
351
  return num_omitted;
352
}
353

354
int MemDetailReporter::report_virtual_memory_allocation_sites()  {
355
  VirtualMemorySiteIterator  virtual_memory_itr =
356
    _baseline.virtual_memory_sites(MemBaseline::by_size);
357

358
  if (virtual_memory_itr.is_empty()) return 0;
359

360
  outputStream* out = output();
361

362
  const VirtualMemoryAllocationSite*  virtual_memory_site;
363
  int num_omitted = 0;
364
  while ((virtual_memory_site = virtual_memory_itr.next()) != nullptr) {
365
    // Don't report free sites; does not count toward omitted count.
366
    if (virtual_memory_site->reserved() == 0) {
367
      continue;
368
    }
369
    // Omit printing if the current value and the historic peak value both fall below the
370
    // reporting scale threshold
371
    if (amount_in_current_scale(MAX2(virtual_memory_site->reserved(),
372
                                     virtual_memory_site->peak_size())) == 0) {
373
      num_omitted++;
374
      continue;
375
    }
376
    const NativeCallStack* stack = virtual_memory_site->call_stack();
377
    _stackprinter.print_stack(stack);
378
    INDENT_BY(29,
379
      out->print("(");
380
      print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
381
      const MEMFLAGS flag = virtual_memory_site->flag();
382
      if (flag != mtNone) {
383
        out->print(" Type=%s", NMTUtil::flag_to_name(flag));
384
      }
385
      out->print_cr(")");
386
    )
387
    out->cr();
388
  }
389
  return num_omitted;
390
}
391

392

393
void MemDetailReporter::report_virtual_memory_map() {
394
  // Virtual memory map always in base address order
395
  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
396
  const ReservedMemoryRegion* rgn;
397

398
  output()->print_cr("Virtual memory map:");
399
  while ((rgn = itr.next()) != nullptr) {
400
    report_virtual_memory_region(rgn);
401
  }
402
}
403

404
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
405
  assert(reserved_rgn != nullptr, "null pointer");
406

407
  // We don't bother about reporting peaks here.
408
  // That is because peaks - in the context of virtual memory, peak of committed areas - make little sense
409
  // when we report *by region*, which are identified by their location in memory. There is a philosophical
410
  // question about identity here: e.g. a committed region that has been split into three regions by
411
  // uncommitting a middle section of it, should that still count as "having peaked" before the split? If
412
  // yes, which of the three new regions would be the spiritual successor? Rather than introducing more
413
  // complexity, we avoid printing peaks altogether. Note that peaks should still be printed when reporting
414
  // usage *by callsite*.
415

416
  // Don't report if size is too small.
417
  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
418

419
  outputStream* out = output();
420
  const char* scale = current_scale();
421
  const NativeCallStack*  stack = reserved_rgn->call_stack();
422
  bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size();
423
  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
424
  out->cr();
425
  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
426
  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
427
  if (stack->is_empty()) {
428
    out->cr();
429
  } else {
430
    out->print_cr(" from");
431
    INDENT_BY(4, _stackprinter.print_stack(stack);)
432
  }
433

434
  if (all_committed) {
435
    CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
436
    const CommittedMemoryRegion* committed_rgn = itr.next();
437
    if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
438
      // One region spanning the entire reserved region, with the same stack trace.
439
      // Don't print this regions because the "reserved and committed" line above
440
      // already indicates that the region is committed.
441
      assert(itr.next() == nullptr, "Unexpectedly more than one regions");
442
      return;
443
    }
444
  }
445

446
  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
447
  const CommittedMemoryRegion* committed_rgn;
448
  while ((committed_rgn = itr.next()) != nullptr) {
449
    // Don't report if size is too small
450
    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
451
    stack = committed_rgn->call_stack();
452
    out->cr();
453
    INDENT_BY(8,
454
      print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
455
      if (stack->is_empty()) {
456
        out->cr();
457
      } else {
458
        out->print_cr(" from");
459
        INDENT_BY(4, stack->print_on(out);)
460
      }
461
    )
462
  }
463
}
464

465
void MemDetailReporter::report_memory_file_allocations() {
466
  stringStream st;
467
  {
468
    MemoryFileTracker::Instance::Locker lock;
469
    MemoryFileTracker::Instance::print_all_reports_on(&st, scale());
470
  }
471
  output()->print_raw(st.freeze());
472
}
473

474
void MemSummaryDiffReporter::report_diff() {
475
  outputStream* out = output();
476
  out->cr();
477
  out->print_cr("Native Memory Tracking:");
478
  out->cr();
479

480
  if (scale() > 1) {
481
    out->print_cr("(Omitting categories weighting less than 1%s)", current_scale());
482
    out->cr();
483
  }
484

485
  // Overall diff
486
  out->print("Total: ");
487
  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
488
    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
489
    _early_baseline.total_committed_memory());
490
  out->cr();
491
  out->cr();
492

493
  // malloc diff
494
  const size_t early_malloced_bytes =
495
    _early_baseline.malloc_memory_snapshot()->total();
496
  const size_t early_count =
497
    _early_baseline.malloc_memory_snapshot()->total_count();
498
  const size_t current_malloced_bytes =
499
    _current_baseline.malloc_memory_snapshot()->total();
500
  const size_t current_count =
501
    _current_baseline.malloc_memory_snapshot()->total_count();
502
  print_malloc_diff(current_malloced_bytes, current_count, early_malloced_bytes,
503
                    early_count, mtNone);
504
  out->cr();
505
  out->cr();
506

507
  // mmap diff
508
  out->print("mmap: ");
509
  const size_t early_reserved =
510
    _early_baseline.virtual_memory_snapshot()->total_reserved();
511
  const size_t early_committed =
512
    _early_baseline.virtual_memory_snapshot()->total_committed();
513
  const size_t current_reserved =
514
    _current_baseline.virtual_memory_snapshot()->total_reserved();
515
  const size_t current_committed =
516
    _current_baseline.virtual_memory_snapshot()->total_committed();
517
  print_virtual_memory_diff(current_reserved, current_committed, early_reserved,
518
                            early_committed);
519
  out->cr();
520
  out->cr();
521

522
  // Summary diff by memory type
523
  for (int index = 0; index < mt_number_of_types; index ++) {
524
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
525
    // thread stack is reported as part of thread category
526
    if (flag == mtThreadStack) continue;
527
    diff_summary_of_type(flag,
528
      _early_baseline.malloc_memory(flag),
529
      _early_baseline.virtual_memory(flag),
530
      _early_baseline.metaspace_stats(),
531
      _current_baseline.malloc_memory(flag),
532
      _current_baseline.virtual_memory(flag),
533
      _current_baseline.metaspace_stats());
534
  }
535
}
536

537
void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
538
    size_t early_amount, size_t early_count, MEMFLAGS flags) const {
539
  const char* scale = current_scale();
540
  outputStream* out = output();
541
  const char* alloc_type = (flags == mtThread) ? "" : "malloc=";
542

543
  out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
544
  // Report type only if it is valid and not under "thread" category
545
  if (flags != mtNone && flags != mtThread) {
546
    out->print(" type=%s", NMTUtil::flag_to_name(flags));
547
  }
548

549
  int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
550
  if (amount_diff != 0) {
551
    out->print(" " INT64_PLUS_FORMAT "%s", amount_diff, scale);
552
  }
553
  if (current_count > 0) {
554
    out->print(" #" SIZE_FORMAT "", current_count);
555
    const ssize_t delta_count = counter_diff(current_count, early_count);
556
    if (delta_count != 0) {
557
      out->print(" " SSIZE_PLUS_FORMAT, delta_count);
558
    }
559
  }
560
}
561

562
void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
563
  size_t early_amount, size_t early_count) const {
564
  const char* scale = current_scale();
565
  outputStream* out = output();
566
  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
567
  int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
568
  if (amount_diff != 0) {
569
    out->print(" " INT64_PLUS_FORMAT "%s", amount_diff, scale);
570
  }
571

572
  out->print(" #" SIZE_FORMAT "", current_count);
573
  const ssize_t delta_count = counter_diff(current_count, early_count);
574
  if (delta_count != 0) {
575
    out->print(" " SSIZE_PLUS_FORMAT, delta_count);
576
  }
577
}
578

579
void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
580
    size_t early_reserved, size_t early_committed) const {
581
  const char* scale = current_scale();
582
  outputStream* out = output();
583
  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
584
  int64_t reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
585
  if (reserved_diff != 0) {
586
    out->print(" " INT64_PLUS_FORMAT "%s", reserved_diff, scale);
587
  }
588

589
  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
590
  int64_t committed_diff = diff_in_current_scale(current_committed, early_committed);
591
  if (committed_diff != 0) {
592
    out->print(" " INT64_PLUS_FORMAT "%s", committed_diff, scale);
593
  }
594
}
595

596

597
void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
598
  const MallocMemory* early_malloc, const VirtualMemory* early_vm,
599
  const MetaspaceCombinedStats& early_ms,
600
  const MallocMemory* current_malloc, const VirtualMemory* current_vm,
601
  const MetaspaceCombinedStats& current_ms) const {
602

603
  outputStream* out = output();
604
  const char* scale = current_scale();
605
  constexpr int indent = 28;
606

607
  // Total reserved and committed memory in current baseline
608
  size_t current_reserved_amount  = reserved_total (current_malloc, current_vm);
609
  size_t current_committed_amount = committed_total(current_malloc, current_vm);
610

611
  // Total reserved and committed memory in early baseline
612
  size_t early_reserved_amount  = reserved_total(early_malloc, early_vm);
613
  size_t early_committed_amount = committed_total(early_malloc, early_vm);
614

615
  // Adjust virtual memory total
616
  if (flag == mtThread) {
617
    const VirtualMemory* early_thread_stack_usage =
618
      _early_baseline.virtual_memory(mtThreadStack);
619
    const VirtualMemory* current_thread_stack_usage =
620
      _current_baseline.virtual_memory(mtThreadStack);
621

622
    early_reserved_amount  += early_thread_stack_usage->reserved();
623
    early_committed_amount += early_thread_stack_usage->committed();
624

625
    current_reserved_amount  += current_thread_stack_usage->reserved();
626
    current_committed_amount += current_thread_stack_usage->committed();
627
  } else if (flag == mtNMT) {
628
    early_reserved_amount  += _early_baseline.malloc_tracking_overhead();
629
    early_committed_amount += _early_baseline.malloc_tracking_overhead();
630

631
    current_reserved_amount  += _current_baseline.malloc_tracking_overhead();
632
    current_committed_amount += _current_baseline.malloc_tracking_overhead();
633
  }
634

635
  if (amount_in_current_scale(current_reserved_amount) > 0 ||
636
      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
637

638
    // print summary line
639
    out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
640
    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
641
      early_reserved_amount, early_committed_amount);
642
    out->print_cr(")");
643

644
    streamIndentor si(out, indent);
645

646
    // detail lines
647
    if (flag == mtClass) {
648
      // report class count
649
      out->print("(classes #" SIZE_FORMAT, _current_baseline.class_count());
650
      const ssize_t class_count_diff =
651
          counter_diff(_current_baseline.class_count(), _early_baseline.class_count());
652
      if (class_count_diff != 0) {
653
        out->print(" " SSIZE_PLUS_FORMAT, class_count_diff);
654
      }
655
      out->print_cr(")");
656

657
      out->print("(  instance classes #" SIZE_FORMAT, _current_baseline.instance_class_count());
658
      const ssize_t instance_class_count_diff =
659
          counter_diff(_current_baseline.instance_class_count(), _early_baseline.instance_class_count());
660
      if (instance_class_count_diff != 0) {
661
        out->print(" " SSIZE_PLUS_FORMAT, instance_class_count_diff);
662
      }
663
      out->print(", array classes #" SIZE_FORMAT, _current_baseline.array_class_count());
664
      const ssize_t array_class_count_diff =
665
          counter_diff(_current_baseline.array_class_count(), _early_baseline.array_class_count());
666
      if (array_class_count_diff != 0) {
667
        out->print(" " SSIZE_PLUS_FORMAT, array_class_count_diff);
668
      }
669
      out->print_cr(")");
670

671
    } else if (flag == mtThread) {
672
      // report thread count
673
      out->print("(threads #" SIZE_FORMAT, _current_baseline.thread_count());
674
      const ssize_t thread_count_diff = counter_diff(_current_baseline.thread_count(), _early_baseline.thread_count());
675
      if (thread_count_diff != 0) {
676
        out->print(" " SSIZE_PLUS_FORMAT, thread_count_diff);
677
      }
678
      out->print_cr(")");
679

680
      out->print("(stack: ");
681
      // report thread stack
682
      const VirtualMemory* current_thread_stack =
683
        _current_baseline.virtual_memory(mtThreadStack);
684
      const VirtualMemory* early_thread_stack =
685
        _early_baseline.virtual_memory(mtThreadStack);
686

687
      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
688
        early_thread_stack->reserved(), early_thread_stack->committed());
689

690
      out->print_cr(")");
691
    }
692

693
    // Report malloc'd memory
694
    size_t current_malloc_amount = current_malloc->malloc_size();
695
    size_t early_malloc_amount   = early_malloc->malloc_size();
696
    if (amount_in_current_scale(current_malloc_amount) > 0 ||
697
        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
698
      out->print("(");
699
      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
700
        early_malloc_amount, early_malloc->malloc_count(), mtNone);
701
      out->print_cr(")");
702
    }
703

704
    // Report virtual memory
705
    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
706
        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
707
      out->print("(mmap: ");
708
      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
709
        early_vm->reserved(), early_vm->committed());
710
      out->print_cr(")");
711
    }
712

713
    // Report arena memory
714
    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
715
        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
716
      out->print("(");
717
      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
718
        early_malloc->arena_size(), early_malloc->arena_count());
719
      out->print_cr(")");
720
    }
721

722
    // Report native memory tracking overhead
723
    if (flag == mtNMT) {
724
      size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
725
      size_t early_tracking_overhead   = amount_in_current_scale(_early_baseline.malloc_tracking_overhead());
726

727
      out->print("(tracking overhead=" SIZE_FORMAT "%s",
728
                 amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale);
729

730
      int64_t overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
731
                                                    _early_baseline.malloc_tracking_overhead());
732
      if (overhead_diff != 0) {
733
        out->print(" " INT64_PLUS_FORMAT "%s", overhead_diff, scale);
734
      }
735
      out->print_cr(")");
736
    } else if (flag == mtClass) {
737
      print_metaspace_diff(current_ms, early_ms);
738
    }
739
    out->cr();
740
  }
741
}
742

743
void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceCombinedStats& current_ms,
744
                                                  const MetaspaceCombinedStats& early_ms) const {
745
  print_metaspace_diff("Metadata", current_ms.non_class_space_stats(), early_ms.non_class_space_stats());
746
  if (Metaspace::using_class_space()) {
747
    print_metaspace_diff("Class space", current_ms.class_space_stats(), early_ms.class_space_stats());
748
  }
749
}
750

751
void MemSummaryDiffReporter::print_metaspace_diff(const char* header,
752
                                                  const MetaspaceStats& current_stats,
753
                                                  const MetaspaceStats& early_stats) const {
754
  outputStream* out = output();
755
  const char* scale = current_scale();
756

757
  out->print_cr("(  %s)", header);
758
  out->print("(    ");
759
  print_virtual_memory_diff(current_stats.reserved(),
760
                            current_stats.committed(),
761
                            early_stats.reserved(),
762
                            early_stats.committed());
763
  out->print_cr(")");
764

765
  int64_t diff_used = diff_in_current_scale(current_stats.used(),
766
                                            early_stats.used());
767

768
  size_t current_waste = current_stats.committed() - current_stats.used();
769
  size_t early_waste = early_stats.committed() - early_stats.used();
770
  int64_t diff_waste = diff_in_current_scale(current_waste, early_waste);
771

772
  // Diff used
773
  out->print("(    used=" SIZE_FORMAT "%s",
774
             amount_in_current_scale(current_stats.used()), scale);
775
  if (diff_used != 0) {
776
    out->print(" " INT64_PLUS_FORMAT "%s", diff_used, scale);
777
  }
778
  out->print_cr(")");
779

780
  // Diff waste
781
  const float waste_percentage = current_stats.committed() == 0 ? 0.0f :
782
                                 ((float)current_waste * 100.0f) / (float)current_stats.committed();
783
  out->print("(    waste=" SIZE_FORMAT "%s =%2.2f%%",
784
             amount_in_current_scale(current_waste), scale, waste_percentage);
785
  if (diff_waste != 0) {
786
    out->print(" " INT64_PLUS_FORMAT "%s", diff_waste, scale);
787
  }
788
  out->print_cr(")");
789
}
790

791
void MemDetailDiffReporter::report_diff() {
792
  MemSummaryDiffReporter::report_diff();
793
  diff_malloc_sites();
794
  diff_virtual_memory_sites();
795
}
796

797
void MemDetailDiffReporter::diff_malloc_sites() const {
798
  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type);
799
  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type);
800

801
  const MallocSite* early_site   = early_itr.next();
802
  const MallocSite* current_site = current_itr.next();
803

804
  while (early_site != nullptr || current_site != nullptr) {
805
    if (early_site == nullptr) {
806
      new_malloc_site(current_site);
807
      current_site = current_itr.next();
808
    } else if (current_site == nullptr) {
809
      old_malloc_site(early_site);
810
      early_site = early_itr.next();
811
    } else {
812
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
813
      if (compVal < 0) {
814
        new_malloc_site(current_site);
815
        current_site = current_itr.next();
816
      } else if (compVal > 0) {
817
        old_malloc_site(early_site);
818
        early_site = early_itr.next();
819
      } else {
820
        diff_malloc_site(early_site, current_site);
821
        early_site   = early_itr.next();
822
        current_site = current_itr.next();
823
      }
824
    }
825
  }
826
}
827

828
void MemDetailDiffReporter::diff_virtual_memory_sites() const {
829
  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
830
  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);
831

832
  const VirtualMemoryAllocationSite* early_site   = early_itr.next();
833
  const VirtualMemoryAllocationSite* current_site = current_itr.next();
834

835
  while (early_site != nullptr || current_site != nullptr) {
836
    if (early_site == nullptr) {
837
      new_virtual_memory_site(current_site);
838
      current_site = current_itr.next();
839
    } else if (current_site == nullptr) {
840
      old_virtual_memory_site(early_site);
841
      early_site = early_itr.next();
842
    } else {
843
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
844
      if (compVal < 0) {
845
        new_virtual_memory_site(current_site);
846
        current_site = current_itr.next();
847
      } else if (compVal > 0) {
848
        old_virtual_memory_site(early_site);
849
        early_site = early_itr.next();
850
      } else if (early_site->flag() != current_site->flag()) {
851
        // This site was originally allocated with one flag, then released,
852
        // then re-allocated at the same site (as far as we can tell) with a different flag.
853
        old_virtual_memory_site(early_site);
854
        early_site = early_itr.next();
855
        new_virtual_memory_site(current_site);
856
        current_site = current_itr.next();
857
      } else {
858
        diff_virtual_memory_site(early_site, current_site);
859
        early_site   = early_itr.next();
860
        current_site = current_itr.next();
861
      }
862
    }
863
  }
864
}
865

866

867
void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
868
  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
869
    0, 0, malloc_site->flag());
870
}
871

872
void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
873
  diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
874
    malloc_site->count(), malloc_site->flag());
875
}
876

877
void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
878
  const MallocSite* current)  const {
879
  if (early->flag() != current->flag()) {
880
    // If malloc site type changed, treat it as deallocation of old type and
881
    // allocation of new type.
882
    old_malloc_site(early);
883
    new_malloc_site(current);
884
  } else {
885
    diff_malloc_site(current->call_stack(), current->size(), current->count(),
886
      early->size(), early->count(), early->flag());
887
  }
888
}
889

890
void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
891
  size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
892
  outputStream* out = output();
893

894
  assert(stack != nullptr, "null stack");
895

896
  if (diff_in_current_scale(current_size, early_size) == 0) {
897
      return;
898
  }
899

900
  _stackprinter.print_stack(stack);
901
  INDENT_BY(28,
902
    out->print("(");
903
    print_malloc_diff(current_size, current_count, early_size, early_count, flags);
904
    out->print_cr(")");
905
  )
906
  out->cr();
907

908
}
909

910

911
void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
912
  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag());
913
}
914

915
void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
916
  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag());
917
}
918

919
void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
920
  const VirtualMemoryAllocationSite* current) const {
921
  diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
922
    early->reserved(), early->committed(), current->flag());
923
}
924

925
void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
926
  size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const  {
927
  outputStream* out = output();
928

929
  // no change
930
  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
931
      diff_in_current_scale(current_committed, early_committed) == 0) {
932
    return;
933
  }
934

935
  _stackprinter.print_stack(stack);
936
  INDENT_BY(28,
937
    out->print("(mmap: ");
938
    print_virtual_memory_diff(current_reserved, current_committed, early_reserved, early_committed);
939
    if (flag != mtNone) {
940
      out->print(" Type=%s", NMTUtil::flag_to_name(flag));
941
    }
942
    out->print_cr(")");
943
  )
944
  out->cr();
945
}
946

947

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.