jdk

Форк
0
/
mallocTracker.cpp 
313 строк · 11.1 Кб
1
/*
2
 * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
3
 * Copyright (c) 2021, 2023 SAP SE. All rights reserved.
4
 * Copyright (c) 2023, 2024, Red Hat, Inc. and/or its affiliates.
5
 *
6
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7
 *
8
 * This code is free software; you can redistribute it and/or modify it
9
 * under the terms of the GNU General Public License version 2 only, as
10
 * published by the Free Software Foundation.
11
 *
12
 * This code is distributed in the hope that it will be useful, but WITHOUT
13
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
 * version 2 for more details (a copy is included in the LICENSE file that
16
 * accompanied this code).
17
 *
18
 * You should have received a copy of the GNU General Public License version
19
 * 2 along with this work; if not, write to the Free Software Foundation,
20
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21
 *
22
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
23
 * or visit www.oracle.com if you need additional information or have any
24
 * questions.
25
 *
26
 */
27

28
#include "precompiled.hpp"
29
#include "jvm_io.h"
30
#include "logging/log.hpp"
31
#include "logging/logStream.hpp"
32
#include "nmt/mallocHeader.inline.hpp"
33
#include "nmt/mallocLimit.hpp"
34
#include "nmt/mallocSiteTable.hpp"
35
#include "nmt/mallocTracker.hpp"
36
#include "nmt/memTracker.hpp"
37
#include "runtime/arguments.hpp"
38
#include "runtime/atomic.hpp"
39
#include "runtime/globals.hpp"
40
#include "runtime/os.hpp"
41
#include "runtime/safefetch.hpp"
42
#include "utilities/debug.hpp"
43
#include "utilities/macros.hpp"
44
#include "utilities/ostream.hpp"
45
#include "utilities/vmError.hpp"
46
#include "utilities/globalDefinitions.hpp"
47

48
// The single global snapshot holding NMT's live malloc accounting state.
MallocMemorySnapshot MallocMemorySummary::_snapshot;
49

50
// Lock-free update of the recorded peak: raise _peak_size to 'size' if 'size'
// is larger, and remember the allocation count 'cnt' observed at that moment.
// Racing updaters compete via CAS on _peak_size; only the CAS winner stores
// _peak_count, so the count is the one seen by whichever thread set the peak
// (a benign race: _peak_count is not updated atomically with _peak_size).
void MemoryCounter::update_peak(size_t size, size_t cnt) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      // I won
      _peak_count = cnt;
      break;
    } else {
      // Lost the race; another thread moved the peak. Re-check against the
      // value it installed and retry only if our size is still larger.
      peak_sz = old_sz;
    }
  }
}
63

64
// Copy this snapshot into *s and recompute the grand total from the copied
// per-type counters so the destination is internally consistent.
void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) {
  // Use ThreadCritical to make sure that mtChunks don't get deallocated while the
  // copy is going on, because their size is adjusted using this
  // buffer in make_adjustment().
  ThreadCritical tc;
  s->_all_mallocs = _all_mallocs;
  size_t size_sum = 0;
  size_t count_sum = 0;
  for (int i = 0; i < mt_number_of_types; i++) {
    s->_malloc[i] = _malloc[i];
    size_sum += s->_malloc[i].malloc_size();
    count_sum += s->_malloc[i].malloc_count();
  }
  // Malloc counters may be updated concurrently, so derive the total from the
  // values we actually copied rather than trusting the source's running total.
  s->_all_mallocs.set_size_and_count(size_sum, count_sum);
}
80

81
// Total malloc'd memory used by arenas
82
size_t MallocMemorySnapshot::total_arena() const {
83
  size_t amount = 0;
84
  for (int index = 0; index < mt_number_of_types; index ++) {
85
    amount += _malloc[index].arena_size();
86
  }
87
  return amount;
88
}
89

90
// Make adjustment by subtracting chunks used by arenas
91
// from total chunks to get total free chunk size
92
void MallocMemorySnapshot::make_adjustment() {
93
  size_t arena_size = total_arena();
94
  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
95
  _malloc[chunk_idx].record_free(arena_size);
96
  _all_mallocs.deallocate(arena_size);
97
}
98

99
// One-time setup for summary-level malloc tracking: initialize malloc limit
// handling from the MallocLimit setting.
// (Note: the former "placement new" comment was stale — no placement
// initialization happens here.)
void MallocMemorySummary::initialize() {
  MallocLimitHandler::initialize(MallocLimit);
}
103

104
// Handle hitting the global MallocLimit: 's' is the triggering allocation
// size, 'so_far' the total allocated so far. Depending on limit->mode this
// either asserts fatally or logs a warning. Returns true if the limit was
// enforced, false if it was deliberately ignored (during error reporting).
bool MallocMemorySummary::total_limit_reached(size_t s, size_t so_far, const malloclimit* limit) {

// FORMATTED expands to both the format string and its arguments, so it can be
// passed as-is to either fatal() or log_warning() below.
#define FORMATTED \
  "MallocLimit: reached global limit (triggering allocation size: " PROPERFMT ", allocated so far: " PROPERFMT ", limit: " PROPERFMT ") ", \
  PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz)

  // If we hit the limit during error reporting, we print a short warning but otherwise ignore it.
  // We don't want to risk recursive assertion or torn hs-err logs.
  if (VMError::is_error_reported()) {
    // Print warning, but only the first n times to avoid flooding output.
    static int stopafter = 10;
    if (stopafter-- > 0) {
      log_warning(nmt)(FORMATTED);
    }
    return false;
  }

  if (limit->mode == MallocLimitMode::trigger_fatal) {
    fatal(FORMATTED);
  } else {
    log_warning(nmt)(FORMATTED);
  }
#undef FORMATTED

  return true;
}
130

131
// Handle hitting a per-category MallocLimit for category 'f': 's' is the
// triggering allocation size, 'so_far' the amount allocated in that category
// so far. Depending on limit->mode this either asserts fatally or logs a
// warning. Returns true if the limit was enforced, false if it was ignored
// (during error reporting).
bool MallocMemorySummary::category_limit_reached(MEMFLAGS f, size_t s, size_t so_far, const malloclimit* limit) {

// FORMATTED expands to both the format string and its arguments, so it can be
// passed as-is to either fatal() or log_warning() below.
#define FORMATTED \
  "MallocLimit: reached category \"%s\" limit (triggering allocation size: " PROPERFMT ", allocated so far: " PROPERFMT ", limit: " PROPERFMT ") ", \
  NMTUtil::flag_to_enum_name(f), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz)

  // If we hit the limit during error reporting, we print a short warning but otherwise ignore it.
  // We don't want to risk recursive assertion or torn hs-err logs.
  if (VMError::is_error_reported()) {
    // Print warning, but only the first n times to avoid flooding output.
    static int stopafter = 10;
    if (stopafter-- > 0) {
      log_warning(nmt)(FORMATTED);
    }
    return false;
  }

  if (limit->mode == MallocLimitMode::trigger_fatal) {
    fatal(FORMATTED);
  } else {
    log_warning(nmt)(FORMATTED);
  }
#undef FORMATTED

  return true;
}
157

158
// Set up malloc tracking for the given level. Summary accounting is enabled
// at summary level and above; the malloc site table only at detail level.
// Returns false only if the site table could not be initialized.
bool MallocTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    MallocMemorySummary::initialize();
  }
  return (level == NMT_detail) ? MallocSiteTable::initialize() : true;
}
168

169
// Record a malloc memory allocation.
// Accounts 'size' bytes against category 'flags' (and, at detail level,
// against the allocation site described by 'stack'), writes a MallocHeader
// into the first bytes of 'malloc_base', and returns the payload address
// just past the header — the pointer handed out to the caller of malloc.
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
  const NativeCallStack& stack)
{
  assert(MemTracker::enabled(), "precondition");
  assert(malloc_base != nullptr, "precondition");

  MallocMemorySummary::record_malloc(size, flags);
  uint32_t mst_marker = 0;
  if (MemTracker::tracking_level() == NMT_detail) {
    // Register the call stack in the site table; mst_marker identifies the
    // site so the free path can de-account against it later.
    MallocSiteTable::allocation_at(stack, size, &mst_marker, flags);
  }

  // Uses placement global new operator to initialize malloc header
  MallocHeader* const header = ::new (malloc_base)MallocHeader(size, flags, mst_marker);
  void* const memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));

  // The alignment check: 8 bytes alignment for 32 bit systems.
  //                      16 bytes alignment for 64-bit systems.
  assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");

#ifdef ASSERT
  // Read back: resolve the header from the payload pointer and verify it
  // round-trips the size and flags we just stored.
  {
    const MallocHeader* header2 = MallocHeader::resolve_checked(memblock);
    assert(header2->size() == size, "Wrong size");
    assert(header2->flags() == flags, "Wrong flags");
  }
#endif

  return memblock;
}
201

202
void* MallocTracker::record_free_block(void* memblock) {
203
  assert(MemTracker::enabled(), "Sanity");
204
  assert(memblock != nullptr, "precondition");
205

206
  MallocHeader* header = MallocHeader::resolve_checked(memblock);
207

208
  deaccount(header->free_info());
209

210
  header->mark_block_as_dead();
211

212
  return (void*)header;
213
}
214

215
// Subtract a freed block from the summary counters and, when running at
// detail level, from its recorded allocation site as well.
void MallocTracker::deaccount(MallocHeader::FreeInfo free_info) {
  MallocMemorySummary::record_free(free_info.size, free_info.flags);
  if (MemTracker::tracking_level() != NMT_detail) {
    return;
  }
  MallocSiteTable::deallocation_at(free_info.size, free_info.mst_marker);
}
221

222
// Given a pointer, look for the containing malloc block.
// Print the block. Note that since there is very low risk of memory looking
// accidentally like a valid malloc block header (canaries and all) so this is not
// totally failproof and may give a wrong answer. It is safe in that it will never
// crash, even when encountering unmapped memory.
// Returns true if a plausible block was found and printed, false otherwise.
bool MallocTracker::print_pointer_information(const void* p, outputStream* st) {
  assert(MemTracker::enabled(), "NMT not enabled");

#if !INCLUDE_ASAN

  address addr = (address)p;

  // Carefully feel your way upwards and try to find a malloc header. Then check if
  // we are within the block.
  // We give preference to found live blocks; but if no live block had been found,
  // but the pointer points into remnants of a dead block, print that instead.
  const MallocHeader* likely_dead_block = nullptr;
  const MallocHeader* likely_live_block = nullptr;
  {
    // Walk backwards from addr in pointer-sized steps, probing each aligned
    // position for something that looks like a MallocHeader.
    const size_t smallest_possible_alignment = sizeof(void*);
    const uint8_t* here = align_down(addr, smallest_possible_alignment);
    const uint8_t* const end = here - (0x1000 + sizeof(MallocHeader)); // stop searching after 4k
    for (; here >= end; here -= smallest_possible_alignment) {
      // JDK-8306561: cast to a MallocHeader needs to guarantee it can reside in readable memory
      if (!os::is_readable_range(here, here + sizeof(MallocHeader))) {
        // Probably OOB, give up
        break;
      }
      const MallocHeader* const candidate = (const MallocHeader*)here;
      if (!candidate->looks_valid()) {
        // This is definitely not a header, go on to the next candidate.
        continue;
      }

      // fudge factor:
      // We don't report blocks for which p is clearly outside of. That would cause us to return true and possibly prevent
      // subsequent tests of p, see os::print_location(). But if p is just outside of the found block, this may be a
      // narrow oob error and we'd like to know that.
      const int fudge = 8;
      const address start_block = (address)candidate;
      const address start_payload = (address)(candidate + 1);
      const address end_payload = start_payload + candidate->size();
      const address end_payload_plus_fudge = end_payload + fudge;
      if (addr >= start_block && addr < end_payload_plus_fudge) {
        // We found a block the pointer is pointing into, or almost into.
        // If its a live block, we have our info. If its a dead block, we still
        // may be within the borders of a larger live block we have not found yet -
        // continue search.
        if (candidate->is_live()) {
          likely_live_block = candidate;
          break;
        } else {
          likely_dead_block = candidate;
          continue;
        }
      }
    }
  }

  // If we've found a reasonable candidate. Print the info.
  // Live blocks win over dead ones (a dead header may be a stale remnant
  // inside a larger live allocation).
  const MallocHeader* block = likely_live_block != nullptr ? likely_live_block : likely_dead_block;
  if (block != nullptr) {
    // Classify where exactly addr falls relative to the block.
    const char* where = nullptr;
    const address start_block = (address)block;
    const address start_payload = (address)(block + 1);
    const address end_payload = start_payload + block->size();
    if (addr < start_payload) {
      where = "into header of";
    } else if (addr < end_payload) {
      where = "into";
    } else {
      where = "just outside of";
    }
    st->print_cr(PTR_FORMAT " %s %s malloced block starting at " PTR_FORMAT ", size " SIZE_FORMAT ", tag %s",
                 p2i(p), where,
                 (block->is_dead() ? "dead" : "live"),
                 p2i(block + 1), // lets print the payload start, not the header
                 block->size(), NMTUtil::flag_to_enum_name(block->flags()));
    if (MemTracker::tracking_level() == NMT_detail) {
      // At detail level, also print the allocation call stack if the site
      // table still has it.
      NativeCallStack ncs;
      if (MallocSiteTable::access_stack(ncs, *block)) {
        ncs.print_on(st);
        st->cr();
      }
    }
    return true;
  }

#endif // !INCLUDE_ASAN

  return false;
}
314

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.