/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != nullptr) {                                                   \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif
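
// Illustrative use (a sketch, not a definition from this section): on method
// unloading one would write
//   DTRACE_METHOD_UNLOAD_PROBE(method());
// which expands to the HOTSPOT_COMPILED_METHOD_UNLOAD probe call above when
// dtrace support is compiled in, and to nothing otherwise.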

// Cast from int value to narrow type
#define CHECKED_CAST(result, T, thing)  \
  result = static_cast<T>(thing); \
  assert(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
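
// For example, CHECKED_CAST(_entry_offset, uint16_t, offsets->value(CodeOffsets::Entry))
// (as used in init_defaults() below) narrows an int to 16 bits and asserts in
// debug builds that the round-trip back to int loses no information.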

//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
  uint nmethod_count;
  uint total_nm_size;
  uint total_immut_size;
  uint relocation_size;
  uint consts_size;
  uint insts_size;
  uint stub_size;
  uint oops_size;
  uint metadata_size;
  uint dependencies_size;
  uint nul_chk_table_size;
  uint handler_table_size;
  uint scopes_pcs_size;
  uint scopes_data_size;
#if INCLUDE_JVMCI
  uint speculations_size;
  uint jvmci_data_size;
#endif

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_nm_size += nm->size();
    total_immut_size += nm->immutable_data_size();
    relocation_size += nm->relocation_size();
    consts_size += nm->consts_size();
    insts_size += nm->insts_size();
    stub_size += nm->stub_size();
    oops_size += nm->oops_size();
    metadata_size += nm->metadata_size();
    scopes_data_size += nm->scopes_data_size();
    scopes_pcs_size += nm->scopes_pcs_size();
    dependencies_size += nm->dependencies_size();
    handler_table_size += nm->handler_table_size();
    nul_chk_table_size += nm->nul_chk_table_size();
#if INCLUDE_JVMCI
    speculations_size += nm->speculations_size();
    jvmci_data_size += nm->jvmci_data_size();
#endif
  }
  void print_nmethod_stats(const char* name) {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
    uint total_size = total_nm_size + total_immut_size;
    if (total_nm_size != 0) {
      tty->print_cr(" total size      = %u (100%%)", total_size);
      tty->print_cr("   in CodeCache  = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
    }
    uint header_size = (uint)(nmethod_count * sizeof(nmethod));
    if (nmethod_count != 0) {
      tty->print_cr("   header        = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
    }
    if (relocation_size != 0) {
      tty->print_cr("   relocation    = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_nm_size);
    }
    if (consts_size != 0) {
      tty->print_cr("   constants     = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
    }
    if (insts_size != 0) {
      tty->print_cr("   main code     = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
    }
    if (stub_size != 0) {
      tty->print_cr("   stub code     = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
    }
    if (oops_size != 0) {
      tty->print_cr("   oops          = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
    }
    if (metadata_size != 0) {
      tty->print_cr("   metadata      = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_nm_size);
    }
#if INCLUDE_JVMCI
    if (jvmci_data_size != 0) {
      tty->print_cr("   JVMCI data    = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_nm_size);
    }
#endif
    if (total_immut_size != 0) {
      tty->print_cr(" immutable data  = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
    }
    if (dependencies_size != 0) {
      tty->print_cr("   dependencies  = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
    }
    if (nul_chk_table_size != 0) {
      tty->print_cr("   nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
    }
    if (handler_table_size != 0) {
      tty->print_cr("   handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
    }
    if (scopes_pcs_size != 0) {
      tty->print_cr("   scopes pcs    = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
    }
    if (scopes_data_size != 0) {
      tty->print_cr("   scopes data   = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
    }
#if INCLUDE_JVMCI
    if (speculations_size != 0) {
      tty->print_cr("   speculations  = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
    }
#endif
  }
};

struct native_nmethod_stats_struct {
  uint native_nmethod_count;
  uint native_total_size;
  uint native_relocation_size;
  uint native_insts_size;
  uint native_oops_size;
  uint native_metadata_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size += nm->size();
    native_relocation_size += nm->relocation_size();
    native_insts_size += nm->insts_size();
    native_oops_size += nm->oops_size();
    native_metadata_size += nm->metadata_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %u", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %u", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %u", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %u", native_oops_size);
    if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %u", native_metadata_size);
  }
};

struct pc_nmethod_stats_struct {
  uint pc_desc_init;     // number of initialization of cache (= number of caches)
  uint pc_desc_queries;  // queries to nmethod::find_pc_desc
  uint pc_desc_approx;   // number of those which have approximate true
  uint pc_desc_repeats;  // number of _pc_descs[0] hits
  uint pc_desc_hits;     // number of LRU cache hits
  uint pc_desc_tests;    // total number of PcDesc examinations
  uint pc_desc_searches; // total number of quasi-binary search steps
  uint pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %u queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
                  pc_desc_init,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
};

#ifdef COMPILER1
static java_nmethod_stats_struct c1_java_nmethod_stats;
#endif
#ifdef COMPILER2
static java_nmethod_stats_struct c2_java_nmethod_stats;
#endif
#if INCLUDE_JVMCI
static java_nmethod_stats_struct jvmci_java_nmethod_stats;
#endif
static java_nmethod_stats_struct unknown_java_nmethod_stats;

static native_nmethod_stats_struct native_nmethod_stats;
static pc_nmethod_stats_struct pc_nmethod_stats;

static void note_java_nmethod(nmethod* nm) {
#ifdef COMPILER1
  if (nm->is_compiled_by_c1()) {
    c1_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#ifdef COMPILER2
  if (nm->is_compiled_by_c2()) {
    c2_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    jvmci_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
  {
    unknown_java_nmethod_stats.note_nmethod(nm);
  }
}
#endif // !PRODUCT

//---------------------------------------------------------------------------------

ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != nullptr, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != nullptr, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = nullptr;
  _purge_list_next = nullptr;

  add_address_and_handler(pc, handler);
}

address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != nullptr, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return nullptr;
}

bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}

address ExceptionCache::test_address(address addr) {
  int limit = count();
  for (int i = 0; i < limit; i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return nullptr;
}

bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;

  int index = count();
  if (index < cache_size) {
    set_pc_at(index, addr);
    set_handler_at(index, handler);
    increment_count();
    return true;
  }
  return false;
}

ExceptionCache* ExceptionCache::next() {
  return Atomic::load(&_next);
}

void ExceptionCache::set_next(ExceptionCache *ec) {
  Atomic::store(&_next, ec);
}

//-----------------------------------------------------------------------------

// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
  if (!approximate) {
    return pc->pc_offset() == pc_offset;
  } else {
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
  }
}
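
// Illustration: with approximate matching, a PcDesc whose pc_offset() is 40 and
// whose predecessor's pc_offset() is 24 matches any queried offset in (24, 40];
// an exact match requires the queried offset to be exactly 40.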

void PcDescCache::init_to(PcDesc* initial_pc_desc) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
  // initialize the cache by filling it with benign (non-null) values
  assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
         "must start with a sentinel");
  for (int i = 0; i < cache_size; i++) {
    _pc_descs[i] = initial_pc_desc;
  }
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  assert(res != nullptr, "PcDesc cache should be initialized already");

  // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
  if (approximate && match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return nullptr;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++) {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}
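
// Illustration: with cache contents [A, B, C, D], add_pc_desc(E) shifts each
// element down one slot to give [E, A, B, C]; the oldest entry D is evicted
// and _pc_descs[0] always holds the most recently added PcDesc.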

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = align_up(pcs_size, oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
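
// Worked example: if sizeof(PcDesc) were 12 with oopSize == 8, pcs_size == 12
// aligns up to 16, which is not a multiple of 12, so the result becomes
// 12 + 12 = 24, a multiple of both 8 and 12, as the assert requires.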

bool nmethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == nullptr)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* nmethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_entrant:
    return "not_entrant";
  default:
    fatal("unexpected method state: %d", state);
    return nullptr;
  }
}

void nmethod::set_deoptimized_done() {
  ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  if (_deoptimization_status != deoptimize_done) { // can't go backwards
    Atomic::store(&_deoptimization_status, deoptimize_done);
  }
}

ExceptionCache* nmethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != nullptr, "Must be non null");
  assert(new_entry->next() == nullptr, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != nullptr) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != nullptr) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void nmethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = nullptr;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != nullptr) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == nullptr) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = nullptr;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// public method for accessing the exception cache
// These are the public access methods.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != nullptr) {
      return ret_val;
    }
    ec = ec->next();
  }
  return nullptr;
}

void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == nullptr || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// private method for handling exception cache
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return nullptr;
}

bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}

bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, nullptr, nullptr);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* nmethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

address nmethod::oops_reloc_begin() const {
  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // locations depend on whether the nmethod is entrant or not.
  // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() == nullptr) {
    return;
  }

  // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
  JavaThread* thread = reg_map->thread();
  if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
    return;
  }

  if (!method()->is_native()) {
    address pc = fr.pc();
    bool has_receiver, has_appendix;
    Symbol* signature;

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != nullptr) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature    = callee->signature();
    } else {
      SimpleScopeDesc ssd(this, pc);

      Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
      has_receiver = call.has_receiver();
      has_appendix = call.has_appendix();
      signature    = call.signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  } else if (method()->is_continuation_enter_intrinsic()) {
    // This method only calls Continuation.enter()
    Symbol* signature = vmSymbols::continuationEnter_signature();
    fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
  }
}

Method* nmethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return nullptr; // not found
}

Method* nmethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return nullptr; // not a call
}

void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = nullptr;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT

static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  ic->clean_metadata();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <typename CallsiteT>
static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
                                         bool clean_all) {
  CodeBlob* cb = CodeCache::find_blob(callsite->destination());
  if (!cb->is_nmethod()) {
    return;
  }
  nmethod* nm = cb->as_nmethod();
  if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
    callsite->set_to_clean();
  }
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
void nmethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache only needs to be called if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
}

void nmethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != nullptr) {
    // We want to keep an invariant that nmethods found through iterations of a Thread's
    // nmethods found in safepoints have gone through an entry barrier and are not armed.
    // By calling this nmethod entry barrier, it plays along and acts
    // like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = this;
    bool alive = bs_nm->nmethod_entry_barrier(nm);
    assert(alive, "should be alive");
  }
}

// Only called by whitebox test
void nmethod::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledICLocker ic_locker(this);
  cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}

address* nmethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

// Called to clean up after class unloading for live nmethods
void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to bad nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }

      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }

      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != nullptr && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)nullptr);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }
}

address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = int(pc - code_begin());
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != nullptr && cb == this, "");

    // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
    stringStream ss;
    ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print_on(&ss);
    method()->print_codes_on(&ss);
    print_pcs_on(&ss);
    tty->print("%s", ss.as_string()); // print all at once
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return nullptr;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool nmethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}

int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != nullptr && is_native_method()) {
    if (method()->is_continuation_native_intrinsic()) {
      return "cnt";
    }
    return "c2n";
  }
  return nullptr;
}

const char* nmethod::compiler_name() const {
  return compilertype2name(_compiler_type);
}

#ifdef ASSERT
class CheckForOopsClosure : public OopClosure {
  bool _found_oop = false;
 public:
  virtual void do_oop(oop* o) { _found_oop = true; }
  virtual void do_oop(narrowOop* o) { _found_oop = true; }
  bool found_oop() { return _found_oop; }
};
class CheckForMetadataClosure : public MetadataClosure {
  bool _found_metadata = false;
  Metadata* _ignore = nullptr;
 public:
  CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
  virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
  bool found_metadata() { return _found_metadata; }
};

static void assert_no_oops_or_metadata(nmethod* nm) {
  if (nm == nullptr) return;
  assert(nm->oop_maps() == nullptr, "expectation");

  CheckForOopsClosure cfo;
  nm->oops_do(&cfo);
  assert(!cfo.found_oop(), "no oops allowed");

  // We allow an exception for the own Method, but require its class to be permanent.
  Method* own_method = nm->method();
  CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
  nm->metadata_do(&cfm);
  assert(!cfm.found_metadata(), "no metadata allowed");

  assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
         "Method's class needs to be permanent");
}
#endif

nmethod* nmethod::new_native_nmethod(const methodHandle& method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps,
  int exception_handler) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = nullptr;
  int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    if (exception_handler != -1) {
      offsets.set_value(CodeOffsets::Exceptions, exception_handler);
    }

    // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
    // IsUnloadingBehaviour::is_unloading needs to handle them separately.
    bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
    nm = new (native_nmethod_size, allow_NonNMethod_space)
    nmethod(method(), compiler_none, native_nmethod_size,
            compile_id, &offsets,
            code_buffer, frame_size,
            basic_lock_owner_sp_offset,
            basic_lock_sp_offset,
            oop_maps);
    DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
    NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
  }

  if (nm != nullptr) {
    // verify nmethod
    debug_only(nm->verify();) // might block

    nm->log_new_nmethod();
  }
  return nm;
}
nmethod* nmethod::new_nmethod(const methodHandle& method,
1125
CodeOffsets* offsets,
1127
DebugInformationRecorder* debug_info,
1128
Dependencies* dependencies,
1129
CodeBuffer* code_buffer, int frame_size,
1130
OopMapSet* oop_maps,
1131
ExceptionHandlerTable* handler_table,
1132
ImplicitExceptionTable* nul_chk_table,
1133
AbstractCompiler* compiler,
1134
CompLevel comp_level
1136
, char* speculations,
1137
int speculations_len,
1138
JVMCINMethodData* jvmci_data
1142
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1143
code_buffer->finalize_oop_references(method);
1145
nmethod* nm = nullptr;
1146
int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1148
if (compiler->is_jvmci()) {
1149
nmethod_size += align_up(jvmci_data->size(), oopSize);
1153
int immutable_data_size =
1154
adjust_pcs_size(debug_info->pcs_size())
1155
+ align_up((int)dependencies->size_in_bytes(), oopSize)
1156
+ align_up(handler_table->size_in_bytes() , oopSize)
1157
+ align_up(nul_chk_table->size_in_bytes() , oopSize)
1159
+ align_up(speculations_len , oopSize)
1161
+ align_up(debug_info->data_size() , oopSize);
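
  // The immutable data blob is consumed in this same order by the offsets
  // computed in the nmethod constructor below: dependencies first, then the
  // null-check and exception handler tables, then PcDescs, then scopes data,
  // and (when JVMCI is included) speculations.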

  // First, allocate space for immutable data in C heap.
  address immutable_data = nullptr;
  if (immutable_data_size > 0) {
    immutable_data = (address)os::malloc(immutable_data_size, mtCode);
    if (immutable_data == nullptr) {
      vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
      return nullptr;
    }
  }
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), compiler->type(), nmethod_size, immutable_data_size,
            compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
            debug_info, dependencies, code_buffer, frame_size, oop_maps,
            handler_table, nul_chk_table, compiler, comp_level
#if INCLUDE_JVMCI
            , speculations,
            speculations_len,
            jvmci_data
#endif
            );

    if (nm != nullptr) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on per-CallSite instance basis.
          oop call_site = deps.argument_oop(0);
          MethodHandles::add_dependent_nmethod(call_site, nm);
        } else {
          InstanceKlass* ik = deps.context_type();
          if (ik == nullptr) {
            continue; // ignore things like evol_method
          }
          // record this nmethod as dependent on this klass
          ik->add_dependent_nmethod(nm);
        }
      }
      NOT_PRODUCT(if (nm != nullptr) note_java_nmethod(nm));
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != nullptr) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}

// Fill in default values for various fields
void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
  // avoid uninitialized fields, even for short time periods
  _exception_cache           = nullptr;
  _gc_data                   = nullptr;
  _oops_do_mark_link         = nullptr;
  _compiled_ic_data          = nullptr;

  _is_unloading_state        = 0;
  _state                     = not_installed;

  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _has_wide_vectors          = 0;
  _has_monitors              = 0;
  _has_scoped_access         = 0;
  _has_flushed_dependencies  = 0;
  _is_unlinked               = 0;
  _load_reported             = 0; // jvmti state

  _deoptimization_status     = not_marked;

  // SECT_CONSTS is first in code buffer so the offset should be 0.
  int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
  assert(consts_offset == 0, "const_offset: %d", consts_offset);

  _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());

  CHECKED_CAST(_entry_offset,          uint16_t, (offsets->value(CodeOffsets::Entry)));
  CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));

  _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
}

// Post initialization
void nmethod::post_init() {
  clear_unloading_state();

  finalize_relocations();

  Universe::heap()->register_nmethod(this);
  debug_only(Universe::heap()->verify_nmethod(this));

  CodeCache::commit(this);
}

// For native wrappers
nmethod::nmethod(
  Method* method,
  CompilerType type,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
  _deoptimization_generation(0),
  _gc_epoch(CodeCache::gc_epoch()),
  _method(method),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults(code_buffer, offsets);

    _osr_entry_point         = nullptr;
    _pc_desc_container       = nullptr;
    _entry_bci               = InvocationEntryBci;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _compiler_type           = type;
    _orig_pc_offset          = 0;
    _num_stack_arg_slots     = _method->constMethod()->num_stack_arg_slots();

    if (offsets->value(CodeOffsets::Exceptions) != -1) {
      // Continuation enter intrinsic
      _exception_offset      = code_offset() + offsets->value(CodeOffsets::Exceptions);
    } else {
      _exception_offset      = 0;
    }
    // Native wrappers do not have deopt handlers. Make the values
    // something that will never match a pc like the nmethod vtable entry
    _deopt_handler_offset    = 0;
    _deopt_mh_handler_offset = 0;
    _unwind_handler_offset   = 0;

    CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
    int data_end_offset      = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
#if INCLUDE_JVMCI
    // jvmci_data_size is 0 in native wrapper but we need to set offset
    // to correctly calculate metadata_end address
    CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset);
#endif
    assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset));

    // native wrapper does not have read-only data but we need unique not null address
    _immutable_data          = data_end();
    _immutable_data_size     = 0;
    _nul_chk_table_offset    = 0;
    _handler_table_offset    = 0;
    _scopes_pcs_offset       = 0;
    _scopes_data_offset      = 0;
#if INCLUDE_JVMCI
    _speculations_offset     = 0;
#endif

    code_buffer->copy_code_and_locs_to(this);
    code_buffer->copy_values_to(this);

    post_init();
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != nullptr) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // Print the header part, then print the requested information.
    // This is both handled in decode2(), called via print_code() -> decode()
    if (PrintNativeNMethods) {
      tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
      print_code();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
#if defined(SUPPORT_DATA_STRUCTS)
      if (AbstractDisassembler::show_structs()) {
        if (oop_maps != nullptr) {
          tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
          oop_maps->print_on(tty);
          tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
        }
      }
#endif
    } else {
      print(); // print the header part only.
    }
#if defined(SUPPORT_DATA_STRUCTS)
    if (AbstractDisassembler::show_structs()) {
      if (PrintRelocations) {
        print_relocations();
        tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      }
    }
#endif
    if (xtty != nullptr) {
      xtty->tail("print_native_nmethod");
    }
  }
}

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}

void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
  // Try MethodNonProfiled and MethodProfiled.
  void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
  if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
  // Try NonNMethod or give up.
  return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
}
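
// Regular nmethods always live in the method segments; only callers that pass
// allow_NonNMethod_space == true (method handle intrinsic stubs, see
// new_native_nmethod() above) may fall back to the NonNMethod segment when the
// method segments are exhausted.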

// For normal JIT compiled code
nmethod::nmethod(
  Method* method,
  CompilerType type,
  int nmethod_size,
  int immutable_data_size,
  int compile_id,
  int entry_bci,
  address immutable_data,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  CompLevel comp_level
#if INCLUDE_JVMCI
  , char* speculations,
  int speculations_len,
  JVMCINMethodData* jvmci_data
#endif
  )
  : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
  _deoptimization_generation(0),
  _gc_epoch(CodeCache::gc_epoch()),
  _method(method),
  _osr_link(nullptr)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults(code_buffer, offsets);

    _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
    _entry_bci       = entry_bci;
    _compile_id      = compile_id;
    _comp_level      = comp_level;
    _compiler_type   = type;
    _orig_pc_offset  = orig_pc_offset;

    _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();

    set_ctable_begin(header_begin() + content_offset());

#if INCLUDE_JVMCI
    if (compiler->is_jvmci()) {
      // JVMCI might not produce any stub sections
      if (offsets->value(CodeOffsets::Exceptions) != -1) {
        _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
      } else {
        _exception_offset = -1;
      }
      if (offsets->value(CodeOffsets::Deopt) != -1) {
        _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
      } else {
        _deopt_handler_offset = -1;
      }
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deopt_mh_handler_offset = -1;
      }
    } else
#endif
    {
      // Exception handler and deopt handler are in the stub section
      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");

      _exception_offset     = _stub_offset + offsets->value(CodeOffsets::Exceptions);
      _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deopt_mh_handler_offset = -1;
      }
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      // C1 generates UnwindHandler at the end of instructions section.
      // Calculate positive offset as distance between the start of stubs section
      // (which is also the end of instructions section) and the start of the handler.
      int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
      CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
    } else {
      _unwind_handler_offset = -1;
    }
    CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
    int metadata_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
#if INCLUDE_JVMCI
    CHECKED_CAST(_jvmci_data_offset, uint16_t, metadata_end_offset);
    int jvmci_data_size = compiler->is_jvmci() ? jvmci_data->size() : 0;
    DEBUG_ONLY( int data_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); )
#else
    DEBUG_ONLY( int data_end_offset = metadata_end_offset; )
#endif
    assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d > %d",
           (data_offset() + data_end_offset), nmethod_size);

    _immutable_data_size = immutable_data_size;
    if (immutable_data_size > 0) {
      assert(immutable_data != nullptr, "required");
      _immutable_data = immutable_data;
    } else {
      // We need unique not null address
      _immutable_data = data_end();
    }
    CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
    CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
    _scopes_pcs_offset  = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
    _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());

#if INCLUDE_JVMCI
    _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
    DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); )
#else
    DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); )
#endif
    assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
           immutable_data_end_offset, immutable_data_size);

    // Copy code and relocation info
    code_buffer->copy_code_and_locs_to(this);
    // Copy oops and metadata
    code_buffer->copy_values_to(this);
    dependencies->copy_to(this);
    // Copy PcDesc and ScopeDesc data
    debug_info->copy_to(this);

    // Create cache after PcDesc data is copied - it will be used to initialize cache
    _pc_desc_container = new PcDescContainer(scopes_pcs_begin());

#if INCLUDE_JVMCI
    if (compiler->is_jvmci()) {
      // Initialize the JVMCINMethodData object inlined into nm
      jvmci_nmethod_data()->copy(jvmci_data);
    }
#endif

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

#if INCLUDE_JVMCI
    // Copy speculations to nmethod
    if (speculations_size() != 0) {
      memcpy(speculations_begin(), speculations, speculations_len);
    }
#endif

    post_init();

    // we use the information of entry points to find out if a method is
    // static or non static
    assert(compiler->is_c2() || compiler->is_jvmci() ||
           _method->is_static() == (entry_point() == verified_entry_point()),
           " entry points must be same for static methods and vice versa");
  }
}

// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
  log->print(" compiler='%s'", compiler_name());
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
#if INCLUDE_JVMCI
  if (jvmci_nmethod_data() != nullptr) {
    const char* jvmci_name = jvmci_nmethod_data()->name();
    if (jvmci_name != nullptr) {
      log->print(" jvmci_mirror_name='");
      log->text("%s", jvmci_name);
      log->print("'");
    }
  }
#endif
}

#define LOG_OFFSET(log, name)                    \
  if (p2i(name##_end()) - p2i(name##_begin())) \
    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'", \
               p2i(name##_begin()) - p2i(this))
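
// For a non-empty section this emits an attribute such as insts_offset='368'
// (the section's distance from the nmethod header); sections whose begin and
// end addresses coincide are skipped entirely.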

void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != nullptr) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
    xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);
    LOG_OFFSET(xtty, metadata);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET

// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != nullptr) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
    } else {
      CompileTask::print(st, this, msg, /*short_form:*/ false);
    }
  }
}

void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
  bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}

void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != nullptr) {
    xtty->begin_head("print_nmethod");
    log_identity(xtty);
    xtty->stamp();
    xtty->end_head();
  }
  // Print the header part, then print the requested information.
  // This is both handled in decode2().
  if (printmethod) {
    ResourceMark m;
    if (is_compiled_by_c1()) {
      tty->cr();
      tty->print_cr("============================= C1-compiled nmethod ==============================");
    }
    if (is_compiled_by_jvmci()) {
      tty->cr();
      tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
    }
    tty->print_cr("----------------------------------- Assembly -----------------------------------");
    print_code();
#if defined(SUPPORT_DATA_STRUCTS)
    if (AbstractDisassembler::show_structs()) {
      // Print the oops from the underlying CodeBlob as well.
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_oops(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_metadata(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_pcs_on(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      if (oop_maps() != nullptr) {
        tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
        oop_maps()->print_on(tty);
        tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      }
    }
#endif
  } else {
    print(); // print the header part only.
  }

#if defined(SUPPORT_DATA_STRUCTS)
  if (AbstractDisassembler::show_structs()) {
    methodHandle mh(Thread::current(), _method);
    if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
      print_scopes();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
      print_relocations();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
      print_dependencies_on(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintExceptionHandlers) {
      print_handler_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_nul_chk_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod) {
      print_recorded_oops();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_recorded_metadata();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
  }
#endif

  if (xtty != nullptr) {
    xtty->tail("print_nmethod");
  }
}

// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == nullptr ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    *(void**)dest = handle;
  } else {
    *dest = JNIHandles::resolve_non_null(handle);
  }
}
// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        jobject obj = *reinterpret_cast<jobject*>(dest);
        initialize_immediate_oop(dest, obj);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}
static void install_post_call_nop_displacement(nmethod* nm, address pc) {
  NativePostCallNop* nop = nativePostCallNop_at((address) pc);
  intptr_t cbaddr = (intptr_t) nm;
  intptr_t offset = ((intptr_t) pc) - cbaddr;

  int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
  if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
    log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
  } else if (!nop->patch(oopmap_slot, offset)) {
    log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
  }
}
void nmethod::finalize_relocations() {
  NoSafepointVerifier nsv;

  GrowableArray<NativeMovConstReg*> virtual_call_data;

  // Make sure that post call nops fill in nmethod offsets eagerly so
  // we don't have to race with deoptimization
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      virtual_call_Relocation* r = iter.virtual_call_reloc();
      NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
      virtual_call_data.append(value);
    } else if (iter.type() == relocInfo::post_call_nop_type) {
      post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
      address pc = reloc->addr();
      install_post_call_nop_displacement(this, pc);
    }
  }

  if (virtual_call_data.length() > 0) {
    // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
    _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
    CompiledICData* next_data = _compiled_ic_data;

    for (NativeMovConstReg* value : virtual_call_data) {
      value->set_data((intptr_t)next_data);
      next_data++;
    }
  }
}
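// Note on the layout above: virtual call site k is pointed at
// &_compiled_ic_data[k], so all CompiledICData for this nmethod live in one
// heap block. That is what lets nmethod::purge() release them with a single
// delete[] instead of walking every call site again.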
void nmethod::make_deoptimized() {
  if (!Continuations::enabled()) {
    // Don't deopt this again.
    set_deoptimized_done();
    return;
  }

  assert(method() == nullptr || can_be_deoptimized(), "");

  CompiledICLocker ml(this);
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");

  // If post call nops have been already patched, we can just bail-out.
  if (has_been_deoptimized()) {
    return;
  }

  RelocIterator iter(this, oops_reloc_begin());

  while (iter.next()) {

    switch (iter.type()) {
      case relocInfo::virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        address pc = ic->end_of_call();
        NativePostCallNop* nop = nativePostCallNop_at(pc);
        if (nop != nullptr) {
          nop->make_deopt();
        }
        assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
        break;
      }
      case relocInfo::static_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
        address pc = csc->end_of_call();
        NativePostCallNop* nop = nativePostCallNop_at(pc);
        //tty->print_cr(" - static pc %p", pc);
        if (nop != nullptr) {
          nop->make_deopt();
        }
        // We can't assert here, there are some calls to stubs / runtime
        // that have reloc data but don't have a post call NOP.
        //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
        break;
      }
      default:
        break;
    }
  }
  // Don't deopt this again.
  set_deoptimized_done();
}
void nmethod::verify_clean_inline_caches() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");

  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        CodeBlob *cb = CodeCache::find_blob(ic->destination());
        assert(cb != nullptr, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != nullptr) {
          // Verify that inline caches pointing to bad nmethods are clean
          if (!nm->is_in_use() || nm->is_unloading()) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob(cdc->destination());
        assert(cb != nullptr, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != nullptr) {
          // Verify that inline caches pointing to bad nmethods are clean
          if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
            assert(cdc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      default:
        break;
    }
  }
}
void nmethod::mark_as_maybe_on_stack() {
  Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
}

bool nmethod::is_maybe_on_stack() {
  // If the condition below is true, it means that the nmethod was found to
  // be alive during the previous completed marking cycle.
  return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
}
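// Worked example: if the previous completed marking cycle is 41, an nmethod
// whose _gc_epoch was stamped at 41 or later (it was seen on some stack since
// that cycle completed) must still be treated as possibly on-stack; one
// stamped at 39 need not be.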
void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == nullptr) return;
  MethodData* mdo = m->method_data();
  if (mdo == nullptr) return;
  // There is a benign race here. See comments in methodData.hpp.
  mdo->inc_decompile_count();
}
bool nmethod::try_transition(signed char new_state_int) {
  signed char new_state = new_state_int;
  assert_lock_strong(NMethodState_lock);
  signed char old_state = _state;
  if (old_state >= new_state) {
    // Ensure monotonicity of transitions.
    return false;
  }
  Atomic::store(&_state, new_state);
  return true;
}
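// Example of the monotonicity guarantee: with the state values ordered so
// that not_entrant is the largest, try_transition(not_entrant) succeeds at
// most once; a racing second caller observes old_state >= new_state and gets
// false, so the side effects of a transition are claimed exactly once.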
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != nullptr) {
    method()->method_holder()->remove_osr_nmethod(this);
  }
}
void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != nullptr) {
      ttyLocker ttyl;  // keep the following output all in one block
      xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
                       os::current_thread_id());
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }

  CompileTask::print_ul(this, "made not entrant");
  if (PrintCompilation) {
    print_on(tty, "made not entrant");
  }
}
void nmethod::unlink_from_method() {
  if (method() != nullptr) {
    method()->unlink_code(this);
  }
}
bool nmethod::make_not_entrant() {
  // This can be called while the system is already at a safepoint which is ok
  NoSafepointVerifier nsv;

  if (is_unloading()) {
    // If the nmethod is unloading, then it is already not entrant through
    // the nmethod entry barriers. No need to do anything; GC will unload it.
    return false;
  }

  if (Atomic::load(&_state) == not_entrant) {
    // Avoid taking the lock if already in required state.
    // This is safe from races because the state is an end-state,
    // which the nmethod cannot back out of once entered.
    // No need for fencing either.
    return false;
  }

  {
    // Enter critical section. Does not block for safepoint.
    ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

    if (Atomic::load(&_state) == not_entrant) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
    }

    if (is_osr_method()) {
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods.
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    } else {
      // The caller can be calling the method statically or through an inline
      // cache call.
      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                                       SharedRuntime::get_handle_wrong_method_stub());
    }

    if (update_recompile_counts()) {
      // Mark the method as decompiled.
      inc_decompile_count();
    }

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
      // If nmethod entry barriers are not supported, we won't mark
      // nmethods as on-stack when they become on-stack. So we
      // degrade to a less accurate flushing strategy, for now.
      mark_as_maybe_on_stack();
    }

    bool success = try_transition(not_entrant);
    assert(success, "Transition can't fail");

    // Log the transition once
    log_state_change();

    // Remove nmethod from method.
    unlink_from_method();

  } // leave critical region under NMethodState_lock

#if INCLUDE_JVMCI
  // Invalidate can't occur while holding the Patching lock
  JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
  if (nmethod_data != nullptr) {
    nmethod_data->invalidate_nmethod_mirror(this);
  }
#endif

#ifdef ASSERT
  if (is_osr_method() && method() != nullptr) {
    // Make sure osr nmethod is invalidated, i.e. not on the list
    bool found = method()->method_holder()->remove_osr_nmethod(this);
    assert(!found, "osr nmethod should have been invalidated");
  }
#endif

  return true;
}
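// Note: after the patch above, callers that still jump to the old verified
// entry point land in the handle_wrong_method stub, which re-resolves the
// call and dispatches to the interpreter or to a newer compilation.
// Activations already executing inside this nmethod are unaffected here.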
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
  if (is_unlinked()) {
    // Already unlinked.
    return;
  }

  flush_dependencies();

  // unlink_from_method will take the NMethodState_lock.
  // In this case we don't strictly need it when unlinking nmethods from
  // the Method, because it is only concurrently unlinked by
  // the entry barrier, which acquires the per nmethod lock.
  unlink_from_method();

  if (is_osr_method()) {
    invalidate_osr_method();
  }

#if INCLUDE_JVMCI
  // Clear the link between this nmethod and a HotSpotNmethod mirror
  JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
  if (nmethod_data != nullptr) {
    nmethod_data->invalidate_nmethod_mirror(this);
  }
#endif

  // Post before flushing as jmethodID is being used
  post_compiled_method_unload();

  // Register for flushing when it is safe. For concurrent class unloading,
  // that would be after the unloading handshake, and for STW class unloading
  // that would be when getting back to the VM thread.
  ClassUnloadingContext::context()->register_unlinked_nmethod(this);
}
void nmethod::purge(bool unregister_nmethod) {

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // completely deallocate this method
  Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
  log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
                       "/Free CodeCache:" SIZE_FORMAT "Kb",
                       is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
                       CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  while (ec != nullptr) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }
  if (_pc_desc_container != nullptr) {
    delete _pc_desc_container;
  }
  delete[] _compiled_ic_data;

  if (_immutable_data != data_end()) {
    os::free(_immutable_data);
    _immutable_data = data_end(); // Valid not null address
  }
  if (unregister_nmethod) {
    Universe::heap()->unregister_nmethod(this);
  }
  CodeCache::unregister_old_nmethod(this);

  CodeBlob::purge();
}
oop nmethod::oop_at(int index) const {
  if (index == 0) {
    return nullptr;
  }
  return NMethodAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index));
}

oop nmethod::oop_at_phantom(int index) const {
  if (index == 0) {
    return nullptr;
  }
  return NMethodAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index));
}
// Notify all classes this nmethod is dependent on that it is no
// longer dependent.

void nmethod::flush_dependencies() {
  if (!has_flushed_dependencies()) {
    set_has_flushed_dependencies(true);
    for (Dependencies::DepStream deps(this); deps.next(); ) {
      if (deps.type() == Dependencies::call_site_target_value) {
        // CallSite dependencies are managed on a per-CallSite instance basis.
        oop call_site = deps.argument_oop(0);
        MethodHandles::clean_dependency_context(call_site);
      } else {
        InstanceKlass* ik = deps.context_type();
        if (ik == nullptr) {
          continue;  // ignore things like evol_method
        }
        // During GC, liveness of the dependee determines the class that needs to be updated.
        // The GC may clean dependency contexts concurrently and in parallel.
        ik->clean_dependency_context();
      }
    }
  }
}
void nmethod::post_compiled_method(CompileTask* task) {
  task->mark_success();
  task->set_nm_content_size(content_size());
  task->set_nm_insts_size(insts_size());
  task->set_nm_total_size(total_size());

  // JVMTI -- compiled method notification (must be done outside lock)
  post_compiled_method_load_event();

  if (CompilationLog::log() != nullptr) {
    CompilationLog::log()->log_nmethod(JavaThread::current(), this);
  }

  const DirectiveSet* directive = task->directive();
  maybe_print_nmethod(directive);
}
// ------------------------------------------------------------------
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
  // This is a bad time for a safepoint. We don't want
  // this nmethod to get unloaded while we're queueing the event.
  NoSafepointVerifier nsv;

  Method* m = method();
  HOTSPOT_COMPILED_METHOD_LOAD(
      (char *) m->klass_name()->bytes(),
      m->klass_name()->utf8_length(),
      (char *) m->name()->bytes(),
      m->name()->utf8_length(),
      (char *) m->signature()->bytes(),
      m->signature()->utf8_length(),
      insts_begin(), insts_size());

  if (JvmtiExport::should_post_compiled_method_load()) {
    // Only post unload events if load events are found.
    set_load_reported();
    // If a JavaThread hasn't been passed in, let the Service thread
    // (which is a real Java thread) post the event
    JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
    if (state == nullptr) {
      // Execute any barrier code for this nmethod as if it's called, since
      // keeping it alive looks like stack walking.
      run_nmethod_entry_barrier();
      ServiceThread::enqueue_deferred_event(&event);
    } else {
      // This enters the nmethod barrier outside in the caller.
      state->enqueue_event(&event);
    }
  }
}
void nmethod::post_compiled_method_unload() {
  assert(_method != nullptr, "just checking");
  DTRACE_METHOD_UNLOAD_PROBE(method());

  // If a JVMTI agent has enabled the CompiledMethodUnload event then
  // post the event. The Method* will not be valid when this is freed.

  // Don't bother posting the unload if the load event wasn't posted.
  if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
    JvmtiDeferredEvent event =
      JvmtiDeferredEvent::compiled_method_unload_event(
          method()->jmethod_id(), insts_begin());
    ServiceThread::enqueue_deferred_event(&event);
  }
}
// Iterate over metadata calling this function. Used by RedefineClasses
void nmethod::metadata_do(MetadataClosure* f) {
  {
    // Visit all immediate references that are embedded in the instruction stream.
    RelocIterator iter(this, oops_reloc_begin());
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* r = iter.metadata_reloc();
        // In this metadata, we must only follow those metadatas directly embedded in
        // the code.  Other metadatas (oop_index>0) are seen as part of
        // the metadata section below.
        assert(1 == (r->metadata_is_immediate()) +
               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
               "metadata must be found in exactly one place");
        if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
          Metadata* md = r->metadata_value();
          if (md != _method) f->do_metadata(md);
        }
      } else if (iter.type() == relocInfo::virtual_call_type) {
        // Check compiledIC holders associated with this nmethod
        ResourceMark rm;
        CompiledIC *ic = CompiledIC_at(&iter);
        ic->metadata_do(f);
      }
    }
  }

  // Visit the metadata section
  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
    if (*p == Universe::non_oop_word() || *p == nullptr) continue;  // skip non-oops
    Metadata* md = *p;
    f->do_metadata(md);
  }

  // Visit metadata not embedded in the other places.
  if (_method != nullptr) f->do_metadata(_method);
}
// Heuristic for nuking nmethods even though their oops are live.
// Main purpose is to reduce code cache pressure and get rid of
// nmethods that don't seem to be all that relevant any longer.
bool nmethod::is_cold() {
  if (!MethodFlushing || is_native_method() || is_not_installed()) {
    // No heuristic unloading at all
    return false;
  }

  if (!is_maybe_on_stack() && is_not_entrant()) {
    // Not entrant nmethods that are not on any stack can just
    // be unloaded
    return true;
  }

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
    // On platforms that don't support nmethod entry barriers, we can't
    // trust the temporal aspect of the gc epochs. So we can't detect
    // cold nmethods on such platforms.
    return false;
  }

  if (!UseCodeCacheFlushing) {
    // Bail out if we don't heuristically remove nmethods
    return false;
  }

  // Other code can be phased out more gradually after N GCs
  return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
}
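// Worked example: with cold_gc_count() == 8 and _gc_epoch stamped at cycle
// 100, this nmethod becomes cold once previous_completed_gc_marking_cycle()
// exceeds 100 + 2 * 8 == 116, i.e. after sitting unused through roughly two
// cold-GC windows of marking cycles.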
// The _is_unloading_state encodes a tuple comprising the unloading cycle
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
// This is the bit layout of the _is_unloading_state byte: 00000CCU
// CC refers to the cycle, which has 2 bits, and U refers to the result of
// IsUnloadingBehaviour::is_unloading() for that unloading cycle.

class IsUnloadingState: public AllStatic {
  static const uint8_t _is_unloading_mask = 1;
  static const uint8_t _is_unloading_shift = 0;
  static const uint8_t _unloading_cycle_mask = 6;
  static const uint8_t _unloading_cycle_shift = 1;

  static uint8_t set_is_unloading(uint8_t state, bool value) {
    state &= (uint8_t)~_is_unloading_mask;
    if (value) {
      state |= 1 << _is_unloading_shift;
    }
    assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
    return state;
  }

  static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
    state &= (uint8_t)~_unloading_cycle_mask;
    state |= (uint8_t)(value << _unloading_cycle_shift);
    assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
    return state;
  }

public:
  static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
  static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }

  static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
    uint8_t state = 0;
    state = set_is_unloading(state, is_unloading);
    state = set_unloading_cycle(state, unloading_cycle);
    return state;
  }
};
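// Worked example of the 00000CCU layout: create(true, 2) sets bit 0 via
// set_is_unloading() and stores the cycle in bits 2:1, yielding 0b101 (5).
// Decoding recovers both fields:
//   unloading_cycle(0b101) == (5 & 6) >> 1 == 2
//   is_unloading(0b101)    == (5 & 1) == 1, i.e. true
// The two-bit cycle wraps around, which is what the overflow asserts above
// guard against misusing.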
bool nmethod::is_unloading() {
  uint8_t state = Atomic::load(&_is_unloading_state);
  bool state_is_unloading = IsUnloadingState::is_unloading(state);
  if (state_is_unloading) {
    return true;
  }
  uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
  uint8_t current_cycle = CodeCache::unloading_cycle();
  if (state_unloading_cycle == current_cycle) {
    return false;
  }

  // The IsUnloadingBehaviour is responsible for calculating if the nmethod
  // should be unloaded. This can be either because there is a dead oop,
  // or because is_cold() heuristically determines it is time to unload.
  state_unloading_cycle = current_cycle;
  state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
  uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);

  // Note that if an nmethod has dead oops, everyone will agree that the
  // nmethod is_unloading. However, the is_cold heuristics can yield
  // different outcomes, so we guard the computed result with a CAS
  // to ensure all threads have a shared view of whether an nmethod
  // is_unloading or not.
  uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);

  if (found_state == state) {
    // First to change state, we win
    return state_is_unloading;
  } else {
    // State already set, so use it
    return IsUnloadingState::is_unloading(found_state);
  }
}

void nmethod::clear_unloading_state() {
  uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
  Atomic::store(&_is_unloading_state, state);
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops or is heuristically found to be not important.
void nmethod::do_unloading(bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  if (is_unloading()) {
    unlink();
  } else {
    unload_nmethod_caches(unloading_occurred);
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != nullptr) {
      bs_nm->disarm(this);
    }
  }
}
void nmethod::oops_do(OopClosure* f, bool allow_dead) {
  // Prevent extra code cache walk for platforms that don't have immediate oops.
  if (relocInfo::mustIterateImmediateOopsInCode()) {
    RelocIterator iter(this, oops_reloc_begin());

    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation* r = iter.oop_reloc();
        // In this loop, we must only follow those oops directly embedded in
        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != nullptr) {
          f->do_oop(r->oop_addr());
        }
      }
    }
  }

  // Scopes
  // This includes oop constants not inlined in the code stream.
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word()) continue;  // skip non-oops
    f->do_oop(p);
  }
}

void nmethod::follow_nmethod(OopIterateClosure* cl) {
  // Process oops in the nmethod
  oops_do(cl);

  // CodeCache unloading support
  mark_as_maybe_on_stack();

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  bs_nm->disarm(this);

  // There's an assumption made that this function is not used by GCs that
  // relocate objects, and therefore we don't call fix_oop_relocations.
}
nmethod* volatile nmethod::_oops_do_mark_nmethods;

void nmethod::oops_do_log_change(const char* state) {
  LogTarget(Trace, gc, nmethod) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    CompileTask::print(&ls, this, state, true /* short_form */);
  }
}

bool nmethod::oops_do_try_claim() {
  if (oops_do_try_claim_weak_request()) {
    nmethod* result = oops_do_try_add_to_list_as_weak_done();
    assert(result == nullptr, "adding to global list as weak done must always succeed.");
    return true;
  }
  return false;
}

bool nmethod::oops_do_try_claim_weak_request() {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

  if ((_oops_do_mark_link == nullptr) &&
      (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
    oops_do_log_change("oops_do, mark weak request");
    return true;
  }
  return false;
}

void nmethod::oops_do_set_strong_done(nmethod* old_head) {
  _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
}

nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
  if (old_next == nullptr) {
    oops_do_log_change("oops_do, mark strong done");
  }
  return old_next;
}

nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
  assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");

  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
  if (old_next == next) {
    oops_do_log_change("oops_do, mark strong request");
  }
  return old_next;
}

bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
  assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");

  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
  if (old_next == next) {
    oops_do_log_change("oops_do, mark weak done -> mark strong done");
  }
  return old_next == next;
}

nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

  assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
         extract_state(_oops_do_mark_link) == claim_strong_request_tag,
         "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));

  nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
  // Self-loop if needed.
  if (old_head == nullptr) {
    old_head = this;
  }
  // Try to install end of list and weak done tag.
  if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
    oops_do_log_change("oops_do, mark weak done");
    return nullptr;
  } else {
    return old_head;
  }
}

void nmethod::oops_do_add_to_list_as_strong_done() {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

  nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
  // Self-loop if needed.
  if (old_head == nullptr) {
    old_head = this;
  }
  assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
         p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));

  oops_do_set_strong_done(old_head);
}
void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
  if (!oops_do_try_claim_weak_request()) {
    // Failed to claim for weak processing.
    oops_do_log_change("oops_do, mark weak request fail");
    return;
  }

  p->do_regular_processing(this);

  nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
  if (old_head == nullptr) {
    return;
  }
  oops_do_log_change("oops_do, mark weak done fail");
  // Adding to global list failed, another thread added a strong request.
  assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
         "must be but is %u", extract_state(_oops_do_mark_link));

  oops_do_log_change("oops_do, mark weak request -> mark strong done");

  oops_do_set_strong_done(old_head);
  // Do missing strong processing.
  p->do_remaining_strong_processing(this);
}

void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
  oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
  if (next_raw == nullptr) {
    p->do_regular_processing(this);
    oops_do_add_to_list_as_strong_done();
    return;
  }
  // Claim failed. Figure out why and handle it.
  if (oops_do_has_weak_request(next_raw)) {
    oops_do_mark_link* old = next_raw;
    // Claim failed because being weak processed (state == "weak request").
    // Try to request deferred strong processing.
    next_raw = oops_do_try_add_strong_request(old);
    if (next_raw == old) {
      // Successfully requested deferred strong processing.
      return;
    }
    // Failed because of a concurrent transition. No longer in "weak request" state.
  }
  if (oops_do_has_any_strong_state(next_raw)) {
    // Already claimed for strong processing or requested for such.
    return;
  }
  if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
    // Successfully claimed "weak done" as "strong done". Do the missing marking.
    p->do_remaining_strong_processing(this);
    return;
  }
  // Claim failed, some other thread got it.
}
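// Summary of the claim protocol implemented above (a sketch; the functions
// themselves are the authoritative transitions). The low bits of
// _oops_do_mark_link carry a tag:
//
//   nullptr --> weak request --> weak done --> strong done
//                    |                            ^
//                    +--> strong request ---------+
//   nullptr ------------------------------------> strong done
//
// "weak request" is taken by a weak processor; "strong request" defers the
// remaining strong work to that weak claimer; "weak done" can be upgraded to
// "strong done" by a strong processor doing only the leftover strong work.
// Every path ends in a done state, so each nmethod is processed once per
// marking operation no matter how many threads race on it.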
void nmethod::oops_do_marking_prologue() {
  assert_at_safepoint();

  log_trace(gc, nmethod)("oops_do_marking_prologue");
  assert(_oops_do_mark_nmethods == nullptr, "must be empty");
}

void nmethod::oops_do_marking_epilogue() {
  assert_at_safepoint();

  nmethod* next = _oops_do_mark_nmethods;
  _oops_do_mark_nmethods = nullptr;
  if (next != nullptr) {
    nmethod* cur;
    do {
      cur = next;
      next = extract_nmethod(cur->_oops_do_mark_link);
      cur->_oops_do_mark_link = nullptr;
      DEBUG_ONLY(cur->verify_oop_relocations());

      LogTarget(Trace, gc, nmethod) lt;
      if (lt.is_enabled()) {
        LogStream ls(lt);
        CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
      }
      // End if self-loop has been detected.
    } while (cur != next);
  }
  log_trace(gc, nmethod)("oops_do_marking_epilogue");
}
inline bool includes(void* p, void* from, void* to) {
  return from <= p && p < to;
}

void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
  assert(count >= 2, "must be sentinel values, at least");

#ifdef ASSERT
  // must be sorted and unique; we do a binary search in find_pc_desc()
  int prev_offset = pcs[0].pc_offset();
  assert(prev_offset == PcDesc::lower_offset_limit,
         "must start with a sentinel");
  for (int i = 1; i < count; i++) {
    int this_offset = pcs[i].pc_offset();
    assert(this_offset > prev_offset, "offsets must be sorted");
    prev_offset = this_offset;
  }
  assert(prev_offset == PcDesc::upper_offset_limit,
         "must end with a sentinel");
#endif //ASSERT

  // Search for MethodHandle invokes and tag the nmethod.
  for (int i = 0; i < count; i++) {
    if (pcs[i].is_method_handle_invoke()) {
      set_has_method_handle_invokes(true);
      break;
    }
  }
  assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler");

  int size = count * sizeof(PcDesc);
  assert(scopes_pcs_size() >= size, "oob");
  memcpy(scopes_pcs_begin(), pcs, size);

  // Adjust the final sentinel downward.
  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
  last_pc->set_pc_offset(content_size() + 1);
  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
    // Fill any rounding gaps with copies of the last record.
    last_pc[1] = last_pc[0];
  }
  // The following assert could fail if sizeof(PcDesc) is not
  // an integral multiple of oopSize (the rounding term).
  // If it fails, change the logic to always allocate a multiple
  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
}
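// Worked example: for count == 4 the incoming array is
//   [lower_offset_limit, off_a, off_b, upper_offset_limit]
// with strictly increasing offsets. After the copy, the trailing sentinel is
// pulled down to content_size() + 1 so no real PC can match it, and any
// rounding slack at the end of the section is filled with copies of that last
// record, keeping the table sorted for the binary search in find_pc_desc().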
void nmethod::copy_scopes_data(u_char* buffer, int size) {
  assert(scopes_data_size() >= size, "oob");
  memcpy(scopes_data_begin(), buffer, size);
}

#ifndef PRODUCT
static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
  PcDesc* res = nullptr;
  assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
         "must start with a sentinel");
  // lower + 1 to exclude initial sentinel
  for (PcDesc* p = lower + 1; p < upper; p++) {
    NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
    if (match_desc(p, pc_offset, approximate)) {
      if (res == nullptr) {
        res = p;
      } else {
        res = (PcDesc*) badAddress;
      }
    }
  }
  return res;
}
// Version of the method that also collects statistics
PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
                                      PcDesc* lower, PcDesc* upper) {
  ++pc_nmethod_stats.pc_desc_queries;
  if (approximate) ++pc_nmethod_stats.pc_desc_approx;

  PcDesc* desc = _pc_desc_cache.last_pc_desc();
  assert(desc != nullptr, "PcDesc cache should be initialized already");
  if (desc->pc_offset() == (pc - code_begin)) {
    // Cached value matched
    ++pc_nmethod_stats.pc_desc_tests;
    ++pc_nmethod_stats.pc_desc_repeats;
    return desc;
  }
  return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
}
#endif // !PRODUCT
// Finds a PcDesc with real-pc equal to "pc"
PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
                                               PcDesc* lower_incl, PcDesc* upper_incl) {
  if ((pc < code_begin) ||
      (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
    return nullptr;  // PC is wildly out of range
  }
  int pc_offset = (int) (pc - code_begin);

  // Check the PcDesc cache if it contains the desired PcDesc
  // (This has an almost 100% hit rate.)
  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
  if (res != nullptr) {
    assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
    return res;
  }

  // Fallback algorithm: quasi-linear search for the PcDesc
  // Find the last pc_offset less than the given offset.
  // The successor must be the required match, if there is a match at all.
  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
  PcDesc* lower = lower_incl;     // this is initial sentinel
  PcDesc* upper = upper_incl - 1; // exclude final sentinel
  if (lower >= upper) return nullptr;  // no PcDescs at all

#define assert_LU_OK \
  /* invariant on lower..upper during the following search: */ \
  assert(lower->pc_offset() <  pc_offset, "sanity"); \
  assert(upper->pc_offset() >= pc_offset, "sanity")
  assert_LU_OK;

  // Use the last successful return as a split point.
  PcDesc* mid = _pc_desc_cache.last_pc_desc();
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
  if (mid->pc_offset() < pc_offset) {
    lower = mid;
  } else {
    upper = mid;
  }

  // Take giant steps at first (4096, then 256, then 16, then 1)
  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
  const int RADIX = (1 << LOG2_RADIX);
  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
    while ((mid = lower + step) < upper) {
      assert_LU_OK;
      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
      if (mid->pc_offset() < pc_offset) {
        lower = mid;
      } else {
        upper = mid;
        break;
      }
    }
    assert_LU_OK;
  }

  // Sneak up on the value with a linear search of length ~16.
  while (true) {
    assert_LU_OK;
    mid = lower + 1;
    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
    if (mid->pc_offset() < pc_offset) {
      lower = mid;
    } else {
      upper = mid;
      break;
    }
  }
#undef assert_LU_OK

  if (match_desc(upper, pc_offset, approximate)) {
    assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
    if (!Thread::current_in_asgct()) {
      // we don't want to modify the cache if we're in ASGCT
      // which is typically called in a signal handler
      _pc_desc_cache.add_pc_desc(upper);
    }
    return upper;
  } else {
    assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
    return nullptr;
  }
}
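// Note on the search above: in product builds (LOG2_RADIX == 4) the radix
// phase probes in strides of 4096, then 256, then 16 PcDescs, one radix digit
// per round, and the final phase walks linearly at most ~16 entries. The
// scheme needs no division, touches few cache lines, and reuses the last hit
// as its initial pivot.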
bool nmethod::check_dependency_on(DepChange& changes) {
  // What has happened:
  // 1) a new class dependee has been added
  // 2) dependee and all its super classes have been marked
  bool found_check = false;  // set true if we are upset
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    // Evaluate only relevant dependencies.
    if (deps.spot_check_dependency_at(changes) != nullptr) {
      found_check = true;
      NOT_DEBUG(break);
    }
  }
  return found_check;
}

// Called from mark_for_deoptimization, when dependee is invalidated.
bool nmethod::is_dependent_on_method(Method* dependee) {
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.type() != Dependencies::evol_method)
      continue;
    Method* method = deps.method_argument(0);
    if (method == dependee) return true;
  }
  return false;
}
void nmethod_init() {
  // make sure you didn't forget to adjust the filler fields
  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
}
// -----------------------------------------------------------------------------
// Verification

class VerifyOopsClosure: public OopClosure {
  nmethod* _nm;
  bool     _ok;
public:
  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
  bool ok() { return _ok; }
  virtual void do_oop(oop* p) {
    if (oopDesc::is_oop_or_null(*p)) return;
    // Print diagnostic information before calling print_nmethod().
    // Assertions therein might prevent call from returning.
    tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
    if (_ok) {
      _nm->print_nmethod(true);
      _ok = false;
    }
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

class VerifyMetadataClosure: public MetadataClosure {
public:
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      assert(!method->is_old(), "Should not be installing old methods");
    }
  }
};
void nmethod::verify() {
  if (is_not_entrant())
    return;

  // Make sure all the entry points are correctly aligned for patching.
  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());

  // assert(oopDesc::is_oop(method()), "must be valid");

  ResourceMark rm;

  if (!CodeCache::contains(this)) {
    fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
  }

  if (is_native_method())
    return;

  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
  if (nm != this) {
    fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
  }

  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (!p->verify(this)) {
      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
    }
  }

#ifdef ASSERT
  {
    // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
    ImmutableOopMapSet* oms = oop_maps();
    ImplicitExceptionTable implicit_table(this);
    for (uint i = 0; i < implicit_table.len(); i++) {
      int exec_offset = (int) implicit_table.get_exec_offset(i);
      if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
        assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
        bool found = false;
        for (int i = 0, imax = oms->count(); i < imax; i++) {
          if (oms->pair_at(i)->pc_offset() == exec_offset) {
            found = true;
            break;
          }
        }
        assert(found, "missing oopmap");
      }
    }
  }
#endif

  VerifyOopsClosure voc(this);
  oops_do(&voc);
  assert(voc.ok(), "embedded oops must be OK");
  Universe::heap()->verify_nmethod(this);

  assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
         nm->method()->external_name(), p2i(_oops_do_mark_link));

  verify_scopes();

  CompiledICLocker nm_verify(this);
  VerifyMetadataClosure vmc;
  metadata_do(&vmc);
}
void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {

  // Verify IC only when nmethod installation is finished.
  if (!is_not_installed()) {
    if (CompiledICLocker::is_safe(this)) {
      if (is_inline_cache) {
        CompiledIC_at(this, call_site);
      } else {
        CompiledDirectCall::at(call_site);
      }
    } else {
      CompiledICLocker ml_verify(this);
      if (is_inline_cache) {
        CompiledIC_at(this, call_site);
      } else {
        CompiledDirectCall::at(call_site);
      }
    }
  }

  HandleMark hm(Thread::current());

  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
  assert(pd != nullptr, "PcDesc must exist");
  for (ScopeDesc* sd = new ScopeDesc(this, pd);
       !sd->is_top(); sd = sd->sender()) {
    sd->verify();
  }
}
void nmethod::verify_scopes() {
  if (!method()) return;              // Runtime stubs have no scope
  if (method()->is_native()) return;  // Ignore stub methods.
  // iterate through all interrupt points
  // and verify the debug information is valid.
  RelocIterator iter(this);
  while (iter.next()) {
    address stub = nullptr;
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
        verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
        break;
      case relocInfo::opt_virtual_call_type:
        stub = iter.opt_virtual_call_reloc()->static_stub();
        verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
        break;
      case relocInfo::static_call_type:
        stub = iter.static_call_reloc()->static_stub();
        verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
        break;
      case relocInfo::runtime_call_type:
      case relocInfo::runtime_call_w_cp_type: {
        address destination = iter.reloc()->value();
        // Right now there is no way to find out which entries support
        // an interrupt point.  It would be nice if we had this
        // information in a table.
        break;
      }
      default:
        break;
    }
    assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
  }
}
// -----------------------------------------------------------------------------
// Printing operations

void nmethod::print() const {
  ttyLocker ttyl;  // keep the following output all in one block
  print(tty);
}

void nmethod::print(outputStream* st) const {
  ResourceMark rm;

  st->print("Compiled method ");

  if (is_compiled_by_c1()) {
    st->print("(c1) ");
  } else if (is_compiled_by_c2()) {
    st->print("(c2) ");
  } else if (is_compiled_by_jvmci()) {
    st->print("(JVMCI) ");
  } else {
    st->print("(n/a) ");
  }

  print_on(st, nullptr);

  if (WizardMode) {
    st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
    st->print(" for method " INTPTR_FORMAT , p2i(method()));
    st->print(" { ");
    st->print_cr("%s ", state());
    st->print_cr("}:");
  }
  if (size              () > 0) st->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(this),
                                             p2i(this) + size(),
                                             size());
  if (relocation_size   () > 0) st->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(relocation_begin()),
                                             p2i(relocation_end()),
                                             relocation_size());
  if (consts_size       () > 0) st->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(consts_begin()),
                                             p2i(consts_end()),
                                             consts_size());
  if (insts_size        () > 0) st->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(insts_begin()),
                                             p2i(insts_end()),
                                             insts_size());
  if (stub_size         () > 0) st->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(stub_begin()),
                                             p2i(stub_end()),
                                             stub_size());
  if (oops_size         () > 0) st->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(oops_begin()),
                                             p2i(oops_end()),
                                             oops_size());
  if (metadata_size     () > 0) st->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(metadata_begin()),
                                             p2i(metadata_end()),
                                             metadata_size());
#if INCLUDE_JVMCI
  if (jvmci_data_size   () > 0) st->print_cr(" JVMCI data     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(jvmci_data_begin()),
                                             p2i(jvmci_data_end()),
                                             jvmci_data_size());
#endif
  if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(immutable_data_begin()),
                                             p2i(immutable_data_end()),
                                             immutable_data_size());
  if (dependencies_size () > 0) st->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(dependencies_begin()),
                                             p2i(dependencies_end()),
                                             dependencies_size());
  if (nul_chk_table_size() > 0) st->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(nul_chk_table_begin()),
                                             p2i(nul_chk_table_end()),
                                             nul_chk_table_size());
  if (handler_table_size() > 0) st->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(handler_table_begin()),
                                             p2i(handler_table_end()),
                                             handler_table_size());
  if (scopes_pcs_size   () > 0) st->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(scopes_pcs_begin()),
                                             p2i(scopes_pcs_end()),
                                             scopes_pcs_size());
  if (scopes_data_size  () > 0) st->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(scopes_data_begin()),
                                             p2i(scopes_data_end()),
                                             scopes_data_size());
#if INCLUDE_JVMCI
  if (speculations_size () > 0) st->print_cr(" speculations   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                             p2i(speculations_begin()),
                                             p2i(speculations_end()),
                                             speculations_size());
#endif
}
void nmethod::print_code() {
  ResourceMark m;
  ttyLocker ttyl;
  // Call the specialized decode method of this class.
  decode(tty);
}
#ifndef PRODUCT  // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN

void nmethod::print_dependencies_on(outputStream* out) {
  ResourceMark rm;
  stringStream st;
  st.print_cr("Dependencies:");
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    deps.print_dependency(&st);
    InstanceKlass* ctxk = deps.context_type();
    if (ctxk != nullptr) {
      if (ctxk->is_dependent_nmethod(this)) {
        st.print_cr("   [nmethod<=klass]%s", ctxk->external_name());
      }
    }
    deps.log_dependency();  // put it into the xml log also
  }
  out->print_raw(st.as_string());
}
#endif
#if defined(SUPPORT_DATA_STRUCTS)

// Print the oops from the underlying CodeBlob.
void nmethod::print_oops(outputStream* st) {
  ResourceMark m;
  st->print("Oops:");
  if (oops_begin() < oops_end()) {
    st->cr();
    for (oop* p = oops_begin(); p < oops_end(); p++) {
      Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
      st->print(PTR_FORMAT " ", *((uintptr_t*)p));
      if (Universe::contains_non_oop_word(p)) {
        st->print_cr("NON_OOP");
        continue;  // skip non-oops
      }
      if (*p == nullptr) {
        st->print_cr("nullptr-oop");
        continue;  // skip non-oops
      }
      (*p)->print_value_on(st);
      st->cr();
    }
  } else {
    st->print_cr(" <list empty>");
  }
}
// Print metadata pool.
void nmethod::print_metadata(outputStream* st) {
  ResourceMark m;
  st->print("Metadata:");
  if (metadata_begin() < metadata_end()) {
    st->cr();
    for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
      Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
      st->print(PTR_FORMAT " ", *((uintptr_t*)p));
      if (*p && *p != Universe::non_oop_word()) {
        (*p)->print_value_on(st);
      }
      st->cr();
    }
  } else {
    st->print_cr(" <list empty>");
  }
}
#endif
#ifndef PRODUCT  // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
void nmethod::print_scopes_on(outputStream* st) {
  // Find the first pc desc for all scopes in the code and print it.
  ResourceMark rm;
  st->print("scopes:");
  if (scopes_pcs_begin() < scopes_pcs_end()) {
    st->cr();
    for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
      if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
        continue;

      ScopeDesc* sd = scope_desc_at(p->real_pc(this));
      while (sd != nullptr) {
        sd->print_on(st, p);  // print output ends with a newline
        sd = sd->sender();
      }
    }
  } else {
    st->print_cr(" <list empty>");
  }
}
#endif
#ifndef PRODUCT  // RelocIterator supports printing only then.
void nmethod::print_relocations() {
  ResourceMark m;  // in case methods get printed via the debugger
  tty->print_cr("relocations:");
  RelocIterator iter(this);
  iter.print();
}
#endif
void nmethod::print_pcs_on(outputStream* st) {
  ResourceMark m;  // in case methods get printed via debugger
  st->print("pc-bytecode offsets:");
  if (scopes_pcs_begin() < scopes_pcs_end()) {
    st->cr();
    for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
      p->print_on(st, this);  // print output ends with a newline
    }
  } else {
    st->print_cr(" <list empty>");
  }
}
void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print(code_begin());
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}
void nmethod::print_recorded_oop(int log_n, int i) {
  void* value;

  if (i == 0) {
    value = nullptr;
  } else {
    // Be careful around non-oop words. Don't create an oop
    // with that value, or it will assert in verification code.
    if (Universe::contains_non_oop_word(oop_addr_at(i))) {
      value = Universe::non_oop_word();
    } else {
      value = oop_at(i);
    }
  }

  tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));

  if (value == Universe::non_oop_word()) {
    tty->print("non-oop word");
  } else {
    if (value == nullptr) {
      tty->print("nullptr-oop");
    } else {
      oop_at(i)->print_value_on(tty);
    }
  }

  tty->cr();
}
void nmethod::print_recorded_oops() {
  const int n = oops_count();
  const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
  tty->print("Recorded oops:");
  if (n > 0) {
    tty->cr();
    for (int i = 0; i < n; i++) {
      print_recorded_oop(log_n, i);
    }
  } else {
    tty->print_cr(" <list empty>");
  }
}
void nmethod::print_recorded_metadata() {
  const int n = metadata_count();
  const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
  tty->print("Recorded metadata:");
  if (n > 0) {
    tty->cr();
    for (int i = 0; i < n; i++) {
      Metadata* m = metadata_at(i);
      tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
      if (m == (Metadata*)Universe::non_oop_word()) {
        tty->print("non-metadata word");
      } else if (m == nullptr) {
        tty->print("nullptr-oop");
      } else {
        Metadata::print_value_on_maybe_null(tty, m);
      }
      tty->cr();
    }
  } else {
    tty->print_cr(" <list empty>");
  }
}
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)

void nmethod::print_constant_pool(outputStream* st) {
  //-----------------------------------
  //---<  Print the constant pool  >---
  //-----------------------------------
  int consts_size = this->consts_size();
  if (consts_size > 0) {
    unsigned char* cstart = this->consts_begin();
    unsigned char* cp     = cstart;
    unsigned char* cend   = cp + consts_size;
    unsigned int   bytes_per_line = 4;
    unsigned int   CP_alignment   = 8;
    unsigned int   n;

    st->cr();

    //---<  print CP header to make clear what's printed  >---
    if (((uintptr_t)cp & (CP_alignment-1)) == 0) {
      n = bytes_per_line;
      st->print_cr("[Constant Pool]");
      Disassembler::print_location(cp, cstart, cend, st, true, true);
      Disassembler::print_hexdata(cp, n, st, true);
      st->cr();
    } else {
      n = (int)((uintptr_t)cp & (bytes_per_line-1));
      st->print_cr("[Constant Pool (unaligned)]");
    }

    //---<  print CP contents, bytes_per_line at a time  >---
    while (cp < cend) {
      Disassembler::print_location(cp, cstart, cend, st, true, false);
      Disassembler::print_hexdata(cp, n, st, false);
      cp += n;
      n   = bytes_per_line;
      st->cr();
    }

    //---<  Show potential alignment gap between constant pool and code  >---
    cend = code_begin();
    if (cp < cend) {
      n = 4;
      st->print_cr("[Code entry alignment]");
      while (cp < cend) {
        Disassembler::print_location(cp, cstart, cend, st, false, false);
        cp += n;
        st->cr();
      }
    }
  } else {
    st->print_cr("[Constant Pool (empty)]");
  }
  st->cr();
}
// Disassemble this nmethod.
// Print additional debug information, if requested. This could be code
// comments, block comments, profiling counters, etc.
// The undisassembled format is useful if no disassembler library is available.
// The resulting hex dump (with markers) can be disassembled later, or on
// another system, when/where a disassembler library is available.
void nmethod::decode2(outputStream* ost) const {

  // Called from frame::back_trace_with_decode without ResourceMark.
  ResourceMark rm;

  // Make sure we have a valid stream to print on.
  outputStream* st = ost ? ost : tty;

#if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
  const bool use_compressed_format    = true;
  const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
                                                                  AbstractDisassembler::show_block_comment());
#else
  const bool use_compressed_format    = Disassembler::is_abstract();
  const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
                                                                  AbstractDisassembler::show_block_comment());
#endif

  st->cr();
  this->print(st);
  st->cr();
#if defined(SUPPORT_ASSEMBLY)
  //----------------------------------
  //---<  Print real disassembly  >---
  //----------------------------------
  if (! use_compressed_format) {
    st->print_cr("[Disassembly]");
    Disassembler::decode(const_cast<nmethod*>(this), st);
    st->bol();
    st->print_cr("[/Disassembly]");
    return;
  }
#endif

#if defined(SUPPORT_ABSTRACT_ASSEMBLY)

  // Compressed undisassembled disassembly format.
  // The following status values are defined/supported:
  //   = 0 - currently at bol() position, nothing printed yet on current line.
  //   = 1 - currently at position after print_location().
  //   > 1 - in the midst of printing instruction stream bytes.
  int        compressed_format_idx    = 0;
  int        code_comment_column      = 0;
  const int  instr_maxlen             = Assembler::instr_maxlen();
  const uint tabspacing               = 8;
  unsigned char* start = this->code_begin();
  unsigned char* p     = this->code_begin();
  unsigned char* end   = this->code_end();
  unsigned char* pss   = p; // start of a code section (used for offsets)

  if ((start == nullptr) || (end == nullptr)) {
    st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
    return;
  }
#endif

#if defined(SUPPORT_ABSTRACT_ASSEMBLY)
  //---<  plain abstract disassembly, no comments or anything, just section headers  >---
  if (use_compressed_format && ! compressed_with_comments) {
    const_cast<nmethod*>(this)->print_constant_pool(st);

    //---<  Open the output (Marker for post-mortem disassembler)  >---
    st->print_cr("[MachCode]");
    const char* header = nullptr;
    address p0 = (address)p;
    while (p < end) {
      address pp = (address)p;
      while ((p < end) && (header == nullptr)) {
        header = nmethod_section_label((address)p);
        pp = (address)p;
        p += Assembler::instr_len(p);
      }
      if (p0 != pp) {
        AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
        p0 = pp;  // step up to the already decoded position
      } else if (header != nullptr) {
        st->bol();
        st->print_cr("%s", header);
        header = nullptr;
      }
    }
    //---<  Close the output (Marker for post-mortem disassembler)  >---
    st->print_cr("[/MachCode]");
    return;
  }
#endif
#if defined(SUPPORT_ABSTRACT_ASSEMBLY)
  //---< abstract disassembly with comments and section headers merged in >---
  if (compressed_with_comments) {
    const_cast<nmethod*>(this)->print_constant_pool(st);

    //---< Open the output (Marker for post-mortem disassembler) >---
    st->print_cr("[MachCode]");
    while ((p < end) && (p != nullptr)) {
      const int instruction_size_in_bytes = Assembler::instr_len(p);

      //---< Block comments for nmethod. Interrupts instruction stream, if any. >---
      // Outputs a bol() before and a cr() after, but only if a comment is printed.
      // Prints nmethod_section_label as well.
      if (AbstractDisassembler::show_block_comment()) {
        print_block_comment(st, p);
        if (st->position() == 0) {
          compressed_format_idx = 0;
        }
      }

      //---< New location information after line break >---
      if (compressed_format_idx == 0) {
        code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
        compressed_format_idx = 1;
      }

      //---< Code comment for current instruction. Address range [p..(p+len)) >---
      unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
      S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end

      if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
        //---< interrupt instruction byte stream for code comment >---
        if (compressed_format_idx > 1) {
          st->cr();  // interrupt byte stream
          st->cr();  // add an empty line
          code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
        }
        const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end);
        st->bol();
        compressed_format_idx = 0;
      }

      //---< New location information after line break >---
      if (compressed_format_idx == 0) {
        code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
        compressed_format_idx = 1;
      }

      //---< Nicely align instructions for readability >---
      if (compressed_format_idx > 1) {
        Disassembler::print_delimiter(st);
      }

      //---< Now, finally, print the actual instruction bytes >---
      unsigned char* p0 = p;
      p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
      compressed_format_idx += (int)(p - p0);

      if (Disassembler::start_newline(compressed_format_idx-1)) {
        st->cr();
        compressed_format_idx = 0;
      }
    }
    //---< Close the output (Marker for post-mortem disassembler) >---
    st->print_cr("[/MachCode]");
    return;
  }
#endif
}
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)

const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  RelocIterator iter(this, begin, end);
  bool have_one = false;
  while (iter.next()) {
    have_one = true;
    switch (iter.type()) {
    case relocInfo::none: return "no_reloc";
    case relocInfo::oop_type: {
      // Get a non-resizable resource-allocated stringStream.
      // Our callees make use of (nested) ResourceMarks.
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      oop_Relocation* r = iter.oop_reloc();
      oop obj = r->oop_value();
      st.print("oop(");
      if (obj == nullptr) st.print("nullptr");
      else obj->print_value_on(&st);
      st.print(")");
      return st.as_string();
    }
    case relocInfo::metadata_type: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* obj = r->metadata_value();
      st.print("metadata(");
      if (obj == nullptr) st.print("nullptr");
      else obj->print_value_on(&st);
      st.print(")");
      return st.as_string();
    }
    case relocInfo::runtime_call_type:
    case relocInfo::runtime_call_w_cp_type: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print("runtime_call");
      CallRelocation* r = (CallRelocation*)iter.reloc();
      address dest = r->destination();
      CodeBlob* cb = CodeCache::find_blob(dest);
      if (cb != nullptr) {
        st.print(" %s", cb->name());
      } else {
        const int buflen = 1024;
        char* buf = NEW_RESOURCE_ARRAY(char, buflen);
        int offset;
        if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
          st.print(" %s", buf);
          if (offset != 0) {
            st.print("+%d", offset);
          }
        }
      }
      return st.as_string();
    }
    case relocInfo::virtual_call_type: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print_raw("virtual_call");
      virtual_call_Relocation* r = iter.virtual_call_reloc();
      Method* m = r->method_value();
      if (m != nullptr) {
        assert(m->is_method(), "");
        m->print_short_name(&st);
      }
      return st.as_string();
    }
    case relocInfo::opt_virtual_call_type: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print_raw("optimized virtual_call");
      opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
      Method* m = r->method_value();
      if (m != nullptr) {
        assert(m->is_method(), "");
        m->print_short_name(&st);
      }
      return st.as_string();
    }
    case relocInfo::static_call_type: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print_raw("static_call");
      static_call_Relocation* r = iter.static_call_reloc();
      Method* m = r->method_value();
      if (m != nullptr) {
        assert(m->is_method(), "");
        m->print_short_name(&st);
      }
      return st.as_string();
    }
    case relocInfo::static_stub_type:     return "static_stub";
    case relocInfo::external_word_type:   return "external_word";
    case relocInfo::internal_word_type:   return "internal_word";
    case relocInfo::section_word_type:    return "section_word";
    case relocInfo::poll_type:            return "poll";
    case relocInfo::poll_return_type:     return "poll_return";
    case relocInfo::trampoline_stub_type: return "trampoline_stub";
    case relocInfo::entry_guard_type:     return "entry_guard";
    case relocInfo::post_call_nop_type:   return "post_call_nop";
    case relocInfo::barrier_type: {
      barrier_Relocation* const reloc = iter.barrier_reloc();
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print("barrier format=%d", reloc->format());
      return st.as_string();
    }
    case relocInfo::type_mask: return "type_bit_mask";

    default: {
      stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
      st.print("unknown relocInfo=%d", (int) iter.type());
      return st.as_string();
    }
    }
  }
  return have_one ? "other" : nullptr;
}
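// Typical strings produced above (illustrative): "oop(a 'java/lang/String'...)",
// "runtime_call SharedRuntime::...", "virtual_call java.util.List.size",
// "poll_return". A nullptr return means no relocation was found in [begin..end).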
// Return the last scope in (begin..end]
ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  PcDesc* p = pc_desc_near(begin+1);
  if (p != nullptr && p->real_pc(this) <= end) {
    return new ScopeDesc(this, p);
  }
  return nullptr;
}
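// Example of the (begin..end] convention: a PcDesc recorded at the return
// address (the byte after a call) is still attributed to the call instruction
// itself, which is what pc_desc_near(begin+1) combined with the
// 'real_pc <= end' check achieves.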
const char* nmethod::nmethod_section_label(address pos) const {
  const char* label = nullptr;
  if (pos == code_begin())                                              label = "[Instructions begin]";
  if (pos == entry_point())                                             label = "[Entry Point]";
  if (pos == verified_entry_point())                                    label = "[Verified Entry Point]";
  if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]";
  if (pos == consts_begin() && pos != insts_begin())                    label = "[Constants]";
  // Check stub_code before checking exception_handler or deopt_handler.
  if (pos == this->stub_begin())                                        label = "[Stub Code]";
  if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin())          label = "[Exception Handler]";
  if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
  return label;
}
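// The labels above appear in PrintAssembly/abstract disassembly output, e.g.
//   [Entry Point]
//   [Verified Entry Point]
//   [Stub Code]
// Later matches intentionally overwrite earlier ones, so an address that is
// both stub_begin() and exception_begin() is labeled as the exception handler.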
void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
  if (print_section_labels) {
    const char* label = nmethod_section_label(block_begin);
    if (label != nullptr) {
      stream->bol();
      stream->print_cr("%s", label);
    }
  }

  if (block_begin == entry_point()) {
    Method* m = method();
    if (m != nullptr) {
      stream->print(" # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m != nullptr && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "");
      }
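      // Worked example (illustrative): for an instance method 'long f(int, long)',
      // sig_bt is collected as { T_OBJECT /*this*/, T_INT, T_LONG, T_VOID }; the
      // T_VOID entry pads the two-slot long so sig_index ends equal to sizeargs.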
      const char* spname = "sp"; // make arch-specific?
      SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print(" # this: ");
        else
          stream->print(" # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid()) {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset) at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (!at_this && ss.is_reference()) {
            Symbol* name = ss.as_symbol();
            name->print_value_on(stream);
            did_name = true;
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print(" (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this) ss.next();
      }
      if (!did_old_sp) {
        stream->print(" # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print(" (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}
// Returns whether this nmethod has code comments.
bool nmethod::has_code_comment(address begin, address end) {
  // scopes?
  ScopeDesc* sd = scope_desc_in(begin, end);
  if (sd != nullptr) return true;

  // relocations?
  const char* str = reloc_string_for(begin, end);
  if (str != nullptr) return true;

  // implicit exceptions?
  int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
  if (cont_offset != 0) return true;

  return false;
}
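// Used by decode2() to decide whether the instruction byte stream must be
// interrupted for a comment: a range carries one if it has debug scopes,
// relocations, or an implicit-exception continuation entry.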
void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
  ImplicitExceptionTable implicit_table(this);
  int pc_offset = (int)(begin - code_begin());
  int cont_offset = implicit_table.continuation_offset(pc_offset);
  bool oop_map_required = false;
  if (cont_offset != 0) {
    st->move_to(column, 6, 0);
    if (pc_offset == cont_offset) {
      st->print("; implicit exception: deoptimizes");
      oop_map_required = true;
    } else {
      st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
    }
  }

  // Find an oopmap in (begin, end]. We use the odd half-closed
  // interval so that oop maps and scope descs which are tied to the
  // byte after a call are printed with the call itself. OopMaps
  // associated with implicit exceptions are printed with the implicit
  // instruction.
  address base = code_begin();
  ImmutableOopMapSet* oms = oop_maps();
  if (oms != nullptr) {
    for (int i = 0, imax = oms->count(); i < imax; i++) {
      const ImmutableOopMapPair* pair = oms->pair_at(i);
      const ImmutableOopMap* om = pair->get_from(oms);
      address pc = base + pair->pc_offset();
      if (pc >= begin) {
#if INCLUDE_JVMCI
        bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
#else
        bool is_implicit_deopt = false;
#endif
        if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
          st->move_to(column, 6, 0);
          st->print("; ");
          om->print_on(st);
          oop_map_required = false;
        }
      }
      if (pc > end) {
        break;
      }
    }
  }
  assert(!oop_map_required, "missed oopmap");

  Thread* thread = Thread::current();

  // Print any debug info present at this pc.
  ScopeDesc* sd = scope_desc_in(begin, end);
  if (sd != nullptr) {
    st->move_to(column, 6, 0);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else if (sd->bci() == AfterBci) {
      st->print(";* method exit (unlocked if synchronized)");
    } else if (sd->bci() == UnwindBci) {
      st->print(";* unwind (locked if synchronized)");
    } else if (sd->bci() == AfterExceptionBci) {
      st->print(";* unwind (unlocked if synchronized)");
    } else if (sd->bci() == UnknownBci) {
      st->print(";* unknown");
    } else if (sd->bci() == InvalidFrameStateBci) {
      st->print(";* invalid frame state");
    } else {
      if (sd->method() == nullptr) {
        st->print("method is nullptr");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
            st->print(" ");
            if (invoke.name() != nullptr)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
            st->print(" ");
            if (field.name() != nullptr)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        default:
          break;
        }
      }
      st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
    }

    // Print all scopes
    for (; sd != nullptr; sd = sd->sender()) {
      st->move_to(column, 6, 0);
      st->print("; -");
      if (sd->should_reexecute()) {
        st->print(" (reexecute)");
      }
      if (sd->method() == nullptr) {
        st->print("method is nullptr");
      } else {
        sd->method()->print_short_name(st);
      }
      int lineno = sd->method()->line_number_from_bci(sd->bci());
      if (lineno != -1) {
        st->print("@%d (line %d)", sd->bci(), lineno);
      } else {
        st->print("@%d", sd->bci());
      }
      st->cr();
    }
  }

  // Print relocation information.
  // Prevent memory leak: allocating without ResourceMark.
  ResourceMark rm;
  const char* str = reloc_string_for(begin, end);
  if (str != nullptr) {
    if (sd != nullptr) st->cr();
    st->move_to(column, 6, 0);
    st->print("; {%s}", str);
  }
}

#endif

address nmethod::call_instruction_address(address pc) const {
  if (NativeCall::is_call_before(pc)) {
    NativeCall *ncall = nativeCall_before(pc);
    return ncall->instruction_address();
  }
  return nullptr;
}
#if defined(SUPPORT_DATA_STRUCTS)
void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, nullptr);
}
#endif
void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type: {
      CompiledICLocker ml_verify(this);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
    case relocInfo::opt_virtual_call_type:
      st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
      CompiledDirectCall::at(iter.reloc())->print();
      break;
    default:
      break;
    }
  }
}
void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != nullptr) xtty->head("statistics type='nmethod'");
  native_nmethod_stats.print_native_nmethod_stats();
#ifdef COMPILER1
  c1_java_nmethod_stats.print_nmethod_stats("C1");
#endif
#ifdef COMPILER2
  c2_java_nmethod_stats.print_nmethod_stats("C2");
#endif
#if INCLUDE_JVMCI
  jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
#endif
  unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
  DebugInformationRecorder::print_statistics();
  pc_nmethod_stats.print_pc_stats();
  Dependencies::print_statistics();
  ExternalsRecorder::print_statistics();
  if (xtty != nullptr) xtty->tail("statistics");
}
#if INCLUDE_JVMCI
void nmethod::update_speculation(JavaThread* thread) {
  jlong speculation = thread->pending_failed_speculation();
  if (speculation != 0) {
    guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
    jvmci_nmethod_data()->add_failed_speculation(this, speculation);
    thread->set_pending_failed_speculation(0);
  }
}

const char* nmethod::jvmci_name() {
  if (jvmci_nmethod_data() != nullptr) {
    return jvmci_nmethod_data()->name();
  }
  return nullptr;
}
#endif