/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
27
#include "compiler/oopMap.hpp"
28
#include "interpreter/interpreter.hpp"
29
#include "memory/resourceArea.hpp"
30
#include "memory/universe.hpp"
31
#include "oops/markWord.hpp"
32
#include "oops/method.hpp"
33
#include "oops/oop.inline.hpp"
34
#include "prims/methodHandles.hpp"
35
#include "runtime/frame.inline.hpp"
36
#include "runtime/handles.inline.hpp"
37
#include "runtime/javaCalls.hpp"
38
#include "runtime/monitorChunk.hpp"
39
#include "runtime/os.inline.hpp"
40
#include "runtime/signature.hpp"
41
#include "runtime/stackWatermarkSet.hpp"
42
#include "runtime/stubCodeGenerator.hpp"
43
#include "runtime/stubRoutines.hpp"
44
#include "vmreg_aarch64.inline.hpp"
46
#include "c1/c1_Runtime1.hpp"
47
#include "runtime/vframeArray.hpp"
51
#ifdef ASSERT
// Debug-only validation hook invoked when a location is recorded in a
// RegisterMap; no extra checking is required on aarch64.
void RegisterMap::check_location_valid() {
}
#endif


// Profiling/safepoint support
// Conservatively decide whether it is safe to walk from this frame to its
// sender while profiling / at a safepoint. Returns false on anything that
// looks even slightly suspicious; never dereferences unvalidated memory.
bool frame::safe_for_sender(JavaThread *thread) {
  if (is_heap_frame()) {
    return true;
  }
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // sp must be within the usable part of the stack (not in guards)
  if (!thread->is_in_usable_stack(sp)) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds. So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed. The SP saved in the callee's
  // frame is the SP *before* this increment. So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So unextended sp must be within the stack but we need not to check
  // that unextended sp >= sp
  if (!thread->is_in_full_stack_checked(unextended_sp)) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  // second evaluation on fp+ is added to handle situation where fp is -1
  bool fp_safe = thread->is_in_stack_range_excl(fp, sp) &&
                 thread->is_in_full_stack_checked(fp + (return_addr_offset * sizeof(void*)));

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != nullptr ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    } else if (is_upcall_stub_frame()) {
      return fp_safe;
    }

    intptr_t* sender_sp = nullptr;
    intptr_t* sender_unextended_sp = nullptr;
    address   sender_pc = nullptr;
    intptr_t* saved_fp = nullptr;

    if (is_interpreted_frame()) {
      // fp must be safe: the slots below are read through it
      if (!fp_safe) {
        return false;
      }

      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];
      sender_pc = pauth_strip_verifiable((address) this->fp()[return_addr_offset]);
    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if (!thread->is_in_full_stack_checked((address)sender_sp)) {
        return false;
      }
      sender_unextended_sp = sender_sp;
      // Note: frame::sender_sp_offset is only valid for compiled frame
      saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
      // Note: PAC authentication may fail in case broken frame is passed in.
      // Just strip it for now.
      sender_pc = pauth_strip_pointer((address) *(sender_sp - 1));
    }

    if (Continuation::is_return_barrier_entry(sender_pc)) {
      // If our sender_pc is the return barrier, then our "real" sender is the continuation entry
      frame s = Continuation::continuation_bottom_sender(thread, *this, sender_sp);
      sender_sp = s.sp();
      sender_pc = s.pc();
    }

    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
      // is really a frame pointer.

      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);
    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
    if (sender_pc == nullptr || sender_blob == nullptr) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
    } else if (sender_blob->is_upcall_stub()) {
      return false;
    }

    nmethod* nm = sender_blob->as_nmethod_or_null();
    if (nm != nullptr) {
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.
    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.
    if (!sender_blob->is_nmethod()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe
  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)
  if ( (address) this->fp()[return_addr_offset] == nullptr) return false;

  // could try and do some more potential verification of native frame if we could think of some...
  return true;
}
// Patches the return address slot of this frame (e.g. for deoptimization),
// re-signing it when ROP protection (PAC) is active, and updates the
// frame's deopt state accordingly.
void frame::patch_pc(Thread* thread, address pc) {
  assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
  address* pc_addr = &(((address*) sp())[-1]);
  address signed_pc = pauth_sign_return_address(pc);
  address pc_old = pauth_strip_verifiable(*pc_addr);

  if (TracePcPatching) {
    tty->print("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
               p2i(pc_addr), p2i(pc_old), p2i(pc));
    if (VM_Version::use_rop_protection()) {
      // show the signed (PAC-encoded) values as well
      tty->print(" [signed " INTPTR_FORMAT " -> " INTPTR_FORMAT "]", p2i(*pc_addr), p2i(signed_pc));
    }
    tty->cr();
  }

  assert(!Continuation::is_return_barrier_entry(pc_old), "return barrier");

  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == pc_old || pc == pc_old || pc_old == 0, "");
  DEBUG_ONLY(address old_pc = _pc;)
  *pc_addr = signed_pc;
  _pc = pc; // must be set before call to get_deopt_original_pc
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    assert(original_pc == old_pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    _pc = original_pc;
  } else {
    _deopt_state = not_deoptimized;
  }
}
// Returns the address of an entry-frame argument slot.
intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}
// Stores the locals pointer into the interpreter frame, relativized to fp.
void frame::interpreter_frame_set_locals(intptr_t* locs) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized locals
  ptr_at_put(interpreter_frame_locals_offset, (intptr_t) (locs - fp()));
}
// Reads the sender SP saved in this interpreter frame.
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}
// Stores the sender SP into this interpreter frame.
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}
// Address of the bottom (highest address) of the monitor block.
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
// Address of the top (lowest address) of the monitor block; the stored
// value is relativized to fp, hence at_relative().
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) at_relative(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
  return result;
}
// Stores the monitor-block top pointer, relativized to fp.
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized monitor_block_top
  ptr_at_put(interpreter_frame_monitor_block_top_offset, (intptr_t*)value - fp());
  // the relativized value must not point above the slot itself
  assert(at_absolute(interpreter_frame_monitor_block_top_offset) <= interpreter_frame_monitor_block_top_offset, "");
}
// Used by template based interpreter deoptimization
360
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
361
assert(is_interpreted_frame(), "interpreted frame expected");
362
// set relativized last_sp
363
ptr_at_put(interpreter_frame_last_sp_offset, sp != nullptr ? (sp - fp()) : 0);
366
// Used by template based interpreter deoptimization
367
void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
368
assert(is_interpreted_frame(), "interpreted frame expected");
369
// set relativized extended_sp
370
ptr_at_put(interpreter_frame_extended_sp_offset, (sp - fp()));
373
// Computes the sender of an entry frame by consulting the JavaFrameAnchor
// recorded in the frame's JavaCallWrapper.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  jfa->make_walkable();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != nullptr, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  fr.set_sp_is_trusted();

  return fr;
}
// Returns the FrameData area embedded in an upcall stub frame.
UpcallStub::FrameData* UpcallStub::frame_data_for_frame(const frame& frame) const {
  assert(frame.is_upcall_stub_frame(), "wrong frame");
  // need unextended_sp here, since normal sp is wrong for interpreter callees
  return reinterpret_cast<UpcallStub::FrameData*>(
    reinterpret_cast<address>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
}
bool frame::upcall_stub_frame_is_first() const {
399
assert(is_upcall_stub_frame(), "must be optimzed entry frame");
400
UpcallStub* blob = _cb->as_upcall_stub();
401
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
402
return jfa->last_Java_sp() == nullptr;
405
// Computes the sender of an upcall stub frame via its JavaFrameAnchor.
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");
  UpcallStub* blob = _cb->as_upcall_stub();
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  assert(!upcall_stub_frame_is_first(), "must have a frame anchor to go back to");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  jfa->make_walkable();
  assert(map->include_argument_oops(), "should be set by clear");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument. And it's only a debugging
  // purpose call anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif
//------------------------------------------------------------------------------
444
// frame::adjust_unextended_sp
446
void frame::adjust_unextended_sp() {
447
// On aarch64, sites calling method handle intrinsics and lambda forms are treated
448
// as any other call site. Therefore, no special action is needed when we are
449
// returning to any of these call sites.
451
if (_cb != nullptr) {
452
nmethod* sender_nm = _cb->as_nmethod_or_null();
453
if (sender_nm != nullptr) {
454
// If the sender PC is a deoptimization point, get the original PC.
455
if (sender_nm->is_deopt_entry(_pc) ||
456
sender_nm->is_deopt_mh_entry(_pc)) {
457
verify_deopt_original_pc(sender_nm, _unextended_sp);
465
//------------------------------------------------------------------------------
466
// frame::sender_for_interpreter_frame
467
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
468
// SP is the raw SP from the sender after adapter or interpreter
470
intptr_t* sender_sp = this->sender_sp();
472
// This is the sp before any possible extension (adapter/locals).
473
intptr_t* unextended_sp = interpreter_frame_sender_sp();
474
intptr_t* sender_fp = link();
476
#if COMPILER2_OR_JVMCI
477
if (map->update_map()) {
478
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
480
#endif // COMPILER2_OR_JVMCI
482
// For ROP protection, Interpreter will have signed the sender_pc,
483
// but there is no requirement to authenticate it here.
484
address sender_pc = pauth_strip_verifiable(sender_pc_maybe_signed());
486
if (Continuation::is_return_barrier_entry(sender_pc)) {
487
if (map->walk_cont()) { // about to walk into an h-stack
488
return Continuation::top_frame(*this, map);
490
return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
494
return frame(sender_sp, unextended_sp, sender_fp, sender_pc);
497
// Sanity-checks a potential interpreter frame: alignment, ordering of
// sp/fp, a plausible Method*, frame size, bci, cpCache and locals.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  Method* m = safe_interpreter_frame_method();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcp
  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (MetaspaceObj::is_valid(cp) == false) return false;

  // validate locals
  address locals = (address)interpreter_frame_locals();
  return thread->is_in_stack_range_incl(locals, (address)fp());
}
// Reads the interpreter frame's result (tos) value into oop_result /
// value_result according to the method's declared result type, and
// returns that type.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
      }
      assert(Universe::is_in_heap_or_null(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
      value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
// Address of an expression-stack slot relative to the top of stack.
intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}
#define DESCRIBE_FP_OFFSET(name) \
611
values.describe(frame_no, fp() + frame::name##_offset, #name)
613
void frame::describe_pd(FrameValues& values, int frame_no) {
614
if (is_interpreted_frame()) {
615
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
616
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
617
DESCRIBE_FP_OFFSET(interpreter_frame_method);
618
DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
619
DESCRIBE_FP_OFFSET(interpreter_frame_extended_sp);
620
DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
621
DESCRIBE_FP_OFFSET(interpreter_frame_cache);
622
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
623
DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
624
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
627
if (is_java_frame() || Continuation::is_continuation_enterSpecial(*this)) {
628
intptr_t* ret_pc_loc;
630
if (is_interpreted_frame()) {
631
ret_pc_loc = fp() + return_addr_offset;
634
ret_pc_loc = real_fp() - return_addr_offset;
635
fp_loc = real_fp() - sender_sp_offset;
637
address ret_pc = *(address*)ret_pc_loc;
638
values.describe(frame_no, ret_pc_loc,
639
Continuation::is_return_barrier_entry(ret_pc) ? "return address (return barrier)" : "return address");
640
values.describe(-1, fp_loc, "saved fp", 0); // "unowned" as value belongs to sender
645
intptr_t *frame::initial_deoptimization_info() {
  // Not used on aarch64, but we must return something.
  return nullptr;
}
#undef DESCRIBE_FP_OFFSET

// Debugger-helper variant: prints a named frame slot (its address, raw
// value and name) to stdout, reading through the caller's local `fp`.
#define DESCRIBE_FP_OFFSET(name)                          \
  {                                                       \
    uintptr_t *p = (uintptr_t *)fp;                       \
    printf(INTPTR_FORMAT " " INTPTR_FORMAT " %s\n",       \
           (uintptr_t)(p + frame::name##_offset),         \
           p[frame::name##_offset], #name);               \
  }
// Scratch state shared by the debugger-callable helpers below: they record
// where the next frame up the stack begins so repeated calls can walk
// outward one frame at a time.
static THREAD_LOCAL uintptr_t nextfp;
static THREAD_LOCAL uintptr_t nextpc;
static THREAD_LOCAL uintptr_t nextsp;
static THREAD_LOCAL RegisterMap *reg_map;
// Prints "method : bci ==> bytecode-name" for the given method and bcp,
// falling back to "(bad)"/"???" when the bcp does not belong to the method.
static void printbc(Method *m, intptr_t bcx) {
  const char *name;
  char buf[16];
  if (m->validate_bci_from_bcp((address)bcx) < 0
      || !m->contains((address)bcx)) {
    name = "???";
    snprintf(buf, sizeof buf, "(bad)");
  } else {
    int bci = m->bci_from((address)bcx);
    snprintf(buf, sizeof buf, "%d", bci);
    name = Bytecodes::name(m->code_at(bci));
  }
  printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
}
// Dumps the interpreter frame slots at fp, identifies the code at pc, and
// records the sender's sp/fp/pc in the thread-local next* variables so a
// subsequent npf() call can walk one frame further out.
static void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
  if (!fp)
    return;

  DESCRIBE_FP_OFFSET(return_addr);
  DESCRIBE_FP_OFFSET(link);
  DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
  DESCRIBE_FP_OFFSET(interpreter_frame_extended_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
  DESCRIBE_FP_OFFSET(interpreter_frame_cache);
  DESCRIBE_FP_OFFSET(interpreter_frame_locals);
  DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
  DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  uintptr_t *p = (uintptr_t *)fp;

  // We want to see all frames, native and Java. For compiled and
  // interpreted frames we have special information that allows us to
  // unwind them; for everything else we assume that the native frame
  // pointer chain is intact.
  frame this_frame((intptr_t*)sp, (intptr_t*)fp, (address)pc);
  if (this_frame.is_compiled_frame() ||
      this_frame.is_interpreted_frame()) {
    frame sender = this_frame.sender(reg_map);
    nextfp = (uintptr_t)sender.fp();
    nextpc = (uintptr_t)sender.pc();
    nextsp = (uintptr_t)sender.unextended_sp();
  } else {
    nextfp = p[frame::link_offset];
    nextpc = p[frame::return_addr_offset];
    nextsp = (uintptr_t)&p[frame::sender_sp_offset];
  }

  // -1 means "no bcp supplied": pick it up from the frame slot.
  if (bcx == (uintptr_t)-1)
    bcx = p[frame::interpreter_frame_bcp_offset];

  if (Interpreter::contains((address)pc)) {
    Method* m = (Method*)p[frame::interpreter_frame_method_offset];
    if(m && m->is_method()) {
      printbc(m, bcx);
    } else
      printf("not a Method\n");
  } else {
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb != nullptr) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        printf("nmethod %s\n", nm->method()->name_and_sig_as_C_string());
      } else if (cb->name()) {
        printf("CodeBlob %s\n", cb->name());
      }
    }
  }
}
extern "C" void npf() {
740
CodeBlob *cb = CodeCache::find_blob((address)nextpc);
741
// C2 does not always chain the frame pointers when it can, instead
742
// preferring to use fixed offsets from SP, so a simple leave() does
743
// not work. Instead, it adds the frame size to SP then pops FP and
744
// LR. We have to do the same thing to get a good call chain.
745
if (cb && cb->frame_size())
746
nextfp = nextsp + wordSize * (cb->frame_size() - 2);
747
internal_pf (nextsp, nextfp, nextpc, -1);
750
extern "C" void pf(uintptr_t sp, uintptr_t fp, uintptr_t pc,
751
uintptr_t bcx, uintptr_t thread) {
753
reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtInternal);
754
::new (reg_map) RegisterMap(reinterpret_cast<JavaThread*>(thread),
755
RegisterMap::UpdateMap::skip,
756
RegisterMap::ProcessFrames::include,
757
RegisterMap::WalkContinuation::skip);
759
*reg_map = RegisterMap(reinterpret_cast<JavaThread*>(thread),
760
RegisterMap::UpdateMap::skip,
761
RegisterMap::ProcessFrames::include,
762
RegisterMap::WalkContinuation::skip);
766
CodeBlob *cb = CodeCache::find_blob((address)pc);
767
if (cb && cb->frame_size())
768
fp = sp + wordSize * (cb->frame_size() - 2);
770
internal_pf(sp, fp, pc, bcx);
773
// support for printing out where we are in a Java method
774
// needs to be passed current fp and bcp register values
775
// prints method name, bc index and bytecode name
776
extern "C" void pm(uintptr_t fp, uintptr_t bcx) {
777
DESCRIBE_FP_OFFSET(interpreter_frame_method);
778
uintptr_t *p = (uintptr_t *)fp;
779
Method* m = (Method*)p[frame::interpreter_frame_method_offset];
784
// This is a generic constructor which is only used by pns() in debug.cpp.
785
frame::frame(void* sp, void* fp, void* pc) {
786
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
791
void JavaFrameAnchor::make_walkable() {
793
if (last_Java_sp() == nullptr) return;
795
if (walkable()) return;
796
vmassert(last_Java_sp() != nullptr, "not called from Java code?");
797
vmassert(last_Java_pc() == nullptr, "already walkable");
798
_last_Java_pc = (address)_last_Java_sp[-1];
799
vmassert(walkable(), "something went wrong");