 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#include "c1/c1_Runtime1.hpp"
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
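// Editor's note (illustrative, not part of the original source): with the usual
// AArch64 values, StackAlignmentInBytes == 16 and VMRegImpl::stack_slot_size == 4,
// so StackAlignmentInSlots == 4, i.e. frames are padded out to a multiple of
// four 4-byte compiler slots.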
class SimpleRuntimeFrame {
// Most of the runtime stubs have this simple frame layout.
// This class exists to make the layout shared in one place.
// Offsets are for compiler stack slots, which are jints.
// The frame sender code expects that rfp will be in the "natural" place and
// will override any oopMap setting for it. We must therefore force the layout
// so that it agrees with the frame sender code.
// we don't expect any arg reg save area so aarch64 asserts that
// frame::arg_reg_save_area_bytes == 0
return_off, return_off2,
// FIXME -- this is used by C1
const bool _save_vectors;
RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}
OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
void restore_live_registers(MacroAssembler* masm);
// Offsets into the register save area
// Used by deoptimization when it is managing result register
int reg_offset_in_bytes(Register r);
int r0_offset_in_bytes() { return reg_offset_in_bytes(r0); }
int rscratch1_offset_in_bytes() { return reg_offset_in_bytes(rscratch1); }
int v0_offset_in_bytes();
// Total stack size in bytes for saving SVE predicate registers.
int total_sve_predicate_in_bytes();
// Capture info about frame layout
// Note this is only correct when not saving full vectors.
fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
// The frame sender code expects that rfp will be in
// the "natural" place and will override any oopMap
// setting for it. We must therefore force the layout
// so that it agrees with the frame sender code.
r0_off = fpu_state_off + FPUStateSizeInWords,
rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
return_off = rfp_off + Register::max_slots_per_register, // slot for return address
reg_save_size = return_off + Register::max_slots_per_register};
int RegisterSaver::reg_offset_in_bytes(Register r) {
// The integer registers are located above the floating point
// registers in the stack frame pushed by save_live_registers() so the
// offset depends on whether we are saving full vectors, and whether
// those vectors are NEON or SVE.
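// Editor's worked example (illustrative assumption, not in the original code):
// with plain NEON saves, slots_per_vect == FloatRegister::slots_per_neon_register == 4
// and there is no predicate area, so the integer block starts at
//   r0_offset = 0 + (4 slots * 32 fp regs) * 4 bytes = 512 bytes,
// and e.g. r5 then lives at 512 + 5 * wordSize = 552 bytes above sp.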
int slots_per_vect = FloatRegister::save_slots_per_register;
#if COMPILER2_OR_JVMCI
slots_per_vect = FloatRegister::slots_per_neon_register;
if (Matcher::supports_scalable_vector()) {
slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
return r0_offset + r->encoding() * wordSize;
int RegisterSaver::v0_offset_in_bytes() {
// The floating point registers are located above the predicate registers if
// they are present in the stack frame pushed by save_live_registers(). So the
// offset depends on the saved total predicate vectors in the stack frame.
return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
int RegisterSaver::total_sve_predicate_in_bytes() {
if (_save_vectors && Matcher::supports_scalable_vector()) {
return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
PRegister::number_of_registers;
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
bool use_sve = false;
int sve_vector_size_in_bytes = 0;
int sve_vector_size_in_slots = 0;
int sve_predicate_size_in_slots = 0;
int total_predicate_in_bytes = total_sve_predicate_in_bytes();
int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;
use_sve = Matcher::supports_scalable_vector();
sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
#if COMPILER2_OR_JVMCI
int extra_save_slots_per_register = 0;
// Save upper half of vector registers
extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
int extra_vector_bytes = extra_save_slots_per_register *
VMRegImpl::stack_slot_size *
FloatRegister::number_of_registers;
additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
reg_save_size * BytesPerInt, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
// The caller will allocate additional_frame_words
int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
// CodeBlob frame size is in words.
int frame_size_in_words = frame_size_in_bytes / wordSize;
*total_frame_words = frame_size_in_words;
// Save Integer and Float registers.
__ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
// Set an oopmap for the call site. This oopmap will map all
// oop-registers and debug-info registers as callee-saved. This
// will allow deoptimization at this safepoint to find all possible
// debug-info recordings, as well as let GC find all oops.
OopMapSet *oop_maps = new OopMapSet();
OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
for (int i = 0; i < Register::number_of_registers; i++) {
Register r = as_Register(i);
if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
// SP offsets are in 4-byte words.
// Register slots are 8 bytes wide, 32 floating-point registers.
int sp_offset = Register::max_slots_per_register * i +
FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister r = as_FloatRegister(i);
sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
(FloatRegister::slots_per_neon_register * i);
sp_offset = FloatRegister::save_slots_per_register * i;
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
__ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
__ pop_CPU_state(_save_vectors);
__ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
__ authenticate_return_address();
// Is vector's size (in bytes) bigger than a size saved by default?
// 8 bytes vector registers are saved by default on AArch64.
// The SVE supported min vector size is 8 bytes and we need to save
// predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
return size > 8 || (UseSVE > 0 && size >= 8);
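// Editor's worked example (illustrative): is_wide_vector(16) is always true,
// since only the low 8 bytes of each vector register are saved by default;
// is_wide_vector(8) is false without SVE but true with UseSVE > 0, because the
// predicate registers must then be saved as well.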
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp).
// and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit. The OUTPUTS are in 32-bit units.
// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.
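// Editor's sketch of the "shift" described above (assumed mapping, for illustration):
//   Java:  j_rarg0 j_rarg1 ... j_rarg6 j_rarg7
//   C ABI: c_rarg1 c_rarg2 ... c_rarg7 c_rarg0
// so for a non-static JNI call the Java arguments are already sitting in the
// right C argument registers and only the JNIEnv* needs to be materialized in c_rarg0.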
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
int total_args_passed) {
// Create the mapping between argument positions and
static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
j_farg0, j_farg1, j_farg2, j_farg3,
j_farg4, j_farg5, j_farg6, j_farg7
for (int i = 0; i < total_args_passed; i++) {
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
// halves of T_LONG or T_DOUBLE
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
ShouldNotReachHere();
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
__ cbz(rscratch1, L);
// VM needs caller's callsite
// VM needs target method
// This needs to be a long call since we will relocate this adapter to
// the codeBuffer and it may not reach
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
__ mov(c_rarg0, rmethod);
__ authenticate_return_address(c_rarg1);
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
// Explicit isb required because fixup_callers_callsite may change the code
static void gen_c2i_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
// Before we get into the guts of the C2I adapter, see if we should be here
// at all. We've come from compiled code and are attempting to jump to the
// interpreter, which means the caller made a static call to get here
// (vcalls always get a compiled target if there is one). Check for a
// compiled target. If there is one, we need to patch the caller's call.
patch_callers_callsite(masm);
int words_pushed = 0;
// Since all args are passed on the stack, total_args_passed *
// Interpreter::stackElementSize is the space we need.
int extraspace = total_args_passed * Interpreter::stackElementSize;
__ mov(r19_sender_sp, sp);
// stack is aligned, keep it that way
extraspace = align_up(extraspace, 2*wordSize);
__ sub(sp, sp, extraspace);
// Now write the args into the outgoing interpreter space
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
// offset to start parameters
int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
// - 0 return address
// However, to make things extra confusing: because we can fit a Java long/double in
// a single slot on a 64 bit vm and it would be silly to break them up, the interpreter
// leaves one slot empty and only stores to a single slot. In this case the
// slot that is occupied is the T_VOID slot. See, I said it was confusing.
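// Editor's worked example (illustrative, not in the original source): for a
// signature (int a, long b, int c), total_args_passed == 4 (int, long, void, int)
// and with stackElementSize == 8 the outgoing interpreter area looks like
//   sp+24: a   sp+16: junk   sp+8: b (full 64-bit value)   sp+0: c
// i.e. the long is written once, at next_off (the T_VOID slot), and the slot
// at st_off only gets the 0xdead... junk pattern below.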
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
if (r_1->is_stack()) {
// memory to memory use rscratch1
int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
+ words_pushed * wordSize);
if (!r_2->is_valid()) {
__ ldrw(rscratch1, Address(sp, ld_off));
__ str(rscratch1, Address(sp, st_off));
__ ldr(rscratch1, Address(sp, ld_off));
// Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
// T_DOUBLE and T_LONG use two slots in the interpreter
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
// ld_off == LSW, ld_off+wordSize == MSW
// st_off == MSW, next_off == LSW
__ str(rscratch1, Address(sp, next_off));
// Overwrite the unused slot with known junk
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
__ str(rscratch1, Address(sp, st_off));
__ str(rscratch1, Address(sp, st_off));
} else if (r_1->is_Register()) {
Register r = r_1->as_Register();
if (!r_2->is_valid()) {
// must be only an int (or less) so move only 32 bits to slot
// why not sign extend??
__ str(r, Address(sp, st_off));
// Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
// T_DOUBLE and T_LONG use two slots in the interpreter
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
// jlong/double in gpr
// Overwrite the unused slot with known junk
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
__ str(rscratch1, Address(sp, st_off));
__ str(r, Address(sp, next_off));
__ str(r, Address(sp, st_off));
assert(r_1->is_FloatRegister(), "");
if (!r_2->is_valid()) {
// only a float, use just part of the slot
__ strs(r_1->as_FloatRegister(), Address(sp, st_off));
// Overwrite the unused slot with known junk
__ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
__ str(rscratch1, Address(sp, st_off));
__ strd(r_1->as_FloatRegister(), Address(sp, next_off));
__ mov(esp, sp); // Interp expects args on caller's expression stack
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
// Note: r19_sender_sp contains the senderSP on entry. We must
// preserve it since we may do an i2c -> c2i transition if we lose a
// race where compiled code goes non-entrant while we get args
// Adapters are frameless.
// An i2c adapter is frameless because the *caller* frame, which is
// interpreted, routinely repairs its own esp (from
// interpreter_frame_last_sp), even if a callee has modified the
// stack pointer. It also recalculates and aligns sp.
// A c2i adapter is frameless because the *callee* frame, which is
// interpreted, routinely repairs its caller's sp (from sender_sp,
// which is set up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
if (VerifyAdapterCalls &&
(Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
if (Interpreter::code() != nullptr) {
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
if (StubRoutines::initial_stubs_code() != nullptr) {
range_check(masm, rax, r11,
StubRoutines::initial_stubs_code()->code_begin(),
StubRoutines::initial_stubs_code()->code_end(),
if (StubRoutines::final_stubs_code() != nullptr) {
range_check(masm, rax, r11,
StubRoutines::final_stubs_code()->code_begin(),
StubRoutines::final_stubs_code()->code_end(),
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ block_comment("} verify_i2ce ");
// Cut-out for having no stack args.
int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
if (comp_args_on_stack) {
__ sub(rscratch1, sp, comp_words_on_stack * wordSize);
__ andr(sp, rscratch1, -16);
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
// check if this call should be routed towards a specific entry point
__ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
__ cbz(rscratch2, no_alternative_target);
__ mov(rscratch1, rscratch2);
__ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
__ bind(no_alternative_target);
#endif // INCLUDE_JVMCI
// Now generate the shuffle code.
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
// Pick up 0, 1 or 2 words from SP+offset.
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
if (r_1->is_stack()) {
// Convert stack slot to an SP offset (+ wordSize to account for return address)
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
if (!r_2->is_valid()) {
__ ldrsw(rscratch2, Address(esp, ld_off));
__ str(rscratch2, Address(sp, st_off));
// We are using two optoregs. This can be either T_OBJECT,
// T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
// two slots but only uses one for the T_LONG or T_DOUBLE case
// So we must adjust where to pick up the data to match the
// Interpreter local[n] == MSW, local[n+1] == LSW however locals
// are accessed as negative so LSW is at LOW address
// ld_off is MSW so get LSW
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
__ ldr(rscratch2, Address(esp, offset));
// st_off is LSW (i.e. reg.first())
__ str(rscratch2, Address(sp, st_off));
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
if (r_2->is_valid()) {
// We are using two VMRegs. This can be either T_OBJECT,
// T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
// two slots but only uses one for the T_LONG or T_DOUBLE case
// So we must adjust where to pick up the data to match the
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
// this can be a misaligned move
__ ldr(r, Address(esp, offset));
// sign extend and use a full word?
__ ldrw(r, Address(esp, ld_off));
if (!r_2->is_valid()) {
__ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
__ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
__ mov(rscratch2, rscratch1);
__ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
__ mov(rscratch1, rscratch2);
// 6243940 We might end up in handle_wrong_method if
// the callee is deoptimized as we race thru here. If that
// happens we don't want to take a safepoint because the
// caller frame will look interpreted and arguments are now
// "compiled" so it is much better to make this transition
// invisible to the stack walking code. Unfortunately if
// we try and find the callee by normal means a safepoint
// is possible. So we stash the desired callee in the thread
// and the vm will find it there should this case occur.
__ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
address c2i_unverified_entry = __ pc();
Register data = rscratch2;
Register receiver = j_rarg0;
Register tmp = r10; // A call-clobbered register not used for arg passing
// -------------------------------------------------------------------------
// Generate a C2I adapter. On entry we know rmethod holds the Method* during calls
// to the interpreter. The args start out packed in the compiled layout. They
// need to be unpacked into the interpreter layout. This will almost always
// require some stack space. We grow the current (compiled) stack, then repack
// the args. We finally end in a jump to the generic interpreter entry point.
// On exit from the interpreter, the interpreter will restore our SP (lest the
// compiled code, which relies solely on SP and not FP, get sick).
__ block_comment("c2i_unverified_entry {");
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
__ ic_check(1 /* end_alignment */);
__ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
__ cbz(rscratch1, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ block_comment("} c2i_unverified_entry");
address c2i_entry = __ pc();
// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
{ // Bypass the barrier for non-static methods
__ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
c2i_no_clinit_check_entry = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
static int c_calling_convention_priv(const BasicType *sig_bt,
int total_args_passed) {
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
c_farg0, c_farg1, c_farg2, c_farg3,
c_farg4, c_farg5, c_farg6, c_farg7
uint stk_args = 0; // inc by 2 each time
for (int i = 0; i < total_args_passed; i++) {
if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
// Less-than word types are stored one after another.
// The code is unable to handle this so bailout.
regs[i].set1(VMRegImpl::stack2reg(stk_args));
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
regs[i].set2(VMRegImpl::stack2reg(stk_args));
if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
// Less-than word types are stored one after another.
// The code is unable to handle this so bailout.
regs[i].set1(VMRegImpl::stack2reg(stk_args));
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
regs[i].set2(VMRegImpl::stack2reg(stk_args));
case T_VOID: // Halves of longs and doubles
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
ShouldNotReachHere();
int SharedRuntime::vector_calling_convention(VMRegPair *regs,
uint total_args_passed) {
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
int total_args_passed)
int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
guarantee(result >= 0, "Unsupported arguments configuration");
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
// We always ignore the frame_slots arg and just use the space just below frame pointer
// which by this time is free to use
__ strs(v0, Address(rfp, -wordSize));
__ strd(v0, Address(rfp, -wordSize));
__ str(r0, Address(rfp, -wordSize));
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
// We always ignore the frame_slots arg and just use the space just below frame pointer
// which by this time is free to use
__ ldrs(v0, Address(rfp, -wordSize));
__ ldrd(v0, Address(rfp, -wordSize));
__ ldr(r0, Address(rfp, -wordSize));
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
x = x + args[i].first()->as_Register();
} else if (args[i].first()->is_FloatRegister()) {
__ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
x = x + args[i].first()->as_Register();
for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
if (args[i].first()->is_Register()) {
} else if (args[i].first()->is_FloatRegister()) {
__ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
static void verify_oop_args(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = r19; // not part of any compiled calling seq
for (int i = 0; i < method->size_of_parameters(); i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
__ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
__ verify_oop(temp_reg);
__ verify_oop(r->as_Register());
// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
stack_slots += (int)ContinuationEntry::size()/wordSize;
__ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata
OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
__ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
__ mov(rscratch1, sp); // we can't use sp as the source in str
__ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
// on entry c_rarg1 points to the continuation
// sp points to ContinuationEntry
// c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
__ movw(rscratch1, ContinuationEntry::cookie_value());
__ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
__ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
__ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
__ str (zr, Address(sp, ContinuationEntry::chunk_offset()));
__ strw(zr, Address(sp, ContinuationEntry::argsize_offset()));
__ strw(zr, Address(sp, ContinuationEntry::pin_count_offset()));
__ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
__ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
__ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
__ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
__ cmp(sp, rscratch1);
__ br(Assembler::EQ, OK);
__ stop("incorrect sp1");
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
__ cbzw(rscratch1, L_skip_vthread_code);
// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ cbz(rscratch1, L_skip_vthread_code);
// Save return value potentially containing the exception oop in callee-saved R19.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
// Restore potential return value.
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
__ cbzw(rscratch1, L_skip_vthread_code);
// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
__ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
__ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
__ add(rfp, sp, (int)ContinuationEntry::size());
// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
// c_rarg2 -- isContinue
// c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs,
int& exception_offset,
int& frame_complete,
int& interpreted_entry_offset,
int& compiled_entry_offset) {
//verify_oop_args(masm, method, sig_bt, regs);
Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
address start = __ pc();
Label call_thaw, exit;
// i2i entry used at interp_only_mode only
interpreted_entry_offset = __ pc() - start;
Label is_interp_only;
__ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
__ cbnzw(rscratch1, is_interp_only);
__ stop("enterSpecial interpreter entry called when not in interp_only_mode");
__ bind(is_interp_only);
// Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
__ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
__ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
__ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
__ push_cont_fastpath(rthread);
stack_slots = 2; // will be adjusted in setup
OopMap* map = continuation_enter_setup(masm, stack_slots);
// The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
// but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
fill_continuation_entry(masm);
__ cbnz(c_rarg2, call_thaw);
const address tr_call = __ trampoline_call(resolve);
if (tr_call == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
oop_maps->add_gc_map(__ pc() - start, map);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
__ align(CodeEntryAlignment);
compiled_entry_offset = __ pc() - start;
stack_slots = 2; // will be adjusted in setup
OopMap* map = continuation_enter_setup(masm, stack_slots);
frame_complete = __ pc() - start;
fill_continuation_entry(masm);
__ cbnz(c_rarg2, call_thaw);
const address tr_call = __ trampoline_call(resolve);
if (tr_call == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
oop_maps->add_gc_map(__ pc() - start, map);
__ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
ContinuationEntry::_return_pc_offset = __ pc() - start;
continuation_enter_cleanup(masm);
/// exception handling
exception_offset = __ pc() - start;
__ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
continuation_enter_cleanup(masm);
__ ldr(c_rarg1, Address(rfp, wordSize)); // return address
__ authenticate_return_address(c_rarg1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
// see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
__ mov(r1, r0); // the exception handler
__ mov(r0, r19); // restore return value containing the exception oop
__ br(r1); // the exception handler
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
static void gen_continuation_yield(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs,
OopMapSet* oop_maps,
int& frame_complete,
int& compiled_entry_offset) {
framesize // inclusive of return address
// assert(is_even(framesize/2), "sp not 16-byte aligned");
stack_slots = framesize / VMRegImpl::slots_per_word;
assert(stack_slots == 2, "recheck layout");
address start = __ pc();
compiled_entry_offset = __ pc() - start;
__ mov(c_rarg1, sp);
frame_complete = __ pc() - start;
address the_pc = __ pc();
__ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup
__ mov(c_rarg0, rthread);
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
__ call_VM_leaf(Continuation::freeze_entry(), 2);
__ reset_last_Java_frame(true);
__ cbnz(r0, pinned);
// We've succeeded, set sp to the ContinuationEntry
__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
__ mov(sp, rscratch1);
continuation_enter_cleanup(masm);
__ bind(pinned); // pinned -- return to caller
// handle pending exception thrown by freeze
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
__ cbz(rscratch1, ok);
__ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
vmIntrinsics::ID iid = method->intrinsic_id();
// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
if (ref_kind != 0) {
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = r19; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (iid == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else if (iid == vmIntrinsics::_linkToNative) {
member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
member_reg = r19; // known to be free at this point
fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
VMReg r = regs[member_arg_pos].first();
if (r->is_stack()) {
__ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
// no data motion is needed
member_reg = r->as_Register();
// Make sure the receiver is loaded into a register.
assert(method->size_of_parameters() > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
fatal("receiver always in a register");
receiver_reg = r2; // known to be free at this point
__ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
// no data motion is needed
receiver_reg = r->as_Register();
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, iid,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they block out GC.
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const methodHandle& method,
BasicType* in_sig_bt,
BasicType ret_type) {
if (method->is_continuation_native_intrinsic()) {
int exception_offset = -1;
OopMapSet* oop_maps = new OopMapSet();
int frame_complete = -1;
int stack_slots = -1;
int interpreted_entry_offset = -1;
int vep_offset = -1;
if (method->is_continuation_enter_intrinsic()) {
gen_continuation_enter(masm,
interpreted_entry_offset,
} else if (method->is_continuation_yield_intrinsic()) {
gen_continuation_yield(masm,
guarantee(false, "Unknown Continuation native intrinsic");
if (method->is_continuation_enter_intrinsic()) {
assert(interpreted_entry_offset != -1, "Must be set");
assert(exception_offset != -1, "Must be set");
assert(interpreted_entry_offset == -1, "Must be unset");
assert(exception_offset == -1, "Must be unset");
assert(frame_complete != -1, "Must be set");
assert(stack_slots != -1, "Must be set");
assert(vep_offset != -1, "Must be set");
nmethod* nm = nmethod::new_native_nmethod(method,
if (nm == nullptr) return nm;
if (method->is_continuation_enter_intrinsic()) {
ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
} else if (method->is_continuation_yield_intrinsic()) {
_cont_doYield_stub = nm;
guarantee(false, "Unknown Continuation native intrinsic");
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
// First instruction must be a nop as it may need to be patched on deoptimisation
gen_special_dispatch(masm,
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
stack_slots / VMRegImpl::slots_per_word,
address native_func = method->native_function();
assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
intptr_t start = (intptr_t)__ pc();
// We have received a description of where all the java args are located
// on entry to the wrapper. We need to convert these args to where
// the jni function will expect them. To figure out where they go
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
const int total_in_args = method->size_of_parameters();
int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = nullptr;
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
// Now figure out where the args must be stored and how much stack space
out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
if (out_arg_slots < 0) {
// Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers
// Calculate the total number of stack slots we will need.
// First count the abi requirement plus all of the outgoing args
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area
int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
int oop_handle_offset = stack_slots;
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
bool is_static = false;
if (method->is_static()) {
klass_slot_offset = stack_slots;
stack_slots += VMRegImpl::slots_per_word;
klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
// Plus a lock if needed
if (method->is_synchronized()) {
lock_slot_offset = stack_slots;
stack_slots += VMRegImpl::slots_per_word;
// Now a place (+2) to save return values or temp during shuffling
// + 4 for return address (which we own) and saved rfp
// Ok, the space we have allocated will look like:
// |---------------------|
// | 2 slots for moves |
// |---------------------|
// | lock box (if sync) |
// |---------------------| <- lock_slot_offset
// | klass (if static) |
// |---------------------| <- klass_slot_offset
// | oopHandle area |
// |---------------------| <- oop_handle_offset (8 java arg registers)
// | outbound memory |
// | based arguments |
// |---------------------|
// SP-> | out_preserved_slots |
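// Editor's worked example of the slot arithmetic above (hypothetical numbers,
// for illustration only; assumes out_preserve_stack_slots() + out_arg_slots == 0):
//   + 8 * VMRegImpl::slots_per_word (16 slots)  -> oop handle area,  oop_handle_offset = 0
//   + 2 slots                                   -> klass handle,     klass_slot_offset = 16
//   + 2 slots                                   -> lock box,         lock_slot_offset  = 18
// after which stack_slots is rounded up to StackAlignmentInSlots just below.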
1559
// Now compute actual number of stack words we need rounding to make
1560
// stack properly aligned.
1561
stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1563
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1565
// First thing make an ic check to see if we should even be here
1567
// We are free to use all registers as temps without saving them and
1568
// restoring them except rfp. rfp is the only callee save register
1569
// as far as the interpreter and the compiler(s) are concerned.
1571
const Register receiver = j_rarg0;
1573
Label exception_pending;
1575
assert_different_registers(receiver, rscratch1);
1576
__ verify_oop(receiver);
1577
__ ic_check(8 /* end_alignment */);
1579
// Verified entry point must be aligned
1580
int vep_offset = ((intptr_t)__ pc()) - start;
1582
// If we have to make this method not-entrant we'll overwrite its
1583
// first instruction with a jump. For this action to be legal we
1584
// must ensure that this first instruction is a B, BL, NOP, BKPT,
1585
// SVC, HVC, or SMC. Make it a NOP.
1588
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1589
Label L_skip_barrier;
1590
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1591
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1592
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1594
__ bind(L_skip_barrier);
1597
// Generate stack overflow check
1598
__ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1600
// Generate a new frame for the wrapper.
1602
// -2 because return address is already present and so is saved rfp
1603
__ sub(sp, sp, stack_size - 2*wordSize);
1605
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1606
bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1608
// Frame is now completed as far as size and linkage.
1609
int frame_complete = ((intptr_t)__ pc()) - start;
1611
// We use r20 as the oop handle for the receiver/klass
1612
// It is callee save so it survives the call to native
1614
const Register oop_handle_reg = r20;
1617
// We immediately shuffle the arguments so that any vm call we have to
1618
// make from here on out (sync slow path, jvmti, etc.) we will have
1619
// captured the oops from our caller and have a valid oopMap for
1622
// -----------------
1623
// The Grand Shuffle
1625
// The Java calling convention is either equal (linux) or denser (win64) than the
1626
// c calling convention. However the because of the jni_env argument the c calling
1627
// convention always has at least one more (and two for static) arguments than Java.
1628
// Therefore if we move the args from java -> c backwards then we will never have
1629
// a register->register conflict and we don't have to build a dependency graph
1630
// and figure out how to break any cycles.
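// Editor's sketch of why the backwards move is safe (illustrative assumption):
// for a non-static native with one oop argument, the only Java -> C move is
//   j_rarg0 (r1) -> c_rarg1 (r1)   // already in place
// while the extra JNIEnv* goes into c_rarg0 (r0), which no Java argument used,
// so processing the C args from last to first never overwrites a pending source.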
1633
// Record esp-based slot for receiver on stack for non-static methods
1634
int receiver_offset = -1;
1636
// This is a trick. We double the stack slots so we can claim
1637
// the oops in the caller's frame. Since we are sure to have
1638
// more args than the caller doubling is enough to make
1639
// sure we can capture all the incoming oop args from the
1642
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1644
// Mark location of rfp (someday)
1645
// map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1652
bool reg_destroyed[Register::number_of_registers];
1653
bool freg_destroyed[FloatRegister::number_of_registers];
1654
for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1655
reg_destroyed[r] = false;
1657
for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1658
freg_destroyed[f] = false;
1663
// For JNI natives the incoming and outgoing registers are offset upwards.
1664
GrowableArray<int> arg_order(2 * total_in_args);
1665
VMRegPair tmp_vmreg;
1666
tmp_vmreg.set2(r19->as_VMReg());
1668
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1670
arg_order.push(c_arg);
1674
for (int ai = 0; ai < arg_order.length(); ai += 2) {
1675
int i = arg_order.at(ai);
1676
int c_arg = arg_order.at(ai + 1);
1677
__ block_comment(err_msg("move %d -> %d", i, c_arg));
1678
assert(c_arg != -1 && i != -1, "wrong order");
1680
if (in_regs[i].first()->is_Register()) {
1681
assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1682
} else if (in_regs[i].first()->is_FloatRegister()) {
1683
assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1685
if (out_regs[c_arg].first()->is_Register()) {
1686
reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1687
} else if (out_regs[c_arg].first()->is_FloatRegister()) {
1688
freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1691
switch (in_sig_bt[i]) {
1694
__ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1695
((i == 0) && (!is_static)),
1703
__ float_move(in_regs[i], out_regs[c_arg]);
1708
assert( i + 1 < total_in_args &&
1709
in_sig_bt[i + 1] == T_VOID &&
1710
out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1711
__ double_move(in_regs[i], out_regs[c_arg]);
1716
__ long_move(in_regs[i], out_regs[c_arg]);
1720
case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1723
__ move32_64(in_regs[i], out_regs[c_arg]);
1728
// point c_arg at the first arg that is already loaded in case we
1729
// need to spill before we call out
1730
int c_arg = total_c_args - total_in_args;
1732
// Pre-load a static method's oop into c_rarg1.
1733
if (method->is_static()) {
1735
// load oop into a register
1737
JNIHandles::make_local(method->method_holder()->java_mirror()));
1739
// Now handlize the static class mirror it's known not-null.
1740
__ str(c_rarg1, Address(sp, klass_offset));
1741
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1743
// Now get the handle
1744
__ lea(c_rarg1, Address(sp, klass_offset));
1745
// and protect the arg if we must spill
1749
// Change state to native (we save the return address in the thread, since it might not
1750
// be pushed on the stack when we do a stack traversal).
1751
// We use the same pc/oopMap repeatedly when we call out
1753
Label native_return;
1754
__ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1756
Label dtrace_method_entry, dtrace_method_entry_done;
1757
if (DTraceMethodProbes) {
1758
__ b(dtrace_method_entry);
1759
__ bind(dtrace_method_entry_done);
1762
// RedefineClasses() tracing support for obsolete method entry
1763
if (log_is_enabled(Trace, redefine, class, obsolete)) {
1764
// protect the args we've loaded
1765
save_args(masm, total_c_args, c_arg, out_regs);
1766
__ mov_metadata(c_rarg1, method());
1768
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1770
restore_args(masm, total_c_args, c_arg, out_regs);
1773
// Lock a synchronized method
1775
// Register definitions used by locking and unlocking
1777
const Register swap_reg = r0;
1778
const Register obj_reg = r19; // Will contain the oop
1779
const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
1780
const Register old_hdr = r13; // value of old header at unlock time
1781
const Register lock_tmp = r14; // Temporary used by lightweight_lock/unlock
1782
const Register tmp = lr;
1784
Label slow_path_lock;
1787
if (method->is_synchronized()) {
1789
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1791
// Get the handle (the 2nd argument)
1792
__ mov(oop_handle_reg, c_rarg1);
1794
// Get address of the box
1796
__ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1798
// Load the oop from the handle
1799
__ ldr(obj_reg, Address(oop_handle_reg, 0));
1801
if (LockingMode == LM_MONITOR) {
1802
__ b(slow_path_lock);
1803
} else if (LockingMode == LM_LEGACY) {
1804
// Load (object->mark() | 1) into swap_reg %r0
1805
__ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1806
__ orr(swap_reg, rscratch1, 1);
1808
// Save (object->mark() | 1) into BasicLock's displaced header
1809
__ str(swap_reg, Address(lock_reg, mark_word_offset));
1811
// src -> dest iff dest == r0 else r0 <- dest
1812
__ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1814
// Hmm should this move to the slow path code area???
1816
// Test if the oopMark is an obvious stack pointer, i.e.,
1817
// 1) (mark & 3) == 0, and
1818
// 2) sp <= mark < mark + os::pagesize()
1819
// These 3 tests can be done by evaluating the following
1820
// expression: ((mark - sp) & (3 - os::vm_page_size())),
1821
// assuming both stack pointer and pagesize have their
1822
// least significant 2 bits clear.
1823
// NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
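// In effect (assuming a 4K page): 3 - 0x1000 == ...fffff003, so the 'ands'
// below clears bits [11:2]; the result is zero exactly when (mark & 3) == 0
// and 0 <= mark - sp < page_size, i.e. the displaced mark points into our
// own stack page and this is a recursive stack lock.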
1825
__ sub(swap_reg, sp, swap_reg);
1826
__ neg(swap_reg, swap_reg);
1827
__ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1829
// Save the test result, for recursive case, the result is zero
1830
__ str(swap_reg, Address(lock_reg, mark_word_offset));
1831
__ br(Assembler::NE, slow_path_lock);
1833
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1834
__ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1837
__ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1839
// Slow path will re-enter here
1844
// Finally just about ready to make the JNI call
1846
// get JNIEnv* which is first argument to native
1847
__ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1849
// Now set thread in native
1850
__ mov(rscratch1, _thread_in_native);
1851
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1852
__ stlrw(rscratch1, rscratch2);
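// stlrw is a store-release: prior stores become visible before the thread
// is observed in _thread_in_native, so a safepointing VM thread that reads
// this state also sees those stores.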
1854
__ rt_call(native_func);
1856
__ bind(native_return);
1858
intptr_t return_pc = (intptr_t) __ pc();
1859
oop_maps->add_gc_map(return_pc - start, map);
1861
// Verify or restore cpu control state after JNI call
1862
__ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1864
// Unpack native results.
1866
case T_BOOLEAN: __ c2bool(r0); break;
1867
case T_CHAR : __ ubfx(r0, r0, 0, 16); break;
1868
case T_BYTE : __ sbfx(r0, r0, 0, 8); break;
1869
case T_SHORT : __ sbfx(r0, r0, 0, 16); break;
1870
case T_INT : __ sbfx(r0, r0, 0, 32); break;
1873
// Result is in v0 we'll save as needed
1875
case T_ARRAY: // Really a handle
1876
case T_OBJECT: // Really a handle
1877
break; // can't de-handlize until after safepoint check
1880
default : ShouldNotReachHere();
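// Note: the native call returns sub-int values in the low bits of r0; the
// c2bool/ubfx/sbfx sequences above zero- or sign-extend them to the widths
// the Java caller expects (unsigned for boolean/char, signed for
// byte/short/int).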
1883
Label safepoint_in_progress, safepoint_in_progress_done;
1884
Label after_transition;
1886
// Switch thread to "native transition" state before reading the synchronization state.
1887
// This additional state is necessary because reading and testing the synchronization
1888
// state is not atomic w.r.t. GC, as this scenario demonstrates:
1889
// Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1890
// VM thread changes sync state to synchronizing and suspends threads for GC.
1891
// Thread A is resumed to finish this native method, but doesn't block here since it
1892
// didn't see any synchronization in progress, and escapes.
1893
__ mov(rscratch1, _thread_in_native_trans);
1895
__ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1897
// Force this write out before the read below
1898
if (!UseSystemMemoryBarrier) {
1899
__ dmb(Assembler::ISH);
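// The full barrier orders the thread-state store above before the safepoint
// poll load below. With UseSystemMemoryBarrier the VM issues a system-wide
// barrier instead, so the local dmb can be skipped.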
1902
__ verify_sve_vector_length();
1904
// Check for safepoint operation in progress and/or pending suspend requests.
1906
// We need an acquire here to ensure that any subsequent load of the
1907
// global SafepointSynchronize::_state flag is ordered after this load
1908
// of the thread-local polling word. We don't want this poll to
1909
// return false (i.e. not safepointing) and a later poll of the global
1910
// SafepointSynchronize::_state spuriously to return true.
1912
// This is to avoid a race when we're in a native->Java transition
1913
// racing the code which wakes up from a safepoint.
1915
__ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1916
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1917
__ cbnzw(rscratch1, safepoint_in_progress);
1918
__ bind(safepoint_in_progress_done);
1921
// change thread state
1922
__ mov(rscratch1, _thread_in_Java);
1923
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1924
__ stlrw(rscratch1, rscratch2);
1925
__ bind(after_transition);
1929
__ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1930
__ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1931
__ br(Assembler::EQ, reguard);
1932
__ bind(reguard_done);
1934
// native result if any is live
1938
Label slow_path_unlock;
1939
if (method->is_synchronized()) {
1941
// Get locked oop from the handle we passed to jni
1942
__ ldr(obj_reg, Address(oop_handle_reg, 0));
1944
Label done, not_recursive;
1946
if (LockingMode == LM_LEGACY) {
1947
// Simple recursive lock?
1948
__ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1949
__ cbnz(rscratch1, not_recursive);
1950
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
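// Roughly: a zero displaced header saved in the on-stack BasicLock at lock
// time marks a recursive stack lock; in that case only the held monitor
// count is decremented and the real unlock below is skipped.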
1954
__ bind(not_recursive);
1956
// Must save r0 if it is live now because cmpxchg must use it
1957
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1958
save_native_result(masm, ret_type, stack_slots);
1961
if (LockingMode == LM_MONITOR) {
1962
__ b(slow_path_unlock);
1963
} else if (LockingMode == LM_LEGACY) {
1964
// get address of the stack lock
1965
__ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1966
// get old displaced header
1967
__ ldr(old_hdr, Address(r0, 0));
1969
// Atomic swap old header if oop still contains the stack lock
1971
__ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1973
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1975
assert(LockingMode == LM_LIGHTWEIGHT, "");
1976
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1977
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1980
// slow path re-enters here
1981
__ bind(unlock_done);
1982
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1983
restore_native_result(masm, ret_type, stack_slots);
1989
Label dtrace_method_exit, dtrace_method_exit_done;
1990
if (DTraceMethodProbes) {
1991
__ b(dtrace_method_exit);
1992
__ bind(dtrace_method_exit_done);
1995
__ reset_last_Java_frame(false);
1997
// Unbox oop result, e.g. JNIHandles::resolve result.
1998
if (is_reference_type(ret_type)) {
1999
__ resolve_jobject(r0, r1, r2);
2002
if (CheckJNICalls) {
2003
// clear_pending_jni_exception_check
2004
__ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2007
// reset handle block
2008
__ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2009
__ str(zr, Address(r2, JNIHandleBlock::top_offset()));
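// Zeroing the block's top pointer effectively frees every JNI local handle
// allocated during the native call, so the storage can be reused.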
2013
// Any exception pending?
2014
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2015
__ cbnz(rscratch1, exception_pending);
2020
// Unexpected paths are out of line and go here
2022
// forward the exception
2023
__ bind(exception_pending);
2025
// and forward the exception
2026
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2028
// Slow path locking & unlocking
2029
if (method->is_synchronized()) {
2031
__ block_comment("Slow path lock {");
2032
__ bind(slow_path_lock);
2034
// has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2035
// args are (oop obj, BasicLock* lock, JavaThread* thread)
2037
// protect the args we've loaded
2038
save_args(masm, total_c_args, c_arg, out_regs);
2040
__ mov(c_rarg0, obj_reg);
2041
__ mov(c_rarg1, lock_reg);
2042
__ mov(c_rarg2, rthread);
2044
// Not a leaf but we have last_Java_frame setup as we want
2045
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2046
restore_args(masm, total_c_args, c_arg, out_regs);
2050
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2051
__ cbz(rscratch1, L);
2052
__ stop("no pending exception allowed on exit from monitorenter");
2058
__ block_comment("} Slow path lock");
2060
__ block_comment("Slow path unlock {");
2061
__ bind(slow_path_unlock);
2063
// If we haven't already saved the native result we must save it now, as the FP result registers
2064
// are still exposed.
2066
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2067
save_native_result(masm, ret_type, stack_slots);
2070
__ mov(c_rarg2, rthread);
2071
__ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2072
__ mov(c_rarg0, obj_reg);
2074
// Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2075
// NOTE that obj_reg == r19 currently
2076
__ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2077
__ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2079
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2084
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2085
__ cbz(rscratch1, L);
2086
__ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2091
__ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2093
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2094
restore_native_result(masm, ret_type, stack_slots);
2098
__ block_comment("} Slow path unlock");
2102
// SLOW PATH Reguard the stack if needed
2105
save_native_result(masm, ret_type, stack_slots);
2106
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2107
restore_native_result(masm, ret_type, stack_slots);
2111
// SLOW PATH safepoint
2113
__ block_comment("safepoint {");
2114
__ bind(safepoint_in_progress);
2116
// Don't use call_VM as it will see a possible pending exception and forward it
2117
// and never return here preventing us from clearing _last_native_pc down below.
2119
save_native_result(masm, ret_type, stack_slots);
2120
__ mov(c_rarg0, rthread);
2122
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2124
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2127
// Restore any method result value
2128
restore_native_result(masm, ret_type, stack_slots);
2130
__ b(safepoint_in_progress_done);
2131
__ block_comment("} safepoint");
2134
// SLOW PATH dtrace support
2135
if (DTraceMethodProbes) {
2137
__ block_comment("dtrace entry {");
2138
__ bind(dtrace_method_entry);
2140
// We have all of the arguments set up at this point. We must not touch any
2141
// argument registers at this point (what if we save/restore them and there are no oops?).
2143
save_args(masm, total_c_args, c_arg, out_regs);
2144
__ mov_metadata(c_rarg1, method());
2146
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2148
restore_args(masm, total_c_args, c_arg, out_regs);
2149
__ b(dtrace_method_entry_done);
2150
__ block_comment("} dtrace entry");
2154
__ block_comment("dtrace exit {");
2155
__ bind(dtrace_method_exit);
2156
save_native_result(masm, ret_type, stack_slots);
2157
__ mov_metadata(c_rarg1, method());
2159
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2161
restore_native_result(masm, ret_type, stack_slots);
2162
__ b(dtrace_method_exit_done);
2163
__ block_comment("} dtrace exit");
2169
nmethod *nm = nmethod::new_native_nmethod(method,
2174
stack_slots / VMRegImpl::slots_per_word,
2175
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2176
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2182
// this function returns the adjustment size (in number of words) to a c2i adapter
2183
// activation for use during deoptimization
2184
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2185
assert(callee_locals >= callee_parameters,
2186
"test and remove; got more parms than locals");
2187
if (callee_locals < callee_parameters)
2188
return 0; // No adjustment for negative locals
2189
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2190
// diff is counted in stack words
2191
return align_up(diff, 2);
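// For example, assuming Interpreter::stackElementWords == 1 (the 64-bit
// case): 2 parameters and 5 locals give diff == 3, rounded up to 4 so the
// adjusted frame stays 16-byte aligned (2 words == 16 bytes on aarch64).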
2195
//------------------------------generate_deopt_blob----------------------------
2196
void SharedRuntime::generate_deopt_blob() {
2197
// Allocate space for the code
2199
// Setup code generation tools
2203
pad += 512; // Increase the buffer size when compiling for JVMCI
2206
CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2207
MacroAssembler* masm = new MacroAssembler(&buffer);
2208
int frame_size_in_words;
2209
OopMap* map = nullptr;
2210
OopMapSet *oop_maps = new OopMapSet();
2211
RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2214
// This code enters when returning to a de-optimized nmethod. A return
2215
// address has been pushed on the stack, and return values are in
2217
// If we are doing a normal deopt then we were called from the patched
2218
// nmethod from the point we returned to the nmethod. So the return
2219
// address on the stack is wrong by NativeCall::instruction_size
2220
// We will adjust the value so it looks like we have the original return
2221
// address on the stack (like when we eagerly deoptimized).
2222
// In the case of an exception pending when deoptimizing, we enter
2223
// with a return address on the stack that points after the call we patched
2224
// into the exception handler. We have the following register state from,
2225
// e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2226
// r0: exception oop
2227
// r19: exception handler
2229
// So in this case we simply jam r3 into the useless return address and
2230
// the stack looks just like we want.
2232
// At this point we need to de-opt. We save the argument return
2233
// registers. We call the first C routine, fetch_unroll_info(). This
2234
// routine captures the return values and returns a structure which
2235
// describes the current frame size and the sizes of all replacement frames.
2236
// The current frame is compiled code and may contain many inlined
2237
// functions, each with their own JVM state. We pop the current frame, then
2238
// push all the new frames. Then we call the C routine unpack_frames() to
2239
// populate these frames. Finally unpack_frames() returns us the new target
2240
// address. Notice that callee-save registers are BLOWN here; they have
2241
// already been captured in the vframeArray at the time the return PC was
2243
address start = __ pc();
2246
// Prolog for the non-exception case!
2248
// Save everything in sight.
2249
map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2251
// Normal deoptimization. Save exec mode for unpack_frames.
2252
__ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2255
int reexecute_offset = __ pc() - start;
2256
#if INCLUDE_JVMCI && !defined(COMPILER1)
2257
if (EnableJVMCI && UseJVMCICompiler) {
2258
// JVMCI does not use this kind of deoptimization
2259
__ should_not_reach_here();
2264
// return address is the pc that describes what bci to re-execute at
2266
// No need to update map as each call to save_live_registers will produce identical oopmap
2267
(void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2269
__ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2273
Label after_fetch_unroll_info_call;
2274
int implicit_exception_uncommon_trap_offset = 0;
2275
int uncommon_trap_offset = 0;
2278
implicit_exception_uncommon_trap_offset = __ pc() - start;
2280
__ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2281
__ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2283
uncommon_trap_offset = __ pc() - start;
2285
// Save everything in sight.
2286
reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2287
// fetch_unroll_info needs to call last_java_frame()
2289
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2291
__ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2292
__ movw(rscratch1, -1);
2293
__ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2295
__ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2296
__ mov(c_rarg0, rthread);
2297
__ movw(c_rarg2, rcpool); // exec mode
2299
RuntimeAddress(CAST_FROM_FN_PTR(address,
2300
Deoptimization::uncommon_trap)));
2303
oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2305
__ reset_last_Java_frame(false);
2307
__ b(after_fetch_unroll_info_call);
2309
#endif // INCLUDE_JVMCI
2311
int exception_offset = __ pc() - start;
2313
// Prolog for exception case
2315
// all registers are dead at this entry point, except for r0, and
2316
// r3 which contain the exception oop and exception pc
2317
// respectively. Set them in TLS and fall thru to the
2318
// unpack_with_exception_in_tls entry point.
2320
__ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2321
__ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2323
int exception_in_tls_offset = __ pc() - start;
2325
// new implementation because exception oop is now passed in JavaThread
2327
// Prolog for exception case
2328
// All registers must be preserved because they might be used by LinearScan
2329
// Exception oop and throwing PC are passed in JavaThread
2330
// tos: stack at point of call to method that threw the exception (i.e. only
2331
// args are on the stack, no return address)
2333
// The return address pushed by save_live_registers will be patched
2334
// later with the throwing pc. The correct value is not available
2335
// now because loading it from memory would destroy registers.
2337
// NB: The SP at this point must be the SP of the method that is
2338
// being deoptimized. Deoptimization assumes that the frame created
2339
// here by save_live_registers is immediately below the method's SP.
2340
// This is a somewhat fragile mechanism.
2342
// Save everything in sight.
2343
map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2345
// Now it is safe to overwrite any register
2347
// Deopt during an exception. Save exec mode for unpack_frames.
2348
__ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2350
// load throwing pc from JavaThread and patch it as the return address
2351
// of the current frame. Then clear the field in JavaThread
2352
__ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2353
__ protect_return_address(r3);
2354
__ str(r3, Address(rfp, wordSize));
2355
__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2358
// verify that there is really an exception oop in JavaThread
2359
__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2362
// verify that there is no pending exception
2363
Label no_pending_exception;
2364
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2365
__ cbz(rscratch1, no_pending_exception);
2366
__ stop("must not have pending exception here");
2367
__ bind(no_pending_exception);
2372
// Call C code. Need thread and this frame, but NOT official VM entry
2373
// crud. We cannot block on this call, no GC can happen.
2375
// UnrollBlock* fetch_unroll_info(JavaThread* thread)
2377
// fetch_unroll_info needs to call last_java_frame().
2380
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2383
__ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2384
__ cbz(rscratch1, L);
2385
__ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2389
__ mov(c_rarg0, rthread);
2390
__ mov(c_rarg1, rcpool);
2391
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2395
// Need to have an oopmap that tells fetch_unroll_info where to
2396
// find any register it might need.
2397
oop_maps->add_gc_map(__ pc() - start, map);
2399
__ reset_last_Java_frame(false);
2403
__ bind(after_fetch_unroll_info_call);
2407
// Load UnrollBlock* into r5
2410
__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2412
__ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
2413
__ br(Assembler::NE, noException);
2414
__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2415
// QQQ this is useless it was null above
2416
__ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2417
__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2418
__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2422
// Overwrite the result registers with the exception results.
2423
__ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2424
// I think this is useless
2425
// __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2427
__ bind(noException);
2429
// Only register save data is on the stack.
2430
// Now restore the result registers. Everything else is either dead
2431
// or captured in the vframeArray.
2433
// Restore fp result register
2434
__ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2435
// Restore integer result register
2436
__ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2438
// Pop all of the register save area off the stack
2439
__ add(sp, sp, frame_size_in_words * wordSize);
2441
// All of the register save area has been popped off the stack. Only the
2442
// return address remains.
2444
// Pop all the frames we must move/replace.
2446
// Frame picture (youngest to oldest)
2447
// 1: self-frame (no frame link)
2448
// 2: deopting frame (no frame link)
2449
// 3: caller of deopting frame (could be compiled/interpreted).
2451
// Note: by leaving the return address of self-frame on the stack
2452
// and using the size of frame 2 to adjust the stack
2453
// when we are done the return to frame 3 will still be on the stack.
2455
// Pop deoptimized frame
2456
__ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2457
__ sub(r2, r2, 2 * wordSize);
2459
__ ldp(rfp, zr, __ post(sp, 2 * wordSize));
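// The 2*wordSize subtracted above accounts for the saved fp and return
// address popped here by the ldp: rfp is restored, the dead return address
// is discarded into zr, and r2 now holds the rest of the deoptimized frame
// size still to be popped.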
2462
// Compilers generate code that bangs the stack by as much as the
2463
// interpreter would need. So this stack banging should never
2464
// trigger a fault. Verify that it does not on non product builds.
2465
__ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2466
__ bang_stack_size(r19, r2);
2468
// Load address of array of frame pcs into r2
2469
__ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2472
// __ addptr(sp, wordSize); FIXME ????
2474
// Load address of array of frame sizes into r4
2475
__ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2477
// Load counter into r3
2478
__ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2480
// Now adjust the caller's stack to make up for the extra locals
2481
// but record the original sp so that we can save it in the skeletal interpreter
2482
// frame and the stack walking of interpreter_sender will get the unextended sp
2483
// value and not the "real" sp value.
2485
const Register sender_sp = r6;
2487
__ mov(sender_sp, sp);
2488
__ ldrw(r19, Address(r5,
2489
Deoptimization::UnrollBlock::
2490
caller_adjustment_offset()));
2491
__ sub(sp, sp, r19);
2493
// Push interpreter frames in a loop
2494
__ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern
2495
__ mov(rscratch2, rscratch1);
2498
__ ldr(r19, Address(__ post(r4, wordSize))); // Load frame size
2499
__ sub(r19, r19, 2*wordSize); // We'll push pc and fp by hand
2500
__ ldr(lr, Address(__ post(r2, wordSize))); // Load pc
2501
__ enter(); // Save old & set new fp
2502
__ sub(sp, sp, r19); // Prolog
2503
// This value is corrected by layout_activation_impl
2504
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2505
__ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2506
__ mov(sender_sp, sp); // Pass sender_sp to next frame
2507
__ sub(r3, r3, 1); // Decrement counter
2510
// Re-push self-frame
2511
__ ldr(lr, Address(r2));
2514
// Allocate a full sized register save area. We subtract 2 because
2515
// enter() just pushed 2 words
2516
__ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2518
// Restore frame locals after moving the frame
2519
__ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2520
__ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2522
// Call C code. Need thread but NOT official VM entry
2523
// crud. We cannot block on this call, no GC can happen. Call should
2524
// restore return values to their stack-slots with the new SP.
2526
// void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2528
// Use rfp because the frames look interpreted now
2529
// Don't need the precise return PC here, just precise enough to point into this code blob.
2530
address the_pc = __ pc();
2531
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2533
__ mov(c_rarg0, rthread);
2534
__ movw(c_rarg1, rcpool); // second arg: exec_mode
2535
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2538
// Set an oopmap for the call site
2539
// Use the same PC we used for the last java frame
2540
oop_maps->add_gc_map(the_pc - start,
2541
new OopMap( frame_size_in_words, 0 ));
2544
__ reset_last_Java_frame(true);
2546
// Collect return values
2547
__ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2548
__ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2549
// I think this is useless (throwing pc?)
2550
// __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2553
__ leave(); // Epilog
2555
// Jump to interpreter
2558
// Make sure all code is generated
2561
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2562
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2565
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2566
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2571
// Number of stack slots between incoming argument block and the start of
2572
// a new frame. The PROLOG must add this many slots to the stack. The
2573
// EPILOG must remove this many slots. aarch64 needs two slots for
2574
// return address and fp.
2575
// TODO think this is correct but check
2576
uint SharedRuntime::in_preserve_stack_slots() {
2580
uint SharedRuntime::out_preserve_stack_slots() {
2585
//------------------------------generate_uncommon_trap_blob--------------------
2586
void SharedRuntime::generate_uncommon_trap_blob() {
2587
// Allocate space for the code
2589
// Setup code generation tools
2590
CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2591
MacroAssembler* masm = new MacroAssembler(&buffer);
2593
assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2595
address start = __ pc();
2597
// Push self-frame. We get here with a return address in LR
2598
// and sp should be 16 byte aligned
2599
// push rfp and retaddr by hand
2600
__ protect_return_address();
2601
__ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2602
// we don't expect an arg reg save area
2604
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2606
// compiler left unloaded_class_index in j_rarg0; move it to where the
2607
// runtime expects it.
2608
if (c_rarg1 != j_rarg0) {
2609
__ movw(c_rarg1, j_rarg0);
2612
// we need to set the last Java SP to the stack pointer of the stub frame
2613
// and the pc to the address where this runtime call will return
2614
// (although actually any pc in this code blob will do).
2616
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2618
// Call C code. Need thread but NOT official VM entry
2619
// crud. We cannot block on this call, no GC can happen. Call should
2620
// capture callee-saved registers as well as return values.
2621
// Thread is passed in c_rarg0 below.
2623
// UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2625
// n.b. 2 gp args, 0 fp args, integral return type
2627
__ mov(c_rarg0, rthread);
2628
__ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2630
RuntimeAddress(CAST_FROM_FN_PTR(address,
2631
Deoptimization::uncommon_trap)));
2635
// Set an oopmap for the call site
2636
OopMapSet* oop_maps = new OopMapSet();
2637
OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2639
// location of rfp is known implicitly by the frame sender code
2641
oop_maps->add_gc_map(__ pc() - start, map);
2643
__ reset_last_Java_frame(false);
2645
// move UnrollBlock* into r4
2650
__ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset()));
2651
__ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2652
__ br(Assembler::EQ, L);
2653
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2658
// Pop all the frames we must move/replace.
2660
// Frame picture (youngest to oldest)
2661
// 1: self-frame (no frame link)
2662
// 2: deopting frame (no frame link)
2663
// 3: caller of deopting frame (could be compiled/interpreted).
2665
// Pop self-frame. We have no frame, and must rely only on r0 and sp.
2666
__ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
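// SimpleRuntimeFrame::framesize counts 32-bit stack slots, so shifting by
// LogBytesPerInt (2) converts it to a byte count for the sp adjustment.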
2668
// Pop deoptimized frame (int)
2669
__ ldrw(r2, Address(r4,
2670
Deoptimization::UnrollBlock::
2671
size_of_deoptimized_frame_offset()));
2672
__ sub(r2, r2, 2 * wordSize);
2674
__ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2677
// Compilers generate code that bangs the stack by as much as the
2678
// interpreter would need. So this stack banging should never
2679
// trigger a fault. Verify that it does not on non product builds.
2680
__ ldrw(r1, Address(r4,
2681
Deoptimization::UnrollBlock::
2682
total_frame_sizes_offset()));
2683
__ bang_stack_size(r1, r2);
2686
// Load address of array of frame pcs into r2 (address*)
2687
__ ldr(r2, Address(r4,
2688
Deoptimization::UnrollBlock::frame_pcs_offset()));
2690
// Load address of array of frame sizes into r5 (intptr_t*)
2691
__ ldr(r5, Address(r4,
2692
Deoptimization::UnrollBlock::
2693
frame_sizes_offset()));
2696
__ ldrw(r3, Address(r4,
2697
Deoptimization::UnrollBlock::
2698
number_of_frames_offset())); // (int)
2700
// Now adjust the caller's stack to make up for the extra locals but
2701
// record the original sp so that we can save it in the skeletal
2702
// interpreter frame and the stack walking of interpreter_sender
2703
// will get the unextended sp value and not the "real" sp value.
2705
const Register sender_sp = r8;
2707
__ mov(sender_sp, sp);
2708
__ ldrw(r1, Address(r4,
2709
Deoptimization::UnrollBlock::
2710
caller_adjustment_offset())); // (int)
2713
// Push interpreter frames in a loop
2716
__ ldr(r1, Address(r5, 0)); // Load frame size
2717
__ sub(r1, r1, 2 * wordSize); // We'll push pc and rfp by hand
2718
__ ldr(lr, Address(r2, 0)); // Save return address
2719
__ enter(); // and old rfp & set new rfp
2720
__ sub(sp, sp, r1); // Prolog
2721
__ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2722
// This value is corrected by layout_activation_impl
2723
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2724
__ mov(sender_sp, sp); // Pass sender_sp to next frame
2725
__ add(r5, r5, wordSize); // Bump array pointer (sizes)
2726
__ add(r2, r2, wordSize); // Bump array pointer (pcs)
2727
__ subsw(r3, r3, 1); // Decrement counter
2728
__ br(Assembler::GT, loop);
2729
__ ldr(lr, Address(r2, 0)); // save final return address
2730
// Re-push self-frame
2731
__ enter(); // & old rfp & set new rfp
2733
// Use rfp because the frames look interpreted now
2734
// Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2735
// Don't need the precise return PC here, just precise enough to point into this code blob.
2736
address the_pc = __ pc();
2737
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2739
// Call C code. Need thread but NOT official VM entry
2740
// crud. We cannot block on this call, no GC can happen. Call should
2741
// restore return values to their stack-slots with the new SP.
2742
// Thread is passed in c_rarg0 below.
2744
// BasicType unpack_frames(JavaThread* thread, int exec_mode);
2746
// n.b. 2 gp args, 0 fp args, integral return type
2748
// sp should already be aligned
2749
__ mov(c_rarg0, rthread);
2750
__ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2751
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2754
// Set an oopmap for the call site
2755
// Use the same PC we used for the last java frame
2756
oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2759
__ reset_last_Java_frame(true);
2762
__ leave(); // Epilog
2764
// Jump to interpreter
2767
// Make sure all code is generated
2770
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
2771
SimpleRuntimeFrame::framesize >> 1);
2776
//------------------------------generate_handler_blob------
2778
// Generate a special Compile2Runtime blob that saves all registers,
2781
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2783
OopMapSet *oop_maps = new OopMapSet();
2786
// Allocate space for the code. Setup code generation tools.
2787
CodeBuffer buffer("handler_blob", 2048, 1024);
2788
MacroAssembler* masm = new MacroAssembler(&buffer);
2790
address start = __ pc();
2791
address call_pc = nullptr;
2792
int frame_size_in_words;
2793
bool cause_return = (poll_type == POLL_AT_RETURN);
2794
RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
2796
// When the signal occurred, the LR was either signed and stored on the stack (in which
2797
// case it will be restored from the stack before being used) or unsigned and not stored
2798
// on the stack. Stripping ensures we get the right value.
2799
__ strip_return_address();
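// With pointer authentication enabled this removes the PAC signature bits
// from lr so the raw pc can be compared and stored below; without PAC it is
// effectively a no-op.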
2801
// Save Integer and Float registers.
2802
map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2804
// The following is basically a call_VM. However, we need the precise
2805
// address of the call in order to generate an oopmap. Hence, we do all the
2809
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2811
// The return address must always be correct so that frame constructor never
2812
// sees an invalid pc.
2814
if (!cause_return) {
2815
// overwrite the return address pushed by save_live_registers
2816
// Additionally, r20 is a callee-saved register so we can look at
2817
// it later to determine if someone changed the return address for
2819
__ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2820
__ protect_return_address(r20);
2821
__ str(r20, Address(rfp, wordSize));
2825
__ mov(c_rarg0, rthread);
2826
__ lea(rscratch1, RuntimeAddress(call_ptr));
2830
// Set an oopmap for the call site. This oopmap will map all
2831
// oop-registers and debug-info registers as callee-saved. This
2832
// will allow deoptimization at this safepoint to find all possible
2833
// debug-info recordings, as well as let GC find all oops.
2835
oop_maps->add_gc_map( __ pc() - start, map);
2839
__ reset_last_Java_frame(false);
2841
__ membar(Assembler::LoadLoad | Assembler::LoadStore);
2843
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2844
__ cbz(rscratch1, noException);
2846
// Exception pending
2848
reg_save.restore_live_registers(masm);
2850
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2852
// No exception case
2853
__ bind(noException);
2855
Label no_adjust, bail;
2856
if (!cause_return) {
2857
// If our stashed return pc was modified by the runtime we avoid touching it
2858
__ ldr(rscratch1, Address(rfp, wordSize));
2859
__ cmp(r20, rscratch1);
2860
__ br(Assembler::NE, no_adjust);
2861
__ authenticate_return_address(r20);
2864
// Verify the correct encoding of the poll we're about to skip.
2865
// See NativeInstruction::is_ldrw_to_zr()
2866
__ ldrw(rscratch1, Address(r20));
2867
__ ubfx(rscratch2, rscratch1, 22, 10);
2868
__ cmpw(rscratch2, 0b1011100101);
2869
__ br(Assembler::NE, bail);
2870
__ ubfx(rscratch2, rscratch1, 0, 5);
2871
__ cmpw(rscratch2, 0b11111);
2872
__ br(Assembler::NE, bail);
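// The two field extracts above check, in effect, that the instruction at the
// saved pc is 'ldr wzr, [xN, #imm]': bits [31:22] must be the 32-bit
// load-immediate opcode pattern and bits [4:0] (Rt) must name wzr.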
2874
// Adjust return pc forward to step over the safepoint poll instruction
2875
__ add(r20, r20, NativeInstruction::instruction_size);
2876
__ protect_return_address(r20);
2877
__ str(r20, Address(rfp, wordSize));
2881
// Normal exit, restore registers and exit.
2882
reg_save.restore_live_registers(masm);
2888
__ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2891
// Make sure all code is generated
2894
// Fill-out other meta info
2895
return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2899
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2901
// Generate a stub that calls into vm to find out the proper destination
2902
// of a java call. All the argument registers are live at this point
2903
// but since this is generic code we don't know what they are and the caller
2904
// must do any gc of the args.
2906
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2907
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2909
// allocate space for the code
2912
CodeBuffer buffer(name, 1000, 512);
2913
MacroAssembler* masm = new MacroAssembler(&buffer);
2915
int frame_size_in_words;
2916
RegisterSaver reg_save(false /* save_vectors */);
2918
OopMapSet *oop_maps = new OopMapSet();
2919
OopMap* map = nullptr;
2921
int start = __ offset();
2923
map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2925
int frame_complete = __ offset();
2929
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2931
__ mov(c_rarg0, rthread);
2932
__ lea(rscratch1, RuntimeAddress(destination));
2938
// Set an oopmap for the call site.
2939
// We need this not only for callee-saved registers, but also for volatile
2940
// registers that the compiler might be keeping live across a safepoint.
2942
oop_maps->add_gc_map( __ offset() - start, map);
2944
// r0 contains the address we are going to jump to assuming no exception got installed
2946
// clear last_Java_sp
2947
__ reset_last_Java_frame(false);
2948
// check for pending exceptions
2950
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2951
__ cbnz(rscratch1, pending);
2953
// get the returned Method*
2954
__ get_vm_result_2(rmethod, rthread);
2955
__ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2957
// r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
2958
__ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2959
reg_save.restore_live_registers(masm);
2961
// We are back to the original state on entry and ready to go.
2965
// Pending exception after the safepoint
2969
reg_save.restore_live_registers(masm);
2971
// exception pending => remove activation and forward to exception handler
2973
__ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2975
__ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2976
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2979
// make sure all code is generated
2983
// frame_size_words or bytes??
2984
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2988
// This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
2990
//------------------------------generate_exception_blob---------------------------
2991
// creates exception blob at the end
2992
// Using exception blob, this code is jumped from a compiled method.
2993
// (see emit_exception_handler in aarch64.ad file)
2995
// Given an exception pc at a call we call into the runtime for the
2996
// handler in this method. This handler might merely restore state
2997
// (i.e. callee save registers), unwind the frame, and jump to the
2998
// exception handler for the nmethod if there is no Java level handler
3001
// This code is entered with a jmp.
3009
// r3: exception pc in caller or ???
3010
// destination: exception handler of caller
3012
// Note: the exception pc MUST be at a call (precise debug information)
3013
// Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
3016
void OptoRuntime::generate_exception_blob() {
3017
assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
3018
assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
3019
assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
3021
assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3023
// Allocate space for the code
3025
// Setup code generation tools
3026
CodeBuffer buffer("exception_blob", 2048, 1024);
3027
MacroAssembler* masm = new MacroAssembler(&buffer);
3029
// TODO check various assumptions made here
3031
// make sure we do so before running this
3033
address start = __ pc();
3035
// push rfp and retaddr by hand
3036
// Exception pc is 'return address' for stack walker
3037
__ protect_return_address();
3038
__ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
3039
// there are no callee save registers and we don't expect an
3040
// arg reg save area
3042
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
3044
// Store exception in Thread object. We cannot pass any arguments to the
3045
// handle_exception call, since we do not want to make any assumption
3046
// about the size of the frame where the exception happened in.
3047
__ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
3048
__ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
3050
// This call does all the hard work. It checks if an exception handler
3051
// exists in the method.
3052
// If so, it returns the handler address.
3053
// If not, it prepares for stack-unwinding, restoring the callee-save
3054
// registers of the frame being removed.
3056
// address OptoRuntime::handle_exception_C(JavaThread* thread)
3058
// n.b. 1 gp arg, 0 fp args, integral return type
3060
// the stack should always be aligned
3061
address the_pc = __ pc();
3062
__ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
3063
__ mov(c_rarg0, rthread);
3064
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3066
// handle_exception_C is a special VM call which does not require an explicit
3067
// instruction sync afterwards.
3069
// May jump to SVE compiled code
3070
__ reinitialize_ptrue();
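// The runtime call may have clobbered the predicate register that SVE
// compiled code assumes is all-true, so it is re-seeded here before we can
// land back in SVE code.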
3072
// Set an oopmap for the call site. This oopmap will only be used if we
3073
// are unwinding the stack. Hence, all locations will be dead.
3074
// Callee-saved registers will be the same as the frame above (i.e.,
3075
// handle_exception_stub), since they were restored when we got the
3078
OopMapSet* oop_maps = new OopMapSet();
3080
oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3082
__ reset_last_Java_frame(false);
3084
// Restore callee-saved registers
3086
// rfp is an implicitly saved callee saved register (i.e. the calling
3087
// convention will save/restore it in prolog/epilog). Other than that
3088
// there are no callee save registers now that adapter frames are gone.
3089
// and we don't expect an arg reg save area
3090
__ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
3091
__ authenticate_return_address(r3);
3093
// r0: exception handler
3095
// We have a handler in r0 (could be deopt blob).
3098
// Get the exception oop
3099
__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
3100
// Get the exception pc in case we are deoptimized
3101
__ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
3103
__ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3104
__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3106
// Clear the exception oop so GC no longer processes it as a root.
3107
__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3109
// r0: exception oop
3110
// r8: exception handler
3116
// Make sure all code is generated
3119
// Set exception blob
3120
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);