#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved by set_last_Java_frame
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Forward a pending exception through the shared stub, unless this
  // is the forward_exception stub itself.
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have a pending exception in the forward_exception stub.
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  assert(arg1 == R1, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 1);
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
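
// Note: args_size is not otherwise consumed on ARM; the Register overloads
// above only assert that the arguments are already in R1..R3, so marshalling
// is the caller's job. A typical stub body therefore looks like this sketch
// (illustrative, mirroring the patterns used later in this file; `entry` is
// whatever runtime function the stub wraps):
//
//   OopMap* map = save_live_registers(sasm);
//   __ ldr(R1, Address(SP, arg1_offset));                  // reload spilled stub argument
//   int call_offset = __ call_RT(noreg, noreg, entry, R1); // records the gc-map offset
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);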

#define __ sasm->

// Layout of the register save area used by the stubs below.
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,
#endif
  R0_offset = fpu_save_size,
  R1_offset, R2_offset, R3_offset, R4_offset, R5_offset, R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,   // only saved here when R7 is not used as FP
#endif
  R8_offset, R9_offset, R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,  // only saved here when R11 is not used as FP
#endif
  R12_offset,
  FP_offset,
  LR_offset,

  reg_save_size,

  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};
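
// Resulting save-area layout, in word offsets from SP right after
// save_live_registers (a reading of the enum above, not a comment carried
// over from the original):
//   [0 .. fpu_save_size-1]       FPU registers (D0..), or just reserved space
//   [R0_offset .. R12_offset]    integer registers, with the FP register's slot skipped
//   [FP_offset]                  saved FP
//   [LR_offset]                  saved LR (return address, patched by exception code)
//   [arg1_offset], [arg2_offset] stub arguments, just above the save area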

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from SP after the runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  int j = 0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
  map->set_callee_saved(VMRegImpl::stack2reg(R10_offset), R10->as_VMReg());
#if (FP_REG_NUM != 11)
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fpush(FloatRegisterSet(D0, fpu_save_size / 2));
  } else {
    // Still reserve the FPU area so the frame layout stays the same.
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fpop(FloatRegisterSet(D0, fpu_save_size / 2));
    if (!restore_R0) {
      // Skip over the saved R0 so the pop below starts at R1.
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert(!do_return, "return without restoring FP/LR");
  }
}

static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}

static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}

static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}

void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}

void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}
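
// The four wrappers above select (restore_R0, restore_FP_LR, do_return):
//   restore_live_registers_except_R0      -> (false, true,  true )  R0 carries a result
//   restore_live_registers                -> (true,  true,  true )  full restore, pops PC to return
//   restore_live_registers_except_FP_LR   -> (true,  false, false)  caller pops FP/LR itself
//   restore_live_registers_without_return -> (true,  true,  false)  restores LR but falls through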

void Runtime1::initialize_pd() {
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not return
  return oop_maps;
}

static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved copy if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
}
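
// generate_handle_exception expects the exception oop in Rexception_obj and
// the throwing pc in Rexception_pc (or materializes them itself for
// forward_exception_id), stores both into the thread, asks
// exception_handler_for_pc for the handler, patches the handler address into
// the saved-LR slot, and leaves through it.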

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop and exception pc are dead.
    oop_map = save_live_registers(sasm);
    break;
  default: ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found: patch the return address so the register restore returns into it.
  __ str(R0, Address(SP, LR_offset * wordSize));

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm);
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default: ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not return
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception_obj);
    restore_live_registers(sasm);
  }

  // On entry the frame has already been removed; LR holds the return address.
  // Search the exception handler of the caller using that return address.
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR); // preserve the throwing pc
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // R0 contains the handler address; the exception oop is still in Rexception_obj.
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  __ jump(R0);
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq); // return if no deoptimization is needed

  // Deoptimization needed: pop the frame and enter the deopt blob.
  __ pop(RegisterSet(FP) | RegisterSet(LR));
  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not return
  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = nullptr;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        const Register result = R0;
        const Register klass  = R1;

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass, R2: rank, SP: address of the 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        __ mov(R1, R0); // klass becomes the first runtime argument
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call the runtime if JVM_ACC_HAS_FINALIZER is not set.
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);

        // Call the VM.
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub klass, destroyed
        // (in)  R1 - super klass, not changed
        // (out) R0 - 1 if the check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load the array of secondary supers and its length.
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        Label loop, miss;
        __ bind(loop);
        __ subs(R3, R3, 1);
        __ b(miss, lt);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ cmp(LR, R1);
        __ b(loop, ne);

        // Match found: update the secondary super cache and return 1.
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;
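
    // The loop above is essentially this C sketch (illustrative only, with
    // hypothetical accessor names):
    //
    //   Klass** p = sub->secondary_supers()->data();
    //   for (int n = sub->secondary_supers()->length(); --n >= 0; ) {
    //     if (*p++ == super) {
    //       sub->set_secondary_super_cache(super);  // hit: cache and return 1
    //       return 1;
    //     }
    //   }
    //   return 0;                                   // miss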

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}

#undef __

#ifdef __SOFTFP__
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
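
  // For example, FUNCTION_CASE(entry, __aeabi_fmul) expands to:
  //   if ((intptr_t)entry == CAST_FROM_FN_PTR(intptr_t, __aeabi_fmul)) return "__aeabi_fmul";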

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);

#undef FUNCTION_CASE

  return "";
}

#else // __SOFTFP__

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#endif // __SOFTFP__