#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"
// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Mask the address down to a 16-byte boundary to obtain a 128-bit
  // aligned operand for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer of 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits of data + 128 bits of alignment slack

static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
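
// Illustrative sketch (assumed masm calls, not part of this file): the pools
// above provide guaranteed 16-byte aligned 128-bit masks so that float sign
// manipulation reduces to a single XMM logical op, roughly:
//
//   __ andps(dst, ExternalAddress((address)float_signmask_pool));  // AbsF: clear sign bits
//   __ xorps(dst, ExternalAddress((address)float_signflip_pool));  // NegF: flip sign bits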
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}

static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    // bail out, but return a valid constant-section address so emission can
    // continue until the bailout is noticed
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}
void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      // displacement is out of 32-bit reach: materialize it in tmp first
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");
    return Address(base, addr_offset);
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
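
// Worked example (illustrative, not from the original source): a LIR address
// with base=rbx, index=rsi, scale=2 (log2 of the element size) and disp=16
// maps onto the x86 addressing mode [rbx + rsi*4 + 16], i.e.
//
//   Address(rbx, rsi, Address::times_4, 16)
//
// as_Address_lo/as_Address_hi select the two 32-bit halves of a 64-bit
// value, which sit BytesPerWord apart.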
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // copy the monitors from the OSR buffer into the interpreter frame slots
  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify that the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
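
// OSR buffer layout assumed by the loop above (a sketch; offsets follow the
// arithmetic in the code): locals occupy the first max_locals() words, and
// each monitor contributes a two-word (lock word, object) BasicObjectLock
// pair after them. slot_offset therefore steps down by 2*BytesPerWord per
// monitor, and each pair is copied into the compiled frame's monitor slots.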
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;
  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
  assert(thread != noreg, "x86_32 not implemented");

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, thread, &L_skip_barrier);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // frame_map()->framesize() includes the return address and the link,
  // which are already on the stack, hence the two-word deduction.
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx; no other registers need
  // to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
// Emit the code to remove the frame from the stack in the exception unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    if (LockingMode == LM_MONITOR) {
      __ jmp(*stub->entry());
    } else {
      __ unlock_object(rdi, rsi, rax, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr(), rscratch1);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
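
// Why push-then-jump: pushing the current pc and jumping to the unpack blob
// fakes a call into the deoptimization blob, so the blob sees a return
// address identifying this nmethod's deopt site. A rough equivalent:
//
//   push <address of the push itself>   ; synthesize a return address
//   jmp  SharedRuntime::deopt_blob()->unpack()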
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = rbx;
  __ get_thread(thread);
#endif
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
#ifdef _LP64
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
#else
  assert(tmp->is_cpu_register(), "needed");
  const Register poll_addr = tmp->as_register();
  __ get_thread(poll_addr);
  __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
#endif
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
  return offset;
}
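
// Encoding-length note (reasoning behind the guarantee above): "testl rax,
// [poll_addr]" is opcode + ModRM, 2 bytes, on 32-bit; on 64-bit poll_addr is
// rscratch1 (r10), whose encoding needs a REX prefix, making it 3 bytes. The
// safepoint machinery relies on the poll instruction having this exact
// length when it maps a faulting pc back to the poll site.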
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    case T_ADDRESS:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    case T_OBJECT:
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    case T_METADATA:
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    case T_FLOAT:
      if (dest->is_single_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    case T_DOUBLE:
      if (dest->is_double_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;
    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;
    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;
    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;
    default:
      ShouldNotReachHere();
  }
}
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject(), noreg);
#endif
        }
      }
      break;
    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;
    case T_CHAR:  // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;
    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
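
// Note on null_check_here: for stores whose null check is folded into the
// memory access itself, code_offset() is sampled right before the
// instruction that can fault, and add_debug_info_for_null_check records that
// pc so a SIGSEGV at this address can be mapped back to an implicit null
// check site instead of crashing the VM.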
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (is_reference_type(src->type())) {
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");

    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // _LP64

#ifndef _LP64
    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
#endif // !_LP64

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

#ifndef _LP64
    // move between fpu-registers (no instruction necessary because of fpu-stack allocation)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}
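
// x87<->XMM transfers above have no direct register-to-register encoding on
// 32-bit x86, so values are bounced through the top of the caller-reserved
// stack slot, roughly:
//
//   movflt [rsp], xmm0   ; spill the XMM value
//   fld_s  [rsp]         ; reload it onto the x87 stack (or fstp_s/movflt
//                        ; for the opposite direction)
//
// This is only needed for method results under the 32-bit calling
// convention, where float/double results live in st(0).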
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

#ifndef _LP64
  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack) __ fstp_s (dst_addr);
    else               __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack) __ fstp_d (dst_addr);
    else               __ fst_d  (dst_addr);
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
#ifdef _LP64
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
#else
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
        else               __ fst_s (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_DOUBLE: {
#ifdef _LP64
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
#else
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
        else               __ fst_d (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // storing a method pointer for a dtrace runtime call; not possible
      // with compressed klass pointers on 64-bit
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

#ifndef _LP64
  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl/popl of 32-bit slots on 64-bit: go through the scratch register
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // pre-P6 processors suffer partial register stalls, so blow away the
        // destination first; do it here so it precedes the patch point below
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of array
        // access, so this code never has to deal with patches or null checks
        assert(info == nullptr && patch == nullptr, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}
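
// Compressed-oops load path in sketch form (flag settings assumed for
// illustration): with UseCompressedOops and a narrow (!wide) access, an
// object field load is a 32-bit movl followed by decode_heap_oop, roughly
//
//   movl   dst, [base + offset]   ; load the narrow oop
//   decode_heap_oop dst           ; dst = heap_base + (dst << shift)
//
// (base and shift depend on the heap layout), whereas the wide path is a
// plain full-width movptr.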
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
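
// Why float branches use the unsigned condition codes: ucomiss/ucomisd set
// ZF/PF/CF rather than the signed flags, and set PF when either operand is
// NaN. The parity jump above routes the unordered (NaN) case to ublock, and
// lir_cond_less maps to Assembler::below (CF set) instead of Assembler::less
// (SF != OF), matching the flag encoding of the FP comparison.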
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // _LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;

#ifdef _LP64
    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;
#else
    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");
      __ movptr(Address(rsp, 0),          src->as_register_lo());
      __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
      __ fild_d(Address(rsp, 0));
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
      }
      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != nullptr, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence is too long to inline here
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      break;
#endif // _LP64

    default: ShouldNotReachHere();
  }
}
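
// JLS-vs-x86 note for the f2i/d2i fixup above: cvttss2si and friends return
// the "integer indefinite" value 0x80000000 for NaN and out-of-range inputs,
// while the JLS requires NaN -> 0 and saturation to Integer.MIN_VALUE /
// Integer.MAX_VALUE. Comparing against 0x80000000 and branching to the stub
// repairs exactly those cases; in-range conversions skip the stub entirely.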
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find the receiver; find the next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
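
// Shape of the profile being updated (a sketch): a ReceiverTypeData record
// holds (receiver klass, count) pairs, row_limit() rows in total. The first
// loop searches for a matching receiver klass and bumps its count; the
// second claims the first empty row for a new receiver. Receivers that fit
// in no row fall through to the caller, which typically bumps a generic
// (polymorphic) counter instead.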
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  __ testptr(obj, obj);
  if (op->should_profile()) {
    Label not_null;
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
    int header_bits = BitData::null_seen_byte_constant();
    __ orb(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);

    Label update_done;
    Register recv = k_RInfo;
    __ load_klass(recv, obj, tmp_load_klass);
    type_profile_helper(mdo, md, data, recv, &update_done);

    Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);

    __ bind(update_done);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class; not a safepoint as obj null check happens earlier
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj, tmp_load_klass);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#else
    if (k->is_loaded()) {
      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#endif
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class; not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj, tmp_load_klass);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
#ifdef _LP64
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
#ifdef _LP64
        __ cmpptr(klass_RInfo, k_RInfo);
#else
        __ cmpklass(klass_RInfo, k->constant_encoding());
#endif // _LP64
        __ jcc(Assembler::equal, *success_target);

        __ push(klass_RInfo);
#ifdef _LP64
        __ push(k_RInfo);
#else
        __ pushklass(k->constant_encoding(), noreg);
#endif // _LP64
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ pop(klass_RInfo);
        __ pop(klass_RInfo);
        // result is a boolean
        __ testl(klass_RInfo, klass_RInfo);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...)
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(klass_RInfo);
      __ pop(k_RInfo);
      // result is a boolean
      __ testl(k_RInfo, k_RInfo);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ jmp(*success);
}
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = nullptr;
    ciProfileData* data = nullptr;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    __ testptr(value, value);
    if (op->should_profile()) {
      Label not_null;
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, value, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv, &update_done);

      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(counter_addr, DataLayout::counter_increment);
      __ bind(update_done);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array, tmp_load_klass);
    __ load_klass(klass_RInfo, value, tmp_load_klass);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...)
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ pop(klass_RInfo);
    __ pop(k_RInfo);
    // result is a boolean
    __ testl(k_RInfo, k_RInfo);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case
    __ bind(done);

  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }

  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ xorptr(dst, dst);
    __ jmpb(done);
    __ bind(success);
    __ movl(dst, 1);
    __ bind(done);

  } else {
    ShouldNotReachHere();
  }
}
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    __ lock();
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if ( op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        __ lock();
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        __ lock();
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      __ lock();
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    __ lock();
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}
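
// CAS register conventions in sketch form: x86 cmpxchg hard-wires the
// expected value in rax (edx:eax with ecx:ebx for the 32-bit 8-byte form),
// which is why the asserts above pin cmp_value to rax. Semantically,
//
//   lock cmpxchg [addr], newval   ; if ([addr] == rax) [addr] = newval
//                                 ; else rax = [addr]
//
// leaves ZF set on success, and the LIR that consumes the result branches
// on that flag.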
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");

  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
    } else {
      ShouldNotReachHere();
    }

  } else {
    Label skip;
    __ jccb(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
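
// Strategy recap for the branchless path above: the "true" value (opr1) is
// moved into result unconditionally first, then a cmov on the negated
// condition (ncond) overwrites it with opr2 only when the condition fails:
//
//   mov    result, opr1
//   cmovne result, opr2    ; for lir_cond_equal, ncond = notEqual
//
// Constants can't be cmov sources (cmov has no immediate form), hence the
// jccb/branch fallback.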

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
          break;
        case lir_mul:
#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
#else
          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
          __ imull(lreg_hi, rreg_lo);
          __ imull(rreg_hi, lreg_lo);
          __ addl (rreg_hi, lreg_hi);
          __ mull (rreg_lo);
          __ addl (lreg_hi, rreg_hi);
#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }
#else
      jint c_lo = right->as_constant_ptr()->as_jint_lo();
      jint c_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, c_lo);
          __ adcl(lreg_hi, c_hi);
          break;
        case lir_sub:
          __ subptr(lreg_lo, c_lo);
          __ sbbl(lreg_hi, c_hi);
          break;
        default:
          ShouldNotReachHere();
      }
#endif // _LP64

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg);  break;
        case lir_sub: __ subss(lreg, rreg);  break;
        case lir_mul: __ mulss(lreg, rreg);  break;
        case lir_div: __ divss(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr);  break;
        case lir_sub: __ subss(lreg, raddr);  break;
        case lir_mul: __ mulss(lreg, raddr);  break;
        case lir_div: __ divss(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg);  break;
        case lir_sub: __ subsd(lreg, rreg);  break;
        case lir_mul: __ mulsd(lreg, rreg);  break;
        case lir_div: __ divsd(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr);  break;
        case lir_sub: __ subsd(lreg, raddr);  break;
        case lir_mul: __ mulsd(lreg, raddr);  break;
        case lir_div: __ divsd(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "fpu stack allocation required");

    if (right->is_single_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnr() == 0, "left must be on TOS");
      assert(dest->fpu_regnr() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        address const_addr = float_constant(right->as_jfloat());
        assert(const_addr != nullptr, "incorrect float/double constant maintenance");
        raddr = __ as_Address(InternalAddress(const_addr));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_s(raddr); break;
        case lir_sub: __ fsub_s(raddr); break;
        case lir_mul: __ fmul_s(raddr); break;
        case lir_div: __ fdiv_s(raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(dest->is_double_fpu(), "fpu stack allocation required");

    if (code == lir_mul || code == lir_div) {
      // Double values require special handling for strict mul/div on x87:
      // pre-scale by the subnormal bias so the result rounds correctly.
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
      __ fmulp(left->fpu_regnrLo() + 1);
    }

    if (right->is_double_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_d(raddr); break;
        case lir_sub: __ fsub_d(raddr); break;
        case lir_mul: __ fmul_d(raddr); break;
        case lir_div: __ fdiv_d(raddr); break;
        default: ShouldNotReachHere();
      }
    }

    if (code == lir_mul || code == lir_div) {
      // undo the pre-scaling applied above
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
      __ fmulp(dest->fpu_regnrLo() + 1);
    }

  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
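
// Helper for the x87 cases above. All x87 arithmetic is encoded relative
// to the top of the FPU register stack, so the code below selects the
// popping, TOS-destination, or non-TOS-destination form of each opcode
// depending on which operand is on TOS and where the result must end up.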

void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
  assert(left_index == 0 || right_index == 0, "either must be on top of stack");

  bool left_is_tos = (left_index == 0);
  bool dest_is_tos = (dest_index == 0);
  int non_tos_index = (left_is_tos ? right_index : left_index);

  switch (code) {
    case lir_add:
      if (pop_fpu_stack)       __ faddp(non_tos_index);
      else if (dest_is_tos)    __ fadd (non_tos_index);
      else                     __ fadda(non_tos_index);
      break;

    case lir_sub:
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
        else if (dest_is_tos)  __ fsub  (non_tos_index);
        else                   __ fsubra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fsubp (non_tos_index);
        else if (dest_is_tos)  __ fsubr (non_tos_index);
        else                   __ fsuba (non_tos_index);
      }
      break;

    case lir_mul:
      if (pop_fpu_stack)       __ fmulp(non_tos_index);
      else if (dest_is_tos)    __ fmul (non_tos_index);
      else                     __ fmula(non_tos_index);
      break;

    case lir_div:
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
        else if (dest_is_tos)  __ fdiv  (non_tos_index);
        else                   __ fdivra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fdivp (non_tos_index);
        else if (dest_is_tos)  __ fdivr (non_tos_index);
        else                   __ fdiva (non_tos_index);
      }
      break;

    case lir_rem:
      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
      __ fremr(noreg);
      break;

    default:
      ShouldNotReachHere();
  }
}
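
// Unary intrinsics. lir_abs on a double clears the sign bit by masking
// with double_signmask_pool; with UseAVX > 2 but no AVX512VL support,
// vpandn against a temporary is emitted instead of the memory operand.
// lir_f2hf/lir_hf2f convert between float and the float16 representation.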

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
#ifdef _LP64
          if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
            assert(tmp->is_valid(), "need temporary");
            __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2);
          } else
#endif
          {
            if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
              __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
            }
            assert(!tmp->is_valid(), "do not need temporary");
            __ andpd(dest->as_xmm_double_reg(),
                     ExternalAddress((address)double_signmask_pool),
                     rscratch1);
          }
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (value->is_double_fpu()) {
    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_abs   : __ fabs() ; break;
      case lir_sqrt  : __ fsqrt(); break;
      default        : ShouldNotReachHere();
    }
  } else if (code == lir_f2hf) {
    __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
  } else if (code == lir_hf2f) {
    __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
  } else {
    ShouldNotReachHere();
  }
}
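
// Bitwise and/or/xor for int and long operands. On 32-bit the long forms
// operate pairwise on the lo/hi register halves (the NOT_LP64 lines); on
// 64-bit a single full-width instruction on the lo register suffices.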

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl (reg, val); break;
        case lir_logic_or:  __ orl  (reg, val); break;
        case lir_logic_xor: __ xorl (reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl (reg, raddr); break;
        case lir_logic_or:  __ orl  (reg, raddr); break;
        case lir_logic_xor: __ xorl (reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr (reg, rright); break;
        case lir_logic_or : __ orptr  (reg, rright); break;
        case lir_logic_xor: __ xorptr (reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
#ifdef _LP64
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
#else
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_logic_and:
          __ andl(l_lo, r_lo);
          __ andl(l_hi, r_hi);
          break;
        case lir_logic_or:
          __ orl(l_lo, r_lo);
          __ orl(l_hi, r_hi);
          break;
        case lir_logic_xor:
          __ xorl(l_lo, r_lo);
          __ xorl(l_hi, r_hi);
          break;
        default: ShouldNotReachHere();
      }
#endif // _LP64
    } else {
#ifdef _LP64
      Register r_lo;
      if (is_reference_type(right->type())) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
#else
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();
      assert(l_lo != r_hi, "overwriting registers");
#endif
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

#ifdef _LP64
    move_regs(l_lo, dst_lo);
#else
    if (dst_lo == l_hi) {
      assert(dst_hi != l_lo, "overwriting registers");
      move_regs(l_hi, dst_hi);
      move_regs(l_lo, dst_lo);
    } else {
      assert(dst_lo != l_hi, "overwriting registers");
      move_regs(l_lo, dst_lo);
      move_regs(l_hi, dst_hi);
    }
#endif // _LP64
  }
}
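
// idiv/irem: the constant-divisor path strength-reduces division by a
// positive power of two into shift-and-mask sequences; the register path
// dispatches through corrected_idivl, which also fixes up the
// min_jint / -1 overflow case of the hardware idiv.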

// we assume that rax, and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  assert(left->is_single_cpu(),   "left must be register");
  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2i_exact(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    int idivl_offset = __ corrected_idivl(rreg);
    if (ImplicitDiv0Checks) {
      add_debug_info_for_div0(idivl_offset, info);
    }
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
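
// Emits the compare for a two-operand LIR condition. Oop comparisons go
// through cmpoop so compressed oops are handled correctly; comparisons
// against the constants 0 and nullptr are emitted as test instructions.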

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        jint i = c->as_jint();
        if (i == 0) {
          __ testl(reg1, reg1);
        } else {
          __ cmpl(reg1, i);
        }
      } else if (c->type() == T_METADATA) {
        // All we need for now is a comparison with null for equality
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          __ cmpoop(reg1, o, rscratch1);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
#ifdef _LP64
      __ cmpptr(xlo, opr2->as_register_lo());
#else
      // cpu register - cpu register
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ subl(xlo, ylo);
      __ sbbl(xhi, yhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orl(xhi, xlo);
      }
#endif // _LP64
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
#ifdef _LP64
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
#else
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
      __ orl(xhi, xlo);
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

#ifndef _LP64
  } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
    assert(opr2->is_fpu_register(), "both must be registers");
    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
#endif // LP64

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
    if (is_reference_type(c->type())) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
#endif // LP64
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (is_reference_type(c->type())) {
#ifdef _LP64
      __ cmpoop(rscratch1, as_Address(addr, noreg));
#else
      __ cmpoop(as_Address(addr), c->as_jobject());
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
#ifdef _LP64
      ShouldNotReachHere();
#else
      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");

      assert(left->fpu() == 0, "left must be on TOS");
      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
#endif // LP64
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ setb(Assembler::notZero, dest);
    __ movzbl(dest, dest);
    __ bind(done);
#else
    __ lcmp2int(left->as_register_hi(),
                left->as_register_lo(),
                right->as_register_hi(),
                right->as_register_lo());
    move_regs(left->as_register_hi(), dst->as_register());
#endif // _LP64
  }
}
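
// Call alignment: the 4-byte displacement of a call must not cross a word
// boundary so the call site can be patched atomically (MT-safe patching).
// align_call pads accordingly and call()/ic_call() assert that property.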

void LIR_Assembler::align_call(LIR_Code code) {
  // make sure that the displacement word of the call ends up word aligned
  int offset = __ offset();
  switch (code) {
  case lir_static_call:
  case lir_optvirtual_call:
  case lir_dynamic_call:
    offset += NativeCall::displacement_offset;
    break;
  case lir_icvirtual_call:
    offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
    break;
  default: ShouldNotReachHere();
  }
  __ align(BytesPerWord, offset);
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ post_call_nop();
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // make sure that the displacement word of the jump ends up word aligned
  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)nullptr);
  // must be set to -1 at code generation time
  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}

// Optimized version for linear scan:
// * count must already be in ECX (guaranteed by LinearScan)
// * left and dest must be equal
// * tmp must be unused
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // LP64
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
}


void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
}
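
// Inline arraycopy. This code must not throw: exceptions are raised in the
// System.arraycopy activation via the slow-path stub. When nothing is known
// statically, the generic arraycopy stub is called; otherwise the fast path
// checks nullness, index ranges and (for object arrays) type compatibility
// before dispatching to the matching StubRoutines copy function.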

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64
#else
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);

#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameters from the stack
#endif // _LP64

    __ testl(rax, rax);
    __ jcc(Assembler::equal, *stub->continuation());

    __ mov(tmp, rax);
    __ xorl(tmp, -1);

    // Reload values from the stack so they are where the stub expects them.
    __ movptr(dst,     Address(rsp, 0*BytesPerWord));
    __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr(length,  Address(rsp, 2*BytesPerWord));
    __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr(src,     Address(rsp, 4*BytesPerWord));

    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); //higher 32bits must be null

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64

#endif // _LP64

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        __ mov(tmp, rax);
        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr(dst,     Address(rsp, 0*BytesPerWord));
        __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr(length,  Address(rsp, 2*BytesPerWord));
        __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr(src,     Address(rsp, 4*BytesPerWord));

        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }
#endif

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
      else                            __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);
#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  if (stub != nullptr) {
    __ bind(*stub->continuation());
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (LockingMode == LM_MONITOR) {
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
      __ null_check(obj);
    }
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    Register tmp = LockingMode == LM_LIGHTWEIGHT ? op->scratch_opr()->as_register() : noreg;
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

#ifdef _LP64
  if (UseCompressedClassPointers) {
    __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
    __ decode_klass_not_null(result, rscratch1);
  } else
#endif
    __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}
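
// Call profiling: bump the MDO counter for this call site and, for virtual
// calls, record the receiver klass. When the receiver type is statically
// known (C1OptimizeVirtualCallProfiling), the matching receiver row is
// selected at compile time and only a counter increment is emitted.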

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == nullptr) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv, tmp_load_klass);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
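
// Type profiling for a value at a given bci. The profile slot in the MDO
// packs a Klass* together with the null_seen/type_unknown flag bits, which
// is why klasses are compared below with xorptr/testptr under
// TypeEntries::type_klass_mask rather than with a plain compare.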

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

#ifdef ASSERT
  if (obj == tmp) {
#ifdef _LP64
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
    assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
#endif
  } else {
#ifdef _LP64
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
    assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
#endif
  }
#endif
  if (do_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef _LP64
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
#ifdef _LP64
        __ mov(rscratch1, tmp); // save original value before XOR
#endif
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
#ifdef _LP64
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
#endif
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg());
      __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
        __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
      }
      __ xorps(dest->as_xmm_float_reg(),
               ExternalAddress((address)float_signflip_pool),
               rscratch1);
    }
  } else if (dest->is_double_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg());
      __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
        __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
      }
      __ xorpd(dest->as_xmm_double_reg(),
               ExternalAddress((address)double_signflip_pool),
               rscratch1);
    }
#ifndef _LP64
  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();
#endif
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  Register reg = dest->as_pointer_register();
  LIR_Address* addr = src->as_address_ptr();
  __ lea(reg, as_Address(addr));

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

#ifndef _LP64
  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}

void LIR_Assembler::membar_loadload() {
  // no-op: loads are ordered on x86
}

void LIR_Assembler::membar_storestore() {
  // no-op: stores are ordered on x86
}

void LIR_Assembler::membar_loadstore() {
  // no-op: load-store is ordered on x86
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  __ pause();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
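
// lir_xchg/lir_xadd: xchg with a memory operand is implicitly locked on
// x86, so an explicit lock prefix is only emitted for xadd. Oop exchange
// narrows through encode/decode_heap_oop when compressed oops are enabled.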

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      __ lock();
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      __ lock();
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}