/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
26
#include "asm/macroAssembler.inline.hpp"
27
#include "gc/shared/barrierSetAssembler.hpp"
28
#include "gc/shared/collectedHeap.hpp"
29
#include "gc/shared/tlab_globals.hpp"
30
#include "interpreter/interp_masm.hpp"
31
#include "interpreter/interpreter.hpp"
32
#include "interpreter/interpreterRuntime.hpp"
33
#include "interpreter/templateTable.hpp"
34
#include "memory/universe.hpp"
35
#include "oops/cpCache.hpp"
36
#include "oops/klass.inline.hpp"
37
#include "oops/methodCounters.hpp"
38
#include "oops/methodData.hpp"
39
#include "oops/objArrayKlass.hpp"
40
#include "oops/oop.inline.hpp"
41
#include "oops/resolvedIndyEntry.hpp"
42
#include "oops/resolvedFieldEntry.hpp"
43
#include "oops/resolvedMethodEntry.hpp"
44
#include "prims/jvmtiExport.hpp"
45
#include "prims/methodHandles.hpp"
46
#include "runtime/frame.inline.hpp"
47
#include "runtime/sharedRuntime.hpp"
48
#include "runtime/stubRoutines.hpp"
49
#include "runtime/synchronizer.hpp"
50
#include "utilities/powerOfTwo.hpp"
//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }

static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }
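// Note: a category-2 local (long/double) occupies two slots; laddress(n)/daddress(n)
// address the slot holding the low word (n + 1) and haddress(n) the high word (n),
// matching load_category2_local()/store_category2_local() below.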
void TemplateTable::get_local_base_addr(Register r, Register index) {
  __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
}

Address TemplateTable::load_iaddress(Register index, Register scratch) {
  return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
}

Address TemplateTable::load_aaddress(Register index, Register scratch) {
  return load_iaddress(index, scratch);
}

Address TemplateTable::load_faddress(Register index, Register scratch) {
#ifdef __SOFTFP__
  return load_iaddress(index, scratch);
#else
  get_local_base_addr(scratch, index);
  return Address(scratch);
#endif // __SOFTFP__
}

Address TemplateTable::load_daddress(Register index, Register scratch) {
  get_local_base_addr(scratch, index);
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}
// At top of Java expression stack which may be different than SP.
// It isn't for category 1 objects.
static inline Address at_tos() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
}
// Loads double/long local into R0_tos_lo/R1_tos_hi with two
// separate ldr instructions (supports nonadjacent values).
// Used for longs in all modes, and for doubles in SOFTFP mode.
void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
  __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
}

// Stores R0_tos_lo/R1_tos_hi to double/long local with two
// separate str instructions (supports nonadjacent values).
// Used for longs in all modes, and for doubles in SOFTFP mode.
void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
  __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
}
// Returns address of Java array element using temp register as address base.
Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
  int logElemSize = exact_log2(type2aelembytes(elemType));
  __ add_ptr_scaled_int32(temp, array, index, logElemSize);
  return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
}

// Returns address of Java array element using temp register as offset from array base.
Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
  int logElemSize = exact_log2(type2aelembytes(elemType));
  if (logElemSize == 0) {
    __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
  } else {
    __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
    __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
  }
  return Address(array, temp);
}
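// Note: unlike get_array_elem_addr(), the _same_base variant keeps the array oop as the
// base register of the returned Address and accumulates base_offset + scaled index in
// temp, which lets the barrier-aware accessors (access_load_at/access_store_at) used by
// the array bytecodes below address the element relative to the array base.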
//----------------------------------------------------------------------------------------------------
// Condition conversion
AsmCondition convNegCond(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return ne;
    case TemplateTable::not_equal    : return eq;
    case TemplateTable::less         : return ge;
    case TemplateTable::less_equal   : return gt;
    case TemplateTable::greater      : return le;
    case TemplateTable::greater_equal: return lt;
  }
  ShouldNotReachHere();
  return nv;
}
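// convNegCond() returns the ARM condition that is true when the Java comparison is
// false; the branch templates below (if_0cmp, if_icmp, if_acmp) use it to branch
// around the taken-branch path.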
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or null) at the address described by obj.
// Blows all volatile registers (R0-R3, Rtemp, LR).
// Also destroys new_val and obj.base().
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register new_val,
                         Register tmp1,
                         Register tmp2,
                         Register tmp3,
                         bool is_null,
                         DecoratorSet decorators = 0) {
  assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
  if (is_null) {
    __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
  } else {
    __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
  }
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register dst,
                        Address obj,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
}
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Rbcp, offset);
}
// Blows volatile registers R0-R3, Rtemp, LR.
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  assert_different_registers(bc_reg, temp_reg);
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ add(temp_reg, temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset()));
      } else {
        __ add(temp_reg, temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset()));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldrb(temp_reg, temp_reg);
      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true);

      __ mov(bc_reg, bc);
      __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ mov(bc_reg, bc);
    }
  }

  if (__ can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ ldrb(temp_reg, at_bcp(0));
    __ cmp(temp_reg, Bytecodes::_breakpoint);
    __ b(L_fast_patch, ne);
    if (bc_reg != R3) {
      __ mov(R3, bc_reg);
    }
    __ mov(R1, Rmethod);
    __ mov(R2, Rbcp);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ ldrb(temp_reg, at_bcp(0));
  __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
  __ b(L_okay, eq);
  __ cmp(temp_reg, bc_reg);
  __ b(L_okay, eq);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}
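// patch_bytecode() implements bytecode quickening: once resolution has been done,
// the original bytecode at Rbcp is overwritten with its faster variant so that later
// executions dispatch straight to the fast template.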
//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}
void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ mov(R0_tos, 0);
}
void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ mov_slow(R0_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert((value == 0) || (value == 1), "unexpected long constant");
  __ mov(R0_tos, value);
  __ mov(R1_tos_hi, 0);
}
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  const int zero = 0;          // 0.0f
  const int one  = 0x3f800000; // 1.0f
  const int two  = 0x40000000; // 2.0f

  switch (value) {
  case 0:   __ mov(R0_tos, zero);   break;
  case 1:   __ mov(R0_tos, one);    break;
  case 2:   __ mov(R0_tos, two);    break;
  default:  ShouldNotReachHere();   break;
  }

#ifndef __SOFTFP__
  __ fmsr(S0_tos, R0_tos);
#endif // !__SOFTFP__
}
void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  const int one_lo = 0;          // low part of 1.0
  const int one_hi = 0x3ff00000; // high part of 1.0

  if (value == 0) {
    __ mov(R0_tos_lo, 0);
    __ mov(R1_tos_hi, 0);
#ifndef __SOFTFP__
    __ fmdrr(D0_tos, R0_tmp, R0_tmp);
#endif // !__SOFTFP__
  } else if (value == 1) {
    __ mov(R0_tos_lo, one_lo);
    __ mov_slow(R1_tos_hi, one_hi);
#ifndef __SOFTFP__
    __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
#endif // !__SOFTFP__
  } else {
    ShouldNotReachHere();
  }
}
void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tos, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tmp, at_bcp(1));
  __ ldrb(R1_tmp, at_bcp(2));
  __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
}
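// Note: sipush assembles its signed 16-bit operand from two big-endian bytecode
// operand bytes: the byte at bcp+1 is sign-extended and shifted left by 8 before
// being combined with the unsigned byte at bcp+2.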
380
void TemplateTable::ldc(LdcType type) {
381
transition(vtos, vtos);
382
Label fastCase, Condy, Done;
384
const Register Rindex = R1_tmp;
385
const Register Rcpool = R2_tmp;
386
const Register Rtags = R3_tmp;
387
const Register RtagType = R3_tmp;
389
if (is_ldc_wide(type)) {
390
__ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
392
__ ldrb(Rindex, at_bcp(1));
394
__ get_cpool_and_tags(Rcpool, Rtags);
396
const int base_offset = ConstantPool::header_size() * wordSize;
397
const int tags_offset = Array<u1>::base_offset_in_bytes();
400
__ add(Rtemp, Rtags, tags_offset);
401
__ ldrb(RtagType, Address(Rtemp, Rindex));
402
volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
404
// unresolved class - get the resolved class
405
__ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
407
// unresolved class in error (resolution failed) - call into runtime
408
// so that the same error from first resolution attempt is thrown.
409
__ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
411
// resolved class - need to call vm to get java mirror of the class
412
__ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
416
// slow case - call runtime
417
__ mov(R1, is_ldc_wide(type) ? 1 : 0);
418
call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
422
// int, float, String
425
__ cmp(RtagType, JVM_CONSTANT_Integer);
426
__ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
430
__ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
431
__ ldr_u32(R0_tos, Address(Rtemp, base_offset));
433
// floats and ints are placed on stack in the same way, so
434
// we can use push(itos) to transfer float value without VFP
444
// Fast path for caching oop constants.
445
void TemplateTable::fast_aldc(LdcType type) {
446
transition(vtos, atos);
447
int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);
450
// We are resolved if the resolved reference cache entry contains a
451
// non-null object (CallSite, etc.)
452
assert_different_registers(R0_tos, R2_tmp);
453
__ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
454
__ load_resolved_reference_at_index(R0_tos, R2_tmp);
455
__ cbnz(R0_tos, resolved);
457
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
459
// first time invocation - must resolve first
460
__ mov(R1, (int)bytecode());
461
__ call_VM(R0_tos, entry, R1);
464
{ // Check for the null sentinel.
465
// If we just called the VM, that already did the mapping for us,
466
// but it's harmless to retry.
468
Register result = R0;
472
// Stash null_sentinel address to get its value later
473
__ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
474
__ ldr(tmp, Address(rarg));
475
__ resolve_oop_handle(tmp);
478
__ mov(result, 0); // null object reference
483
__ verify_oop(R0_tos);
487
void TemplateTable::ldc2_w() {
488
transition(vtos, vtos);
489
const Register Rtags = R2_tmp;
490
const Register Rindex = R3_tmp;
491
const Register Rcpool = R4_tmp;
492
const Register Rbase = R5_tmp;
494
__ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
496
__ get_cpool_and_tags(Rcpool, Rtags);
497
const int base_offset = ConstantPool::header_size() * wordSize;
498
const int tags_offset = Array<u1>::base_offset_in_bytes();
500
__ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
502
// get type from tags
503
__ add(Rtemp, Rtags, tags_offset);
504
__ ldrb(Rtemp, Address(Rtemp, Rindex));
506
Label Done, NotLong, NotDouble;
507
__ cmp(Rtemp, JVM_CONSTANT_Double);
510
__ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
511
__ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
513
__ ldr_double(D0_tos, Address(Rbase, base_offset));
519
__ cmp(Rtemp, JVM_CONSTANT_Long);
521
__ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
522
__ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
533
void TemplateTable::condy_helper(Label& Done)
535
Register obj = R0_tmp;
536
Register rtmp = R1_tmp;
537
Register flags = R2_tmp;
538
Register off = R3_tmp;
540
__ mov(rtmp, (int) bytecode());
541
__ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
542
__ get_vm_result_2(flags, rtmp);
544
// VMr = obj = base address to find primitive value to push
545
// VMr2 = flags = (tos, off) using format of CPCE::_flags
548
__ logical_shift_left( off, off, 32 - ConstantPoolCache::field_index_bits);
549
__ logical_shift_right(off, off, 32 - ConstantPoolCache::field_index_bits);
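// The two shifts above clear everything but the low field_index_bits of the flags
// word, leaving just the field offset used to address the constant's value below.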
551
const Address field(obj, off);
553
__ logical_shift_right(flags, flags, ConstantPoolCache::tos_state_shift);
555
switch (bytecode()) {
556
case Bytecodes::_ldc:
557
case Bytecodes::_ldc_w:
559
// tos in (itos, ftos, stos, btos, ctos, ztos)
560
Label notIntFloat, notShort, notByte, notChar, notBool;
562
__ cond_cmp(flags, ftos, ne);
563
__ b(notIntFloat, ne);
564
__ ldr(R0_tos, field);
568
__ bind(notIntFloat);
571
__ ldrsh(R0_tos, field);
578
__ ldrsb(R0_tos, field);
585
__ ldrh(R0_tos, field);
592
__ ldrsb(R0_tos, field);
600
case Bytecodes::_ldc2_w:
604
__ cond_cmp(flags, dtos, ne);
605
__ b(notLongDouble, ne);
607
__ add(rtmp, obj, wordSize);
608
__ ldr(R0_tos_lo, Address(obj, off));
609
__ ldr(R1_tos_hi, Address(rtmp, off));
613
__ bind(notLongDouble);
619
ShouldNotReachHere();
622
__ stop("bad ldc/condy");
626
void TemplateTable::locals_index(Register reg, int offset) {
627
__ ldrb(reg, at_bcp(offset));
630
void TemplateTable::iload() {
634
void TemplateTable::nofast_iload() {
635
iload_internal(may_not_rewrite);
638
void TemplateTable::iload_internal(RewriteControl rc) {
639
transition(vtos, itos);
641
if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
643
const Register next_bytecode = R1_tmp;
644
const Register target_bytecode = R2_tmp;
647
__ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
648
  // if _iload, wait to rewrite to iload2. We only want to rewrite the
  // last two iloads in a pair. Comparing against fast_iload means that
  // the next bytecode is neither an iload nor a caload, and therefore
  // an iload pair.
652
__ cmp(next_bytecode, Bytecodes::_iload);
655
__ cmp(next_bytecode, Bytecodes::_fast_iload);
656
__ mov(target_bytecode, Bytecodes::_fast_iload2);
659
// if _caload, rewrite to fast_icaload
660
__ cmp(next_bytecode, Bytecodes::_caload);
661
__ mov(target_bytecode, Bytecodes::_fast_icaload);
664
// rewrite so iload doesn't check again.
665
__ mov(target_bytecode, Bytecodes::_fast_iload);
670
patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
674
// Get the local value into tos
675
const Register Rlocal_index = R1_tmp;
676
locals_index(Rlocal_index);
677
Address local = load_iaddress(Rlocal_index, Rtemp);
678
__ ldr_s32(R0_tos, local);
682
void TemplateTable::fast_iload2() {
683
transition(vtos, itos);
684
const Register Rlocal_index = R1_tmp;
686
locals_index(Rlocal_index);
687
Address local = load_iaddress(Rlocal_index, Rtemp);
688
__ ldr_s32(R0_tos, local);
691
locals_index(Rlocal_index, 3);
692
local = load_iaddress(Rlocal_index, Rtemp);
693
__ ldr_s32(R0_tos, local);
696
void TemplateTable::fast_iload() {
697
transition(vtos, itos);
698
const Register Rlocal_index = R1_tmp;
700
locals_index(Rlocal_index);
701
Address local = load_iaddress(Rlocal_index, Rtemp);
702
__ ldr_s32(R0_tos, local);
706
void TemplateTable::lload() {
707
transition(vtos, ltos);
708
const Register Rlocal_index = R2_tmp;
710
locals_index(Rlocal_index);
711
load_category2_local(Rlocal_index, R3_tmp);
715
void TemplateTable::fload() {
716
transition(vtos, ftos);
717
const Register Rlocal_index = R2_tmp;
719
// Get the local value into tos
720
locals_index(Rlocal_index);
721
Address local = load_faddress(Rlocal_index, Rtemp);
723
__ ldr(R0_tos, local);
725
__ ldr_float(S0_tos, local);
730
void TemplateTable::dload() {
731
transition(vtos, dtos);
732
const Register Rlocal_index = R2_tmp;
734
locals_index(Rlocal_index);
737
load_category2_local(Rlocal_index, R3_tmp);
739
__ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
744
void TemplateTable::aload() {
745
transition(vtos, atos);
746
const Register Rlocal_index = R1_tmp;
748
locals_index(Rlocal_index);
749
Address local = load_aaddress(Rlocal_index, Rtemp);
750
__ ldr(R0_tos, local);
754
void TemplateTable::locals_index_wide(Register reg) {
755
assert_different_registers(reg, Rtemp);
756
__ ldrb(Rtemp, at_bcp(2));
757
__ ldrb(reg, at_bcp(3));
758
__ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
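// Wide local indices are two big-endian operand bytes (at bcp+2 and bcp+3) combined
// into an unsigned 16-bit index.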
762
void TemplateTable::wide_iload() {
763
transition(vtos, itos);
764
const Register Rlocal_index = R2_tmp;
766
locals_index_wide(Rlocal_index);
767
Address local = load_iaddress(Rlocal_index, Rtemp);
768
__ ldr_s32(R0_tos, local);
772
void TemplateTable::wide_lload() {
773
transition(vtos, ltos);
774
const Register Rlocal_index = R2_tmp;
775
const Register Rlocal_base = R3_tmp;
777
locals_index_wide(Rlocal_index);
778
load_category2_local(Rlocal_index, R3_tmp);
782
void TemplateTable::wide_fload() {
783
transition(vtos, ftos);
784
const Register Rlocal_index = R2_tmp;
786
locals_index_wide(Rlocal_index);
787
Address local = load_faddress(Rlocal_index, Rtemp);
789
__ ldr(R0_tos, local);
791
__ ldr_float(S0_tos, local);
796
void TemplateTable::wide_dload() {
797
transition(vtos, dtos);
798
const Register Rlocal_index = R2_tmp;
800
locals_index_wide(Rlocal_index);
802
load_category2_local(Rlocal_index, R3_tmp);
804
__ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
809
void TemplateTable::wide_aload() {
810
transition(vtos, atos);
811
const Register Rlocal_index = R2_tmp;
813
locals_index_wide(Rlocal_index);
814
Address local = load_aaddress(Rlocal_index, Rtemp);
815
__ ldr(R0_tos, local);
818
void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  assert_different_registers(array, index, Rtemp);
  __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmp_32(index, Rtemp);
  if (index != R4_ArrayIndexOutOfBounds_index) {
    // convention with generate_ArrayIndexOutOfBounds_handler()
    __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
  }
  __ mov(R1, array, hs);
  __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
}
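// On an out-of-bounds index the convention with generate_ArrayIndexOutOfBounds_handler()
// is that the failing index ends up in R4_ArrayIndexOutOfBounds_index and the array in
// R1; this is why the array bytecodes below prefer R4 for the index.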
838
void TemplateTable::iaload() {
839
transition(itos, itos);
840
const Register Rarray = R1_tmp;
841
const Register Rindex = R0_tos;
843
index_check(Rarray, Rindex);
844
Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
845
__ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
849
void TemplateTable::laload() {
850
transition(itos, ltos);
851
const Register Rarray = R1_tmp;
852
const Register Rindex = R0_tos;
854
index_check(Rarray, Rindex);
856
Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
857
__ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
861
void TemplateTable::faload() {
862
transition(itos, ftos);
863
const Register Rarray = R1_tmp;
864
const Register Rindex = R0_tos;
866
index_check(Rarray, Rindex);
868
Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
869
__ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
873
void TemplateTable::daload() {
874
transition(itos, dtos);
875
const Register Rarray = R1_tmp;
876
const Register Rindex = R0_tos;
878
index_check(Rarray, Rindex);
880
Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
881
__ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
885
void TemplateTable::aaload() {
886
transition(itos, atos);
887
const Register Rarray = R1_tmp;
888
const Register Rindex = R0_tos;
890
index_check(Rarray, Rindex);
891
do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
895
void TemplateTable::baload() {
896
transition(itos, itos);
897
const Register Rarray = R1_tmp;
898
const Register Rindex = R0_tos;
900
index_check(Rarray, Rindex);
901
Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
902
__ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
906
void TemplateTable::caload() {
907
transition(itos, itos);
908
const Register Rarray = R1_tmp;
909
const Register Rindex = R0_tos;
911
index_check(Rarray, Rindex);
912
Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
913
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
917
// iload followed by caload frequent pair
918
void TemplateTable::fast_icaload() {
919
transition(vtos, itos);
920
const Register Rlocal_index = R1_tmp;
921
const Register Rarray = R1_tmp;
922
const Register Rindex = R4_tmp; // index_check prefers index on R4
923
assert_different_registers(Rlocal_index, Rindex);
924
assert_different_registers(Rarray, Rindex);
926
// load index out of locals
927
locals_index(Rlocal_index);
928
Address local = load_iaddress(Rlocal_index, Rtemp);
929
__ ldr_s32(Rindex, local);
932
index_check(Rarray, Rindex);
933
Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
934
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
938
void TemplateTable::saload() {
939
transition(itos, itos);
940
const Register Rarray = R1_tmp;
941
const Register Rindex = R0_tos;
943
index_check(Rarray, Rindex);
944
Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
945
__ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
949
void TemplateTable::iload(int n) {
950
transition(vtos, itos);
951
__ ldr_s32(R0_tos, iaddress(n));
955
void TemplateTable::lload(int n) {
956
transition(vtos, ltos);
957
__ ldr(R0_tos_lo, laddress(n));
958
__ ldr(R1_tos_hi, haddress(n));
962
void TemplateTable::fload(int n) {
963
transition(vtos, ftos);
965
__ ldr(R0_tos, faddress(n));
967
__ ldr_float(S0_tos, faddress(n));
972
void TemplateTable::dload(int n) {
973
transition(vtos, dtos);
975
__ ldr(R0_tos_lo, laddress(n));
976
__ ldr(R1_tos_hi, haddress(n));
978
__ ldr_double(D0_tos, daddress(n));
983
void TemplateTable::aload(int n) {
984
transition(vtos, atos);
985
__ ldr(R0_tos, aaddress(n));
988
void TemplateTable::aload_0() {
992
void TemplateTable::nofast_aload_0() {
993
aload_0_internal(may_not_rewrite);
996
void TemplateTable::aload_0_internal(RewriteControl rc) {
997
transition(vtos, atos);
998
// According to bytecode histograms, the pairs:
1000
// _aload_0, _fast_igetfield
1001
// _aload_0, _fast_agetfield
1002
// _aload_0, _fast_fgetfield
1004
// occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
1005
// bytecode checks if the next bytecode is either _fast_igetfield,
1006
// _fast_agetfield or _fast_fgetfield and then rewrites the
1007
// current bytecode into a pair bytecode; otherwise it rewrites the current
1008
// bytecode into _fast_aload_0 that doesn't do the pair check anymore.
1010
// Note: If the next bytecode is _getfield, the rewrite must be delayed,
1011
// otherwise we may miss an opportunity for a pair.
1013
// Also rewrite frequent pairs
1016
// These bytecodes with a small amount of code are most profitable to rewrite
1017
if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1018
Label rewrite, done;
1019
const Register next_bytecode = R1_tmp;
1020
const Register target_bytecode = R2_tmp;
1023
__ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1025
// if _getfield then wait with rewrite
1026
__ cmp(next_bytecode, Bytecodes::_getfield);
1029
// if _igetfield then rewrite to _fast_iaccess_0
1030
assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1031
__ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1032
__ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1035
// if _agetfield then rewrite to _fast_aaccess_0
1036
assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1037
__ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1038
__ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1041
// if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1042
assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1043
assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1045
__ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1046
__ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1047
__ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1051
patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1059
void TemplateTable::istore() {
1060
transition(itos, vtos);
1061
const Register Rlocal_index = R2_tmp;
1063
locals_index(Rlocal_index);
1064
Address local = load_iaddress(Rlocal_index, Rtemp);
1065
__ str_32(R0_tos, local);
1069
void TemplateTable::lstore() {
1070
transition(ltos, vtos);
1071
const Register Rlocal_index = R2_tmp;
1073
locals_index(Rlocal_index);
1074
store_category2_local(Rlocal_index, R3_tmp);
1078
void TemplateTable::fstore() {
1079
transition(ftos, vtos);
1080
const Register Rlocal_index = R2_tmp;
1082
locals_index(Rlocal_index);
1083
Address local = load_faddress(Rlocal_index, Rtemp);
1085
__ str(R0_tos, local);
1087
__ str_float(S0_tos, local);
1092
void TemplateTable::dstore() {
1093
transition(dtos, vtos);
1094
const Register Rlocal_index = R2_tmp;
1096
locals_index(Rlocal_index);
1099
store_category2_local(Rlocal_index, R3_tmp);
1101
__ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1106
void TemplateTable::astore() {
1107
transition(vtos, vtos);
1108
const Register Rlocal_index = R1_tmp;
1111
locals_index(Rlocal_index);
1112
Address local = load_aaddress(Rlocal_index, Rtemp);
1113
__ str(R0_tos, local);
1117
void TemplateTable::wide_istore() {
1118
transition(vtos, vtos);
1119
const Register Rlocal_index = R2_tmp;
1122
locals_index_wide(Rlocal_index);
1123
Address local = load_iaddress(Rlocal_index, Rtemp);
1124
__ str_32(R0_tos, local);
1128
void TemplateTable::wide_lstore() {
1129
transition(vtos, vtos);
1130
const Register Rlocal_index = R2_tmp;
1131
const Register Rlocal_base = R3_tmp;
1133
__ pop_l(R0_tos_lo, R1_tos_hi);
1135
locals_index_wide(Rlocal_index);
1136
store_category2_local(Rlocal_index, R3_tmp);
1140
void TemplateTable::wide_fstore() {
1145
void TemplateTable::wide_dstore() {
1150
void TemplateTable::wide_astore() {
1151
transition(vtos, vtos);
1152
const Register Rlocal_index = R2_tmp;
1155
locals_index_wide(Rlocal_index);
1156
Address local = load_aaddress(Rlocal_index, Rtemp);
1157
__ str(R0_tos, local);
1161
void TemplateTable::iastore() {
1162
transition(itos, vtos);
1163
const Register Rindex = R4_tmp; // index_check prefers index in R4
1164
const Register Rarray = R3_tmp;
1168
index_check(Rarray, Rindex);
1169
Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
1170
__ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1174
void TemplateTable::lastore() {
1175
transition(ltos, vtos);
1176
const Register Rindex = R4_tmp; // index_check prefers index in R4
1177
const Register Rarray = R3_tmp;
1178
// R0_tos_lo:R1_tos_hi: value
1181
index_check(Rarray, Rindex);
1183
Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
1184
__ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
1188
void TemplateTable::fastore() {
1189
transition(ftos, vtos);
1190
const Register Rindex = R4_tmp; // index_check prefers index in R4
1191
const Register Rarray = R3_tmp;
1192
// S0_tos/R0_tos: value
1195
index_check(Rarray, Rindex);
1196
Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
1197
__ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
1201
void TemplateTable::dastore() {
1202
transition(dtos, vtos);
1203
const Register Rindex = R4_tmp; // index_check prefers index in R4
1204
const Register Rarray = R3_tmp;
1205
  // D0_tos / R0_tos_lo:R1_tos_hi: value
1208
index_check(Rarray, Rindex);
1210
Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
1211
__ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
1215
void TemplateTable::aastore() {
1216
transition(vtos, vtos);
1217
Label is_null, throw_array_store, done;
1219
const Register Raddr_1 = R1_tmp;
1220
const Register Rvalue_2 = R2_tmp;
1221
const Register Rarray_3 = R3_tmp;
1222
const Register Rindex_4 = R4_tmp; // preferred by index_check_without_pop()
1223
const Register Rsub_5 = R5_tmp;
1224
const Register Rsuper_LR = LR_tmp;
1226
// stack: ..., array, index, value
1227
__ ldr(Rvalue_2, at_tos()); // Value
1228
__ ldr_s32(Rindex_4, at_tos_p1()); // Index
1229
__ ldr(Rarray_3, at_tos_p2()); // Array
1231
index_check_without_pop(Rarray_3, Rindex_4);
1233
// Compute the array base
1234
__ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1236
// do array store check - check for null value first
1237
__ cbz(Rvalue_2, is_null);
1240
__ load_klass(Rsub_5, Rvalue_2);
1242
__ load_klass(Rtemp, Rarray_3);
1243
__ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1245
__ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1246
// Come here on success
1249
__ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1251
// Now store using the appropriate barrier
1252
do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
1255
__ bind(throw_array_store);
1257
// Come here on failure of subtype check
1258
__ profile_typecheck_failed(R0_tmp);
1261
__ b(Interpreter::_throw_ArrayStoreException_entry);
1263
// Have a null in Rvalue_2, store null at array[index].
1265
__ profile_null_seen(R0_tmp);
1268
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
1270
// Pop stack arguments
1272
__ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1276
void TemplateTable::bastore() {
1277
transition(itos, vtos);
1278
const Register Rindex = R4_tmp; // index_check prefers index in R4
1279
const Register Rarray = R3_tmp;
1283
index_check(Rarray, Rindex);
1285
// Need to check whether array is boolean or byte
1286
// since both types share the bastore bytecode.
1287
__ load_klass(Rtemp, Rarray);
1288
__ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1290
__ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1292
__ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1294
Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
1295
__ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
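// bastore covers both byte[] and boolean[]: the layout_helper diffbit test above
// detects a boolean array so the stored value can be masked to 0/1 before the store.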
1299
void TemplateTable::castore() {
1300
transition(itos, vtos);
1301
const Register Rindex = R4_tmp; // index_check prefers index in R4
1302
const Register Rarray = R3_tmp;
1306
index_check(Rarray, Rindex);
1307
Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
1308
__ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1312
void TemplateTable::sastore() {
1313
assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1314
arrayOopDesc::base_offset_in_bytes(T_SHORT),
1315
"base offsets for char and short should be equal");
1320
void TemplateTable::istore(int n) {
1321
transition(itos, vtos);
1322
__ str_32(R0_tos, iaddress(n));
1326
void TemplateTable::lstore(int n) {
1327
transition(ltos, vtos);
1328
__ str(R0_tos_lo, laddress(n));
1329
__ str(R1_tos_hi, haddress(n));
1333
void TemplateTable::fstore(int n) {
1334
transition(ftos, vtos);
1336
__ str(R0_tos, faddress(n));
1338
__ str_float(S0_tos, faddress(n));
1343
void TemplateTable::dstore(int n) {
1344
transition(dtos, vtos);
1346
__ str(R0_tos_lo, laddress(n));
1347
__ str(R1_tos_hi, haddress(n));
1349
__ str_double(D0_tos, daddress(n));
1354
void TemplateTable::astore(int n) {
1355
transition(vtos, vtos);
1357
__ str(R0_tos, aaddress(n));
1361
void TemplateTable::pop() {
1362
transition(vtos, vtos);
1363
__ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1367
void TemplateTable::pop2() {
1368
transition(vtos, vtos);
1369
__ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1373
void TemplateTable::dup() {
1374
transition(vtos, vtos);
1376
__ load_ptr(0, R0_tmp);
1377
__ push_ptr(R0_tmp);
1382
void TemplateTable::dup_x1() {
1383
transition(vtos, vtos);
1385
__ load_ptr(0, R0_tmp); // load b
1386
__ load_ptr(1, R2_tmp); // load a
1387
__ store_ptr(1, R0_tmp); // store b
1388
__ store_ptr(0, R2_tmp); // store a
1389
__ push_ptr(R0_tmp); // push b
1390
// stack: ..., b, a, b
1394
void TemplateTable::dup_x2() {
1395
transition(vtos, vtos);
1396
// stack: ..., a, b, c
1397
__ load_ptr(0, R0_tmp); // load c
1398
__ load_ptr(1, R2_tmp); // load b
1399
__ load_ptr(2, R4_tmp); // load a
1401
__ push_ptr(R0_tmp); // push c
1403
// stack: ..., a, b, c, c
1404
__ store_ptr(1, R2_tmp); // store b
1405
__ store_ptr(2, R4_tmp); // store a
1406
__ store_ptr(3, R0_tmp); // store c
1407
// stack: ..., c, a, b, c
1411
void TemplateTable::dup2() {
1412
transition(vtos, vtos);
1414
__ load_ptr(1, R0_tmp); // load a
1415
__ push_ptr(R0_tmp); // push a
1416
__ load_ptr(1, R0_tmp); // load b
1417
__ push_ptr(R0_tmp); // push b
1418
// stack: ..., a, b, a, b
1422
void TemplateTable::dup2_x1() {
1423
transition(vtos, vtos);
1425
// stack: ..., a, b, c
1426
__ load_ptr(0, R4_tmp); // load c
1427
__ load_ptr(1, R2_tmp); // load b
1428
__ load_ptr(2, R0_tmp); // load a
1430
__ push_ptr(R2_tmp); // push b
1431
__ push_ptr(R4_tmp); // push c
1433
// stack: ..., a, b, c, b, c
1435
__ store_ptr(2, R0_tmp); // store a
1436
__ store_ptr(3, R4_tmp); // store c
1437
__ store_ptr(4, R2_tmp); // store b
1439
// stack: ..., b, c, a, b, c
1443
void TemplateTable::dup2_x2() {
1444
transition(vtos, vtos);
1445
// stack: ..., a, b, c, d
1446
__ load_ptr(0, R0_tmp); // load d
1447
__ load_ptr(1, R2_tmp); // load c
1448
__ push_ptr(R2_tmp); // push c
1449
__ push_ptr(R0_tmp); // push d
1450
// stack: ..., a, b, c, d, c, d
1451
__ load_ptr(4, R4_tmp); // load b
1452
__ store_ptr(4, R0_tmp); // store d in b
1453
__ store_ptr(2, R4_tmp); // store b in d
1454
// stack: ..., a, d, c, b, c, d
1455
__ load_ptr(5, R4_tmp); // load a
1456
__ store_ptr(5, R2_tmp); // store c in a
1457
__ store_ptr(3, R4_tmp); // store a in c
1458
// stack: ..., c, d, a, b, c, d
1462
void TemplateTable::swap() {
1463
transition(vtos, vtos);
1465
__ load_ptr(1, R0_tmp); // load a
1466
__ load_ptr(0, R2_tmp); // load b
1467
__ store_ptr(0, R0_tmp); // store a in b
1468
__ store_ptr(1, R2_tmp); // store b in a
1473
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  const Register arg1 = R1_tmp;
  const Register arg2 = R0_tos;

  __ pop_i(arg1);
  switch (op) {
    case add  : __ add_32 (R0_tos, arg1, arg2); break;
    case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
    case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
    case _and : __ and_32 (R0_tos, arg1, arg2); break;
    case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
    case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
    case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
    case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
    case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
    default   : ShouldNotReachHere();
  }
}
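// Note: the shift cases mask the count to the low 5 bits (0x1f), as required by the
// JVM specification for ishl/ishr/iushr.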
1494
void TemplateTable::lop2(Operation op) {
1495
transition(ltos, ltos);
1496
const Register arg1_lo = R2_tmp;
1497
const Register arg1_hi = R3_tmp;
1498
const Register arg2_lo = R0_tos_lo;
1499
const Register arg2_hi = R1_tos_hi;
1501
__ pop_l(arg1_lo, arg1_hi);
1503
case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1504
case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1505
case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1506
case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1507
case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1508
default : ShouldNotReachHere();
1513
void TemplateTable::idiv() {
1514
transition(itos, itos);
1519
__ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1525
void TemplateTable::irem() {
1526
transition(itos, itos);
1531
__ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1536
void TemplateTable::lmul() {
1537
transition(ltos, ltos);
1538
const Register arg1_lo = R0_tos_lo;
1539
const Register arg1_hi = R1_tos_hi;
1540
const Register arg2_lo = R2_tmp;
1541
const Register arg2_hi = R3_tmp;
1543
__ pop_l(arg2_lo, arg2_hi);
1545
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1549
void TemplateTable::ldiv() {
1550
transition(ltos, ltos);
1551
const Register x_lo = R2_tmp;
1552
const Register x_hi = R3_tmp;
1553
const Register y_lo = R0_tos_lo;
1554
const Register y_hi = R1_tos_hi;
1556
__ pop_l(x_lo, x_hi);
1559
__ orrs(Rtemp, y_lo, y_hi);
1560
__ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1561
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1565
void TemplateTable::lrem() {
1566
transition(ltos, ltos);
1567
const Register x_lo = R2_tmp;
1568
const Register x_hi = R3_tmp;
1569
const Register y_lo = R0_tos_lo;
1570
const Register y_hi = R1_tos_hi;
1572
__ pop_l(x_lo, x_hi);
1575
__ orrs(Rtemp, y_lo, y_hi);
1576
__ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1577
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1581
void TemplateTable::lshl() {
1582
transition(itos, ltos);
1583
const Register shift_cnt = R4_tmp;
1584
const Register val_lo = R2_tmp;
1585
const Register val_hi = R3_tmp;
1587
__ pop_l(val_lo, val_hi);
1588
__ andr(shift_cnt, R0_tos, 63);
1589
__ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1593
void TemplateTable::lshr() {
1594
transition(itos, ltos);
1595
const Register shift_cnt = R4_tmp;
1596
const Register val_lo = R2_tmp;
1597
const Register val_hi = R3_tmp;
1599
__ pop_l(val_lo, val_hi);
1600
__ andr(shift_cnt, R0_tos, 63);
1601
__ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1605
void TemplateTable::lushr() {
1606
transition(itos, ltos);
1607
const Register shift_cnt = R4_tmp;
1608
const Register val_lo = R2_tmp;
1609
const Register val_hi = R3_tmp;
1611
__ pop_l(val_lo, val_hi);
1612
__ andr(shift_cnt, R0_tos, 63);
1613
__ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
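// Long shifts mask the count to 6 bits (0..63) and use long_shift() to produce the
// 64-bit result in R0_tos_lo:R1_tos_hi.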
1617
void TemplateTable::fop2(Operation op) {
1618
transition(ftos, ftos);
1623
case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1624
case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1625
case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1626
case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1627
case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1628
default : ShouldNotReachHere();
1631
const FloatRegister arg1 = S1_tmp;
1632
const FloatRegister arg2 = S0_tos;
1635
case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1636
case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1637
case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1638
case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1644
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1645
__ fmsr(S0_tos, R0);
1647
__ mov_float(S1_reg, arg2);
1649
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1650
#endif // !__ABI_HARD__
1652
default : ShouldNotReachHere();
1658
void TemplateTable::dop2(Operation op) {
1659
transition(dtos, dtos);
1661
__ mov(R2, R0_tos_lo);
1662
__ mov(R3, R1_tos_hi);
1665
// __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1666
case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1667
case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1668
case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1669
case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1670
case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1671
default : ShouldNotReachHere();
1674
const FloatRegister arg1 = D1_tmp;
1675
const FloatRegister arg2 = D0_tos;
1678
case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1679
case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1680
case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1681
case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1685
__ fmrrd(R0, R1, arg1);
1686
__ fmrrd(R2, R3, arg2);
1687
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1688
__ fmdrr(D0_tos, R0, R1);
1690
__ mov_double(D1, arg2);
1692
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1693
#endif // !__ABI_HARD__
1695
default : ShouldNotReachHere();
1701
void TemplateTable::ineg() {
1702
transition(itos, itos);
1703
__ neg_32(R0_tos, R0_tos);
1707
void TemplateTable::lneg() {
1708
transition(ltos, ltos);
1709
__ rsbs(R0_tos_lo, R0_tos_lo, 0);
1710
__ rsc (R1_tos_hi, R1_tos_hi, 0);
1714
void TemplateTable::fneg() {
1715
transition(ftos, ftos);
1718
const int sign_mask = 0x80000000;
1719
__ eor(R0_tos, R0_tos, sign_mask);
1721
__ neg_float(S0_tos, S0_tos);
1726
void TemplateTable::dneg() {
1727
transition(dtos, dtos);
1729
// Invert sign bit in the high part of the double
1730
const int sign_mask_hi = 0x80000000;
1731
__ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1733
__ neg_double(D0_tos, D0_tos);
1738
void TemplateTable::iinc() {
1739
transition(vtos, vtos);
1740
const Register Rconst = R2_tmp;
1741
const Register Rlocal_index = R1_tmp;
1742
const Register Rval = R0_tmp;
1744
__ ldrsb(Rconst, at_bcp(2));
1745
locals_index(Rlocal_index);
1746
Address local = load_iaddress(Rlocal_index, Rtemp);
1747
__ ldr_s32(Rval, local);
1748
__ add(Rval, Rval, Rconst);
1749
__ str_32(Rval, local);
1753
void TemplateTable::wide_iinc() {
1754
transition(vtos, vtos);
1755
const Register Rconst = R2_tmp;
1756
const Register Rlocal_index = R1_tmp;
1757
const Register Rval = R0_tmp;
1759
// get constant in Rconst
1760
__ ldrsb(R2_tmp, at_bcp(4));
1761
__ ldrb(R3_tmp, at_bcp(5));
1762
__ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1764
locals_index_wide(Rlocal_index);
1765
Address local = load_iaddress(Rlocal_index, Rtemp);
1766
__ ldr_s32(Rval, local);
1767
__ add(Rval, Rval, Rconst);
1768
__ str_32(Rval, local);
1772
void TemplateTable::convert() {
1775
{ TosState tos_in = ilgl;
1776
TosState tos_out = ilgl;
1777
switch (bytecode()) {
1778
case Bytecodes::_i2l: // fall through
1779
case Bytecodes::_i2f: // fall through
1780
case Bytecodes::_i2d: // fall through
1781
case Bytecodes::_i2b: // fall through
1782
case Bytecodes::_i2c: // fall through
1783
case Bytecodes::_i2s: tos_in = itos; break;
1784
case Bytecodes::_l2i: // fall through
1785
case Bytecodes::_l2f: // fall through
1786
case Bytecodes::_l2d: tos_in = ltos; break;
1787
case Bytecodes::_f2i: // fall through
1788
case Bytecodes::_f2l: // fall through
1789
case Bytecodes::_f2d: tos_in = ftos; break;
1790
case Bytecodes::_d2i: // fall through
1791
case Bytecodes::_d2l: // fall through
1792
case Bytecodes::_d2f: tos_in = dtos; break;
1793
default : ShouldNotReachHere();
1795
switch (bytecode()) {
1796
case Bytecodes::_l2i: // fall through
1797
case Bytecodes::_f2i: // fall through
1798
case Bytecodes::_d2i: // fall through
1799
case Bytecodes::_i2b: // fall through
1800
case Bytecodes::_i2c: // fall through
1801
case Bytecodes::_i2s: tos_out = itos; break;
1802
case Bytecodes::_i2l: // fall through
1803
case Bytecodes::_f2l: // fall through
1804
case Bytecodes::_d2l: tos_out = ltos; break;
1805
case Bytecodes::_i2f: // fall through
1806
case Bytecodes::_l2f: // fall through
1807
case Bytecodes::_d2f: tos_out = ftos; break;
1808
case Bytecodes::_i2d: // fall through
1809
case Bytecodes::_l2d: // fall through
1810
case Bytecodes::_f2d: tos_out = dtos; break;
1811
default : ShouldNotReachHere();
1813
transition(tos_in, tos_out);
1818
switch (bytecode()) {
1819
case Bytecodes::_i2l:
1820
__ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1823
case Bytecodes::_i2f:
1825
__ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1827
__ fmsr(S0_tmp, R0_tos);
1828
__ fsitos(S0_tos, S0_tmp);
1832
case Bytecodes::_i2d:
1834
__ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1836
__ fmsr(S0_tmp, R0_tos);
1837
__ fsitod(D0_tos, S0_tmp);
1841
case Bytecodes::_i2b:
1842
__ sign_extend(R0_tos, R0_tos, 8);
1845
case Bytecodes::_i2c:
1846
__ zero_extend(R0_tos, R0_tos, 16);
1849
case Bytecodes::_i2s:
1850
__ sign_extend(R0_tos, R0_tos, 16);
1853
case Bytecodes::_l2i:
1857
case Bytecodes::_l2f:
1858
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1859
#if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1860
__ fmsr(S0_tos, R0);
1861
#endif // !__SOFTFP__ && !__ABI_HARD__
1864
case Bytecodes::_l2d:
1865
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1866
#if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1867
__ fmdrr(D0_tos, R0, R1);
1868
#endif // !__SOFTFP__ && !__ABI_HARD__
1871
case Bytecodes::_f2i:
1873
__ ftosizs(S0_tos, S0_tos);
1874
__ fmrs(R0_tos, S0_tos);
1876
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1877
#endif // !__SOFTFP__
1880
case Bytecodes::_f2l:
1882
__ fmrs(R0_tos, S0_tos);
1883
#endif // !__SOFTFP__
1884
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1887
case Bytecodes::_f2d:
1889
__ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1891
__ convert_f2d(D0_tos, S0_tos);
1895
case Bytecodes::_d2i:
1897
__ ftosizd(Stemp, D0);
1900
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1901
#endif // !__SOFTFP__
1904
case Bytecodes::_d2l:
1906
__ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1907
#endif // !__SOFTFP__
1908
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1911
case Bytecodes::_d2f:
1913
__ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1915
__ convert_d2f(S0_tos, D0_tos);
1920
ShouldNotReachHere();
1925
void TemplateTable::lcmp() {
1926
transition(ltos, itos);
1927
const Register arg1_lo = R2_tmp;
1928
const Register arg1_hi = R3_tmp;
1929
const Register arg2_lo = R0_tos_lo;
1930
const Register arg2_hi = R1_tos_hi;
1931
const Register res = R4_tmp;
1933
__ pop_l(arg1_lo, arg1_hi);
1935
// long compare arg1 with arg2
1936
// result is -1/0/+1 if '<'/'='/'>'
1940
__ cmp (arg1_hi, arg2_hi);
1941
__ mvn (res, 0, lt);
1942
__ mov (res, 1, gt);
1944
__ cmp (arg1_lo, arg2_lo);
1945
__ mvn (res, 0, lo);
1946
__ mov (res, 1, hi);
1948
__ mov (R0_tos, res);
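// lcmp compares the high words as signed values and, only when they are equal, the
// low words as unsigned values, yielding -1/0/+1 in R0_tos.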
1952
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1953
assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
1959
transition(ftos, itos);
1960
const Register Rx = R0;
1961
const Register Ry = R1;
1966
if (unordered_result == 1) {
1967
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
1969
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
1974
transition(dtos, itos);
1975
const Register Rx_lo = R0;
1976
const Register Rx_hi = R1;
1977
const Register Ry_lo = R2;
1978
const Register Ry_hi = R3;
1980
__ mov(Ry_lo, R0_tos_lo);
1981
__ mov(Ry_hi, R1_tos_hi);
1982
__ pop_l(Rx_lo, Rx_hi);
1984
if (unordered_result == 1) {
1985
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1987
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1994
transition(ftos, itos);
1996
__ fcmps(S1_tmp, S0_tos);
1998
transition(dtos, itos);
2000
__ fcmpd(D1_tmp, D0_tos);
2005
// comparison result | flag N | flag Z | flag C | flag V
2006
// "<" | 1 | 0 | 0 | 0
2007
// "==" | 0 | 1 | 1 | 0
2008
// ">" | 0 | 0 | 1 | 0
2009
// unordered | 0 | 0 | 1 | 1
2011
if (unordered_result < 0) {
2012
__ mov(R0_tos, 1); // result == 1 if greater
2013
__ mvn(R0_tos, 0, lt); // result == -1 if less or unordered (N!=V)
2015
__ mov(R0_tos, 1); // result == 1 if greater or unordered
2016
__ mvn(R0_tos, 0, mi); // result == -1 if less (N=1)
2018
__ mov(R0_tos, 0, eq); // result == 0 if equ (Z=1)
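// unordered_result selects the NaN behavior of fcmpg/dcmpg (+1 on unordered) versus
// fcmpl/dcmpl (-1 on unordered).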
2023
void TemplateTable::branch(bool is_jsr, bool is_wide) {
2025
const Register Rdisp = R0_tmp;
2026
const Register Rbumped_taken_count = R5_tmp;
2028
__ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2030
const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2031
InvocationCounter::counter_offset();
2032
const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2033
InvocationCounter::counter_offset();
2034
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2036
// Load up R0 with the branch displacement
2038
__ ldrsb(R0_tmp, at_bcp(1));
2039
__ ldrb(R1_tmp, at_bcp(2));
2040
__ ldrb(R2_tmp, at_bcp(3));
2041
__ ldrb(R3_tmp, at_bcp(4));
2042
__ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2043
__ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2044
__ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2046
__ ldrsb(R0_tmp, at_bcp(1));
2047
__ ldrb(R1_tmp, at_bcp(2));
2048
__ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
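  // The branch displacement is a signed big-endian operand: four bytes for wide
  // branches (goto_w/jsr_w), two bytes otherwise, assembled byte by byte into Rdisp.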
2051
// Handle all the JSR stuff here, then exit.
2052
// It's much shorter and cleaner than intermingling with the
2053
// non-JSR normal-branch stuff occurring below.
2055
// compute return address as bci in R1
2056
const Register Rret_addr = R1_tmp;
2057
assert_different_registers(Rdisp, Rret_addr, Rtemp);
2059
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2060
__ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2061
__ sub(Rret_addr, Rret_addr, Rtemp);
2063
// Load the next target bytecode into R3_bytecode and advance Rbcp
2064
__ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2066
// Push return address
2067
__ push_i(Rret_addr);
2069
__ dispatch_only_noverify(vtos);
2073
// Normal (non-jsr) branch handling
2075
// Adjust the bcp by the displacement in Rdisp and load next bytecode.
2076
__ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2078
assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2079
Label backedge_counter_overflow;
2082
if (UseLoopCounter) {
2083
// increment backedge counter for backward branches
2084
// Rdisp (R0): target offset
2086
const Register Rcnt = R2_tmp;
2087
const Register Rcounters = R1_tmp;
2089
// count only if backward branch
2090
__ tst(Rdisp, Rdisp);
2094
int increment = InvocationCounter::count_increment;
2095
if (ProfileInterpreter) {
2096
// Are we profiling?
2097
__ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2098
__ cbz(Rtemp, no_mdo);
2099
// Increment the MDO backedge counter
2100
const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2101
in_bytes(InvocationCounter::counter_offset()));
2102
const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2103
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2104
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2108
// Increment backedge counter in MethodCounters*
2109
      // Note: Rbumped_taken_count is a callee-saved register on ARM32
2110
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2113
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2114
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2115
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2119
if (!UseOnStackReplacement) {
2120
__ bind(backedge_counter_overflow);
2123
// continue with the bytecode @ target
2124
__ dispatch_only(vtos, true);
2126
if (UseLoopCounter && UseOnStackReplacement) {
2127
// invocation counter overflow
2128
__ bind(backedge_counter_overflow);
2130
__ sub(R1, Rbcp, Rdisp); // branch bcp
2131
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2133
// R0: osr nmethod (osr ok) or null (osr not possible)
2134
const Register Rnmethod = R0;
2136
__ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode
2138
__ cbz(Rnmethod, dispatch); // test result, no osr if null
2140
// nmethod may have been invalidated (VM may block upon call_VM return)
2141
__ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2142
__ cmp(R1_tmp, nmethod::in_use);
2145
// We have the address of an on stack replacement routine in Rnmethod,
2146
// We need to prepare to execute the OSR method. First we must
2147
// migrate the locals and monitors off of the stack.
2149
__ mov(Rtmp_save0, Rnmethod); // save the nmethod
2151
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
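// OSR_migration_begin is expected to return the OSR buffer (the migrated locals and
// monitors) in R0 for the OSR method to pick up.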
2155
__ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2156
__ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2158
__ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2159
__ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack
2166
void TemplateTable::if_0cmp(Condition cc) {
2167
transition(itos, vtos);
2168
// assume branch is more often taken than not (loops use backward branches)
2170
__ cmp_32(R0_tos, 0);
2171
__ b(not_taken, convNegCond(cc));
2172
branch(false, false);
2174
__ profile_not_taken_branch(R0_tmp);
2178
void TemplateTable::if_icmp(Condition cc) {
2179
transition(itos, vtos);
2180
// assume branch is more often taken than not (loops use backward branches)
2183
__ cmp_32(R1_tmp, R0_tos);
2184
__ b(not_taken, convNegCond(cc));
2185
branch(false, false);
2187
__ profile_not_taken_branch(R0_tmp);
2191
void TemplateTable::if_nullcmp(Condition cc) {
2192
transition(atos, vtos);
2193
assert(cc == equal || cc == not_equal, "invalid condition");
2195
// assume branch is more often taken than not (loops use backward branches)
2198
__ cbnz(R0_tos, not_taken);
2200
__ cbz(R0_tos, not_taken);
2202
branch(false, false);
2204
__ profile_not_taken_branch(R0_tmp);
2208
void TemplateTable::if_acmp(Condition cc) {
2209
transition(atos, vtos);
2210
// assume branch is more often taken than not (loops use backward branches)
2213
__ cmpoop(R1_tmp, R0_tos);
2214
__ b(not_taken, convNegCond(cc));
2215
branch(false, false);
2217
__ profile_not_taken_branch(R0_tmp);
2221
void TemplateTable::ret() {
2222
transition(vtos, vtos);
2223
const Register Rlocal_index = R1_tmp;
2224
const Register Rret_bci = Rtmp_save0; // R4/R19
2226
locals_index(Rlocal_index);
2227
Address local = load_iaddress(Rlocal_index, Rtemp);
2228
__ ldr_s32(Rret_bci, local); // get return bci, compute return bcp
2229
__ profile_ret(Rtmp_save1, Rret_bci);
2230
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2231
__ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2232
__ add(Rbcp, Rtemp, Rret_bci);
2233
__ dispatch_next(vtos);
2237
void TemplateTable::wide_ret() {
2238
transition(vtos, vtos);
2239
const Register Rlocal_index = R1_tmp;
2240
const Register Rret_bci = Rtmp_save0; // R4/R19
2242
locals_index_wide(Rlocal_index);
2243
Address local = load_iaddress(Rlocal_index, Rtemp);
2244
__ ldr_s32(Rret_bci, local); // get return bci, compute return bcp
2245
__ profile_ret(Rtmp_save1, Rret_bci);
2246
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2247
__ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2248
__ add(Rbcp, Rtemp, Rret_bci);
2249
__ dispatch_next(vtos);
2253
void TemplateTable::tableswitch() {
2254
transition(itos, vtos);
2256
const Register Rindex = R0_tos;
2257
const Register Rtemp2 = R1_tmp;
2258
const Register Rabcp = R2_tmp; // aligned bcp
2259
const Register Rlow = R3_tmp;
2260
const Register Rhigh = R4_tmp;
2261
const Register Roffset = R5_tmp;
2264
__ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2265
__ align_reg(Rabcp, Rtemp, BytesPerInt);
2268
__ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2269
__ byteswap_u32(Rlow, Rtemp, Rtemp2);
2270
__ byteswap_u32(Rhigh, Rtemp, Rtemp2);
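// After the ldmia writeback Rabcp points at the jump-offset table; the default offset
// sits at Rabcp - 3*BytesPerInt, and Rlow/Rhigh hold the case bounds in native byte order.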
2272
// compare index with high bound
2273
__ cmp_32(Rhigh, Rindex);
2276
// if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2277
__ subs(Rindex, Rindex, Rlow, ge);
2279
// if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2280
// ("ge" status accumulated from cmp and subs instructions) then load
2281
// offset from table, otherwise load offset for default case
2283
if (ProfileInterpreter) {
2284
Label default_case, continue_execution;
2286
__ b(default_case, lt);
2287
__ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2288
__ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2289
__ b(continue_execution);
2291
__ bind(default_case);
2292
__ profile_switch_default(R0_tmp);
2293
__ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2295
__ bind(continue_execution);
2297
__ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2298
__ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2301
__ byteswap_u32(Roffset, Rtemp, Rtemp2);
2303
// load the next bytecode to R3_bytecode and advance Rbcp
2304
__ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2305
__ dispatch_only(vtos, true);
2310
void TemplateTable::lookupswitch() {
2311
transition(itos, itos);
2312
__ stop("lookupswitch bytecode should have been rewritten");
2316
void TemplateTable::fast_linearswitch() {
2317
transition(itos, vtos);
2318
Label loop, found, default_case, continue_execution;
2320
const Register Rkey = R0_tos;
2321
const Register Rabcp = R2_tmp; // aligned bcp
2322
const Register Rdefault = R3_tmp;
2323
const Register Rcount = R4_tmp;
2324
const Register Roffset = R5_tmp;
2326
// bswap Rkey, so we can avoid bswapping the table entries
2327
__ byteswap_u32(Rkey, R1_tmp, Rtemp);
2330
__ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2331
__ align_reg(Rabcp, Rtemp, BytesPerInt);
2333
// load default & counter
2334
__ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2335
__ byteswap_u32(Rcount, R1_tmp, Rtemp);
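// Rcount now holds the number of match/offset pairs; Rdefault still holds the raw
// (big-endian) default offset, converted to native order at continue_execution.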
2337
__ cmp_32(Rcount, 0);
2338
__ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2339
__ b(default_case, eq);
2343
__ cmp_32(Rtemp, Rkey);
2345
__ subs(Rcount, Rcount, 1);
2346
__ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2350
__ bind(default_case);
2351
__ profile_switch_default(R0_tmp);
2352
__ mov(Roffset, Rdefault);
2353
__ b(continue_execution);
2355
// entry found -> get offset
2357
// Rabcp is already incremented and points to the next entry
2358
__ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2359
if (ProfileInterpreter) {
2360
// Calculate index of the selected case.
2361
assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2364
__ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2365
__ align_reg(R2_tmp, Rtemp, BytesPerInt);
2367
// load number of cases
2368
__ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2369
__ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2371
// Selected index = <number of cases> - <current loop count>
2372
__ sub(R1_tmp, R2_tmp, Rcount);
2373
__ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2376
// continue execution
2377
__ bind(continue_execution);
2378
__ byteswap_u32(Roffset, R1_tmp, Rtemp);
2380
// load the next bytecode to R3_bytecode and advance Rbcp
2381
__ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2382
__ dispatch_only(vtos, true);
2386
void TemplateTable::fast_binaryswitch() {
2387
transition(itos, vtos);
2388
// Implementation using the following core algorithm:
2390
// int binary_search(int key, LookupswitchPair* array, int n) {
2391
// // Binary search according to "Methodik des Programmierens" by
2392
// // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2395
// while (i+1 < j) {
2396
// // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2397
// // with Q: for all i: 0 <= i < n: key < a[i]
2398
// // where a stands for the array and assuming that the (nonexistent)
2399
// // element a[n] is infinitely big.
2400
// int h = (i + j) >> 1;
2402
// if (key < array[h].fast_match()) {
2408
// // R: a[i] <= key < a[i+1] or Q
2409
// // (i.e., if key is within array, i is the correct index)
2413
// register allocation
2414
const Register key = R0_tos; // already set (tosca)
2415
const Register array = R1_tmp;
2416
const Register i = R2_tmp;
2417
const Register j = R3_tmp;
2418
const Register h = R4_tmp;
2419
const Register val = R5_tmp;
2420
const Register temp1 = Rtemp;
2421
const Register temp2 = LR_tmp;
2422
const Register offset = R3_tmp;
2424
// set 'array' = aligned bcp + 2 ints
2425
__ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2426
__ align_reg(array, temp1, BytesPerInt);
2429
__ mov(i, 0); // i = 0;
2430
__ ldr_s32(j, Address(array, -BytesPerInt)); // j = length(array);
2431
// Convert j into native byteordering
2432
__ byteswap_u32(j, temp1, temp2);
2438
// binary search loop
2441
// int h = (i + j) >> 1;
2442
__ add(h, i, j); // h = i + j;
2443
__ logical_shift_right(h, h, 1); // h = (i + j) >> 1;
2444
// if (key < array[h].fast_match()) {
2449
__ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2450
// Convert array[h].match to native byte-ordering before compare
2451
__ byteswap_u32(val, temp1, temp2);
2452
__ cmp_32(key, val);
2453
__ mov(j, h, lt); // j = h if (key < array[h].fast_match())
2454
__ mov(i, h, ge); // i = h if (key >= array[h].fast_match())
2457
__ add(temp1, i, 1); // i+1
2458
__ cmp(temp1, j); // i+1 < j
2462
// end of binary search, result index is i (must check again!)
2464
// Convert array[i].match to native byte-ordering before compare
2465
__ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2466
__ byteswap_u32(val, temp1, temp2);
2467
__ cmp_32(key, val);
2468
__ b(default_case, ne);
2471
__ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2472
__ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
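// offset now holds the matched pair's branch offset (converted to native byte order below).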
2473
__ profile_switch_case(R0, i, R1, i);
2474
__ byteswap_u32(offset, temp1, temp2);
2475
__ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2476
__ dispatch_only(vtos, true);
2479
__ bind(default_case);
2480
__ profile_switch_default(R0);
2481
__ ldr_s32(offset, Address(array, -2*BytesPerInt));
2482
__ byteswap_u32(offset, temp1, temp2);
2483
__ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2484
__ dispatch_only(vtos, true);
2488
void TemplateTable::_return(TosState state) {
2489
transition(state, state);
2490
assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2492
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2493
Label skip_register_finalizer;
2494
assert(state == vtos, "only valid state");
2495
__ ldr(R1, aaddress(0));
2496
__ load_klass(Rtemp, R1);
2497
__ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2498
__ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
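// JVM_ACC_HAS_FINALIZER is set on the receiver's klass, so register the object for
// finalization before returning.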
2500
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2502
__ bind(skip_register_finalizer);
2505
if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2507
__ ldr(Rtemp, Address(Rthread, JavaThread::polling_word_offset()));
2508
__ tbz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), no_safepoint);
2510
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2512
__ bind(no_safepoint);
2515
// Narrow result if state is itos but result type is smaller.
2516
// Need to narrow in the return bytecode rather than in generate_return_entry
2517
// since compiled code callers expect the result to already be narrowed.
2518
if (state == itos) {
2521
__ remove_activation(state, LR);
2523
__ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2525
// According to interpreter calling conventions, result is returned in R0/R1,
2526
// so ftos (S0) and dtos (D0) are moved to R0/R1.
2527
// This conversion should be done after remove_activation, as it uses
2528
// push(state) & pop(state) to preserve return value.
2529
__ convert_tos_to_retval(state);
2533
__ nop(); // to avoid filling CPU pipeline with invalid instructions
2538
// ----------------------------------------------------------------------------
2539
// Volatile variables demand their effects be made known to all CPUs in
2540
// order. Store buffers on most chips allow reads & writes to reorder; the
2541
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2542
// memory barrier (i.e., it's not sufficient that the interpreter does not
2543
// reorder volatile references, the hardware also must not reorder them).
2545
// According to the new Java Memory Model (JMM):
2546
// (1) All volatiles are serialized wrt to each other.
2547
// ALSO reads & writes act as acquire & release, so:
2548
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
2549
// the read float up to before the read. It's OK for non-volatile memory refs
2550
// that happen before the volatile read to float down below it.
2551
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2552
// that happen BEFORE the write float down to after the write. It's OK for
2553
// non-volatile memory refs that happen after the volatile write to float up
2556
// We only put in barriers around volatile refs (they are expensive), not
2557
// _between_ memory refs (that would require us to track the flavor of the
2558
// previous memory refs). Requirements (2) and (3) require some barriers
2559
// before volatile stores and after volatile loads. These nearly cover
2560
// requirement (1) but miss the volatile-store-volatile-load case. This final
2561
// case is placed after volatile-stores although it could just as well go
2562
// before volatile-loads.
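// In this file that policy shows up as a LoadLoad|LoadStore barrier after volatile loads
// (see getfield_or_static) and a StoreStore|LoadStore barrier before volatile stores
// (see putfield_or_static and fast_storefield).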
2563
void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2565
bool preserve_flags,
2566
Register load_tgt) {
2567
__ membar(order_constraint, tmp, preserve_flags, load_tgt);
2570
// Blows all volatile registers: R0-R3, Rtemp, LR.
2571
void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2574
assert_different_registers(Rcache, Rindex, Rtemp);
2575
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2578
Bytecodes::Code code = bytecode();
2579
__ load_method_entry(Rcache, Rindex);
2582
__ add(Rtemp, Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset()));
2585
__ add(Rtemp, Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset()));
2588
// Load-acquire the bytecode to match store-release in InterpreterRuntime
2589
__ ldrb(Rtemp, Rtemp);
2590
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true);
2591
__ cmp(Rtemp, code); // have we resolved this bytecode?
2594
// resolve first time through
2595
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2597
__ call_VM(noreg, entry, R1);
2598
// Update registers with resolved info
2599
__ load_method_entry(Rcache, Rindex);
2603
void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2606
assert_different_registers(Rcache, Rindex, Rtemp);
2610
Bytecodes::Code code = bytecode();
2612
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2613
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2617
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2618
__ load_field_entry(Rcache, Rindex);
2619
if (byte_no == f1_byte) {
2620
__ add(Rtemp, Rcache, in_bytes(ResolvedFieldEntry::get_code_offset()));
2622
__ add(Rtemp, Rcache, in_bytes(ResolvedFieldEntry::put_code_offset()));
2625
// Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
2626
__ ldrb(Rtemp, Rtemp);
2627
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true);
2629
__ cmp(Rtemp, code); // have we resolved this bytecode?
2632
// resolve first time through
2633
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2635
__ call_VM(noreg, entry, R1);
2636
// Update registers with resolved info
2637
__ load_field_entry(Rcache, Rindex);
2641
void TemplateTable::load_resolved_field_entry(Register obj,
2646
bool is_static = false) {
2647
assert_different_registers(cache, tos_state, flags, offset);
2650
__ ldr(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())));
2653
__ ldrb(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2656
__ ldrb(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2658
// Klass overwrite register
2660
__ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2661
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2662
__ ldr(obj, Address(obj, mirror_offset));
2663
__ resolve_oop_handle(obj);
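// For static accesses obj now holds the field holder's java.lang.Class mirror,
// used as the base oop for the field.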
2667
// The method register is an input and is overwritten with the adapter method for the
// indy call. The link register (LR) is set to the return address for the adapter and
// an appendix may be pushed to the stack. Registers R1-R3 and Rtemp (R12) are clobbered.
2670
void TemplateTable::load_invokedynamic_entry(Register method) {
2672
const Register appendix = R1;
2673
const Register cache = R2_tmp;
2674
const Register index = R3_tmp;
2675
assert_different_registers(method, appendix, cache, index);
2680
__ load_resolved_indy_entry(cache, index);
2681
// Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2682
__ ldr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2683
TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true);
2684
// Compare the method to zero
2685
__ cbnz(method, resolved);
2687
Bytecodes::Code code = bytecode();
2689
// Call to the interpreter runtime to resolve invokedynamic
2690
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2691
__ mov(R1, code); // this is essentially Bytecodes::_invokedynamic, call_VM requires R1
2692
__ call_VM(noreg, entry, R1);
2693
// Update registers with resolved info
2694
__ load_resolved_indy_entry(cache, index);
2695
// Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2696
__ ldr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2697
TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true);
2700
__ cbnz(method, resolved);
2701
__ stop("Should be resolved by now");
2706
// Check if there is an appendix
2707
__ ldrb(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2708
__ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);
2710
__ ldrh(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2711
// Push the appendix as a trailing parameter
2712
// since the parameter_size includes it.
2713
__ load_resolved_reference_at_index(appendix, index);
2714
__ verify_oop(appendix);
2715
__ push(appendix); // push appendix (MethodType, CallSite, etc.)
2718
// compute return type
2719
__ ldrb(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2720
// load return address
2722
const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2723
__ mov_address(Rtemp, table_addr);
2724
__ ldr(LR, Address(Rtemp, index, lsl, Interpreter::logStackElementSize));
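// LR now holds the invoke return entry selected by the result type, so the adapter
// returns directly into the interpreter's return handling.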
2728
// Blows all volatile registers: R0-R3, Rtemp, LR.
2729
void TemplateTable::load_resolved_method_entry_special_or_static(Register Rcache,
2732
Register index = flags;
2733
assert_different_registers(Rcache, method, flags);
2734
resolve_cache_and_index_for_method(f1_byte, Rcache, index);
2735
__ ldrb(flags, Address(Rcache, in_bytes(ResolvedMethodEntry::flags_offset())));
2736
__ ldr(method, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2739
void TemplateTable::load_resolved_method_entry_handle(Register Rcache,
2743
Register index = ref_index;
2744
assert_different_registers(method, flags);
2745
assert_different_registers(Rcache, method, index);
2748
resolve_cache_and_index_for_method(f1_byte, Rcache, index);
2749
__ ldrb(flags, Address(Rcache, in_bytes(ResolvedMethodEntry::flags_offset())));
2751
// maybe push appendix to arguments (just before return address)
2753
__ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
2754
// invokehandle uses an index into the resolved references array
2755
__ ldrh(ref_index, Address(Rcache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2756
// Push the appendix as a trailing parameter.
2757
// This must be done before we get the receiver,
2758
// since the parameter_size includes it.
2759
Register appendix = method;
2760
__ load_resolved_reference_at_index(appendix, ref_index);
2761
__ push(appendix); // push appendix (MethodType, CallSite, etc.)
2764
__ ldr(method, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2767
void TemplateTable::load_resolved_method_entry_interface(Register Rcache,
2769
Register method_or_table_index,
2772
const Register index = method_or_table_index;
2773
assert_different_registers(method_or_table_index, Rcache, flags);
2775
// determine constant pool cache field offsets
2776
resolve_cache_and_index_for_method(f1_byte, Rcache, index);
2777
__ ldrb(flags, Address(Rcache, in_bytes(ResolvedMethodEntry::flags_offset())));
2779
// Invokeinterface can behave in different ways:
2780
// If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
2781
// behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
2782
// vtable index is placed in the register.
2783
// Otherwise, the registers will be populated with the klass and method.
2785
Label NotVirtual; Label NotVFinal; Label Done;
2786
__ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
2787
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2788
__ ldr(method_or_table_index, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2792
__ ldrh(method_or_table_index, Address(Rcache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2795
__ bind(NotVirtual);
2796
__ ldr(method_or_table_index, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2797
__ ldr(klass, Address(Rcache, in_bytes(ResolvedMethodEntry::klass_offset())));
2801
void TemplateTable::load_resolved_method_entry_virtual(Register Rcache,
2802
Register method_or_table_index,
2805
const Register index = flags;
2806
assert_different_registers(method_or_table_index, Rcache, flags);
2808
// determine constant pool cache field offsets
2809
resolve_cache_and_index_for_method(f2_byte, Rcache, index);
2810
__ ldrb(flags, Address(Rcache, in_bytes(ResolvedMethodEntry::flags_offset())));
2812
// method_or_table_index can either be an itable index or a method depending on the virtual final flag
2813
Label NotVFinal; Label Done;
2814
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2815
__ ldr(method_or_table_index, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2819
__ ldrh(method_or_table_index, Address(Rcache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2823
// The cache and index registers are expected to be set before the call, and must not be Rtemp.
2824
// Blows volatile registers R0-R3, Rtemp, LR,
2825
// except cache and index registers which are preserved.
2826
void TemplateTable::jvmti_post_field_access(Register Rcache,
2830
assert_different_registers(Rcache, Rindex, Rtemp);
2832
if (__ can_post_field_access()) {
2833
// Check to see if a field access watch has been set before we take
2834
// the time to call into the VM.
2838
__ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2839
__ cbz(Rtemp, Lcontinue);
2841
// cache entry pointer
2842
__ load_field_entry(R2, Rindex);
2845
__ mov(R1, 0); // null object reference
2847
__ pop(atos); // Get the object
2850
__ push(atos); // Restore stack state
2852
// R1: object pointer or null
2853
// R2: cache entry pointer
2854
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2856
__ load_field_entry(Rcache, Rindex);
2863
void TemplateTable::pop_and_check_object(Register r) {
2865
__ null_check(r, Rtemp); // for field access must check obj.
2870
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2871
transition(vtos, vtos);
2873
const Register Rcache = R4_tmp;
2874
const Register Rindex = R3_tmp;
2876
const Register Roffset = R2_tmp;
2877
const Register Rtos_state = R3_tmp;
2878
const Register Robj = R4_tmp; // Rcache is free at the time of loading Robj
2879
const Register Rflags = R5_tmp;
2881
resolve_cache_and_index_for_field(byte_no, Rcache, Rindex);
2882
jvmti_post_field_access(Rcache, Rindex, is_static, false);
2883
load_resolved_field_entry(Robj, Rcache, Rtos_state, Roffset, Rflags, is_static);
2886
pop_and_check_object(Robj);
2889
Label Done, Lint, Ltable, shouldNotReachHere;
2890
Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
2892
// There are actually two implementations of getfield/getstatic:
// 1) Table switch using an add(PC,...) instruction (fast_version)
// 2) Table switch using an ldr(PC,...) instruction
// The first version requires a fixed-size code block for each case and
// cannot be used when RewriteBytecodes or VerifyOops is enabled.
2901
// Size of fixed size code block for fast_version
2902
const int log_max_block_size = 3;
2903
const int max_block_size = 1 << log_max_block_size;
2905
// Decide if fast version is enabled
2906
bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
2908
// On 32-bit ARM atos and itos cases can be merged only for fast version, because
2909
// atos requires additional processing in slow version.
2910
bool atos_merged_with_itos = fast_version;
2912
assert(number_of_states == 10, "number of tos states should be equal to 10");
2914
__ cmp(Rtos_state, itos);
2915
if (atos_merged_with_itos) {
2916
__ cmp(Rtos_state, atos, ne);
2919
// table switch by type
2921
__ add(PC, PC, AsmOperand(Rtos_state, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
2923
__ ldr(PC, Address(PC, Rtos_state, lsl, LogBytesPerWord), ne);
2926
// jump to itos/atos case
2929
// table with addresses for slow version
2934
__ emit_address(Lbtos);
2935
__ emit_address(Lztos);
2936
__ emit_address(Lctos);
2937
__ emit_address(Lstos);
2938
__ emit_address(Litos);
2939
__ emit_address(Lltos);
2940
__ emit_address(Lftos);
2941
__ emit_address(Ldtos);
2942
__ emit_address(Latos);
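// The table above holds one entry per TosState and is indexed by Rtos_state
// in the ldr(PC, ...) dispatch of the slow version.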
2950
assert(btos == seq++, "btos has unexpected value");
2951
FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
2953
__ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2955
// Rewrite bytecode to be faster
2956
if (!is_static && rc == may_rewrite) {
2957
patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2962
// ztos (same as btos for getfield)
2964
assert(ztos == seq++, "ztos has unexpected value");
2965
FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
2967
__ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2969
// Rewrite bytecode to be faster (use btos fast getfield)
2970
if (!is_static && rc == may_rewrite) {
2971
patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2978
assert(ctos == seq++, "ctos has unexpected value");
2979
FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
2981
__ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2983
if (!is_static && rc == may_rewrite) {
2984
patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
2991
assert(stos == seq++, "stos has unexpected value");
2992
FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
2994
__ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2996
if (!is_static && rc == may_rewrite) {
2997
patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3004
assert(itos == seq++, "itos has unexpected value");
3005
FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3007
__ b(shouldNotReachHere);
3012
assert(ltos == seq++, "ltos has unexpected value");
3013
FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3015
__ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
3017
if (!is_static && rc == may_rewrite) {
3018
patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3025
assert(ftos == seq++, "ftos has unexpected value");
3026
FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3028
// floats and ints are placed on stack in same way, so
3029
// we can use push(itos) to transfer value without using VFP
3030
__ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3032
if (!is_static && rc == may_rewrite) {
3033
patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3040
assert(dtos == seq++, "dtos has unexpected value");
3041
FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3043
// doubles and longs are placed on stack in the same way, so
3044
// we can use push(ltos) to transfer value without using VFP
3045
__ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
3047
if (!is_static && rc == may_rewrite) {
3048
patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3055
assert(atos == seq++, "atos has unexpected value");
3057
// atos case for slow version on 32-bit ARM
3058
if (!atos_merged_with_itos) {
3060
do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3062
// Rewrite bytecode to be faster
3063
if (!is_static && rc == may_rewrite) {
3064
patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3070
assert(vtos == seq++, "vtos has unexpected value");
3072
__ bind(shouldNotReachHere);
3073
__ should_not_reach_here();
3075
// itos and atos cases are frequent so it makes sense to move them out of table switch
3076
// atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3079
__ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3081
// Rewrite bytecode to be faster
3082
if (!is_static && rc == may_rewrite) {
3083
patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3089
// Check for volatile field
3091
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3092
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3093
__ bind(notVolatile);
3097
void TemplateTable::getfield(int byte_no) {
3098
getfield_or_static(byte_no, false);
3101
void TemplateTable::nofast_getfield(int byte_no) {
3102
getfield_or_static(byte_no, false, may_not_rewrite);
3105
void TemplateTable::getstatic(int byte_no) {
3106
getfield_or_static(byte_no, true);
3110
// The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
3111
// Blows volatile registers R0-R3, Rtemp, LR,
3112
// except cache and index registers which are preserved.
3113
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3114
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3115
assert_different_registers(Rcache, Rindex, R1, Rtemp);
3117
if (__ can_post_field_modification()) {
3118
// Check to see if a field modification watch has been set before we take
3119
// the time to call into the VM.
3122
__ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3123
__ cbz(Rtemp, Lcontinue);
3128
// Life is simple. Null out the object pointer.
3131
// Life is harder. The stack holds the value on top, followed by the object.
3132
// We don't know the size of the value, though; it could be one or two words
3133
// depending on its type. As a result, we must find the type to determine where the object is.
3135
__ ldrb(R3, Address(Rcache, in_bytes(ResolvedFieldEntry::type_offset())));
3138
__ cond_cmp(R3, dtos, ne);
3139
// two word value (ltos/dtos)
3140
__ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3142
// one word value (not ltos, dtos)
3143
__ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3147
__ mov(R3, Rstack_top);
3149
// R1: object pointer set up above (null if static)
3150
// R2: cache entry pointer
3151
// R3: value object on the stack
3152
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3154
__ load_field_entry(Rcache, Rindex);
3161
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3162
transition(vtos, vtos);
3164
const Register Rcache = R4_tmp;
3165
const Register Rindex = R3_tmp;
3167
const Register Roffset = R2_tmp;
3168
const Register Rtos_state = R3_tmp;
3169
const Register Robj = R4_tmp; // Rcache is free at the time of loading Robj
3170
const Register Rflags = R5_tmp;
3172
resolve_cache_and_index_for_field(byte_no, Rcache, Rindex);
3173
jvmti_post_field_mod(Rcache, Rindex, is_static);
3174
load_resolved_field_entry(Robj, Rcache, Rtos_state, Roffset, Rflags, is_static);
3176
// Check for volatile field
3179
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3180
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3181
__ bind(notVolatile);
3184
Label Done, Lint, shouldNotReachHere;
3185
Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3187
// There are actually two implementations of putfield/putstatic:
// 1) Table switch using an add(PC,...) instruction (fast_version)
// 2) Table switch using an ldr(PC,...) instruction
// The first version requires a fixed-size code block for each case and
// cannot be used when RewriteBytecodes or VerifyOops is enabled.
3197
// Size of fixed size code block for fast_version (in instructions)
3198
const int log_max_block_size = 3;
3199
const int max_block_size = 1 << log_max_block_size;
3201
// Decide if fast version is enabled
3202
bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
3204
assert(number_of_states == 10, "number of tos states should be equal to 10");
3206
// itos case is frequent and is moved outside table switch
3207
__ cmp(Rtos_state, itos);
3209
// table switch by type
3211
__ add(PC, PC, AsmOperand(Rtos_state, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3213
__ ldr(PC, Address(PC, Rtos_state, lsl, LogBytesPerWord), ne);
3216
// jump to itos case
3219
// table with addresses for slow version
3224
__ emit_address(Lbtos);
3225
__ emit_address(Lztos);
3226
__ emit_address(Lctos);
3227
__ emit_address(Lstos);
3228
__ emit_address(Litos);
3229
__ emit_address(Lltos);
3230
__ emit_address(Lftos);
3231
__ emit_address(Ldtos);
3232
__ emit_address(Latos);
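// As for getfield/getstatic, the table above holds one entry per TosState and is
// indexed by Rtos_state in the ldr(PC, ...) dispatch of the slow version.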
3240
assert(btos == seq++, "btos has unexpected value");
3241
FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3244
if (!is_static) pop_and_check_object(Robj);
3245
__ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3246
if (!is_static && rc == may_rewrite) {
3247
patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3254
assert(ztos == seq++, "ztos has unexpected value");
3255
FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3258
if (!is_static) pop_and_check_object(Robj);
3259
__ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3260
if (!is_static && rc == may_rewrite) {
3261
patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3268
assert(ctos == seq++, "ctos has unexpected value");
3269
FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3272
if (!is_static) pop_and_check_object(Robj);
3273
__ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3274
if (!is_static && rc == may_rewrite) {
3275
patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3282
assert(stos == seq++, "stos has unexpected value");
3283
FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3286
if (!is_static) pop_and_check_object(Robj);
3287
__ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3288
if (!is_static && rc == may_rewrite) {
3289
patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3296
assert(itos == seq++, "itos has unexpected value");
3297
FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3299
__ b(shouldNotReachHere);
3304
assert(ltos == seq++, "ltos has unexpected value");
3305
FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3308
if (!is_static) pop_and_check_object(Robj);
3309
__ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3310
if (!is_static && rc == may_rewrite) {
3311
patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3318
assert(ftos == seq++, "ftos has unexpected value");
3319
FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3321
// floats and ints are placed on stack in the same way, so
3322
// we can use pop(itos) to transfer value without using VFP
3324
if (!is_static) pop_and_check_object(Robj);
3325
__ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3326
if (!is_static && rc == may_rewrite) {
3327
patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3334
assert(dtos == seq++, "dtos has unexpected value");
3335
FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3337
// doubles and longs are placed on stack in the same way, so
3338
// we can use pop(ltos) to transfer value without using VFP
3340
if (!is_static) pop_and_check_object(Robj);
3341
__ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3342
if (!is_static && rc == may_rewrite) {
3343
patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3350
assert(atos == seq++, "atos has unexpected value");
3353
if (!is_static) pop_and_check_object(Robj);
3354
// Store into the field
3355
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3356
if (!is_static && rc == may_rewrite) {
3357
patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3362
__ bind(shouldNotReachHere);
3363
__ should_not_reach_here();
3365
// itos case is frequent and is moved outside table switch
3368
if (!is_static) pop_and_check_object(Robj);
3369
__ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3370
if (!is_static && rc == may_rewrite) {
3371
patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3378
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3379
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3380
__ bind(notVolatile);
3384
void TemplateTable::putfield(int byte_no) {
3385
putfield_or_static(byte_no, false);
3388
void TemplateTable::nofast_putfield(int byte_no) {
3389
putfield_or_static(byte_no, false, may_not_rewrite);
3392
void TemplateTable::putstatic(int byte_no) {
3393
putfield_or_static(byte_no, true);
3397
void TemplateTable::jvmti_post_fast_field_mod() {
3398
// This version of jvmti_post_fast_field_mod() is not used on ARM
3402
// Blows volatile registers R0-R3, Rtemp, LR,
3403
// but preserves tosca with the given state.
3404
void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3405
if (__ can_post_field_modification()) {
3406
// Check to see if a field modification watch has been set before we take
3407
// the time to call into the VM.
3410
__ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3413
__ pop_ptr(R3); // copy the object pointer from tos
3415
__ push_ptr(R3); // put the object pointer back on tos
3417
__ push(state); // save value on the stack
3419
// access constant pool cache entry
3420
__ load_field_entry(R2, R1);
3423
assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3424
__ mov(R3, Rstack_top); // put tos addr into R3
3426
// R1: object pointer copied above
3427
// R2: cache entry pointer
3428
// R3: jvalue object on the stack
3429
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3431
__ pop(state); // restore value
3438
void TemplateTable::fast_storefield(TosState state) {
3439
transition(state, vtos);
3441
ByteSize base = ConstantPoolCache::base_offset();
3443
jvmti_post_fast_field_mod(state);
3445
const Register Rcache = R4_tmp;
3446
const Register Rindex = R3_tmp;
3448
const Register Roffset = R2_tmp;
3449
const Register Rtos_state = R3_tmp;
3450
const Register Robj = R4_tmp; // Rcache is free at the time of loading Robj
3451
const Register Rflags = R5_tmp;
3453
// access constant pool cache
3454
__ load_field_entry(Rcache, Rindex);
3455
load_resolved_field_entry(Robj, Rcache, Rtos_state, Roffset, Rflags);
3457
// load flags to test volatile
3459
// Check for volatile store
3461
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3462
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3463
__ bind(notVolatile);
3466
// Get object from stack
3467
pop_and_check_object(Robj);
3469
Address addr = Address(Robj, Roffset);
3471
switch (bytecode()) {
3472
case Bytecodes::_fast_zputfield:
3473
__ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3475
case Bytecodes::_fast_bputfield:
3476
__ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3478
case Bytecodes::_fast_sputfield:
3479
__ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3481
case Bytecodes::_fast_cputfield:
3482
__ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3484
case Bytecodes::_fast_iputfield:
3485
__ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3487
case Bytecodes::_fast_lputfield:
3488
__ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3490
case Bytecodes::_fast_fputfield:
3491
__ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3493
case Bytecodes::_fast_dputfield:
3494
__ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3496
case Bytecodes::_fast_aputfield:
3497
do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3501
ShouldNotReachHere();
3505
// Check for volatile store
3507
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3508
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3509
__ bind(notVolatile);
3514
void TemplateTable::fast_accessfield(TosState state) {
3515
transition(atos, state);
3517
// do the JVMTI work here to avoid disturbing the register state below
3518
if (__ can_post_field_access()) {
3519
// Check to see if a field access watch has been set before we take
3520
// the time to call into the VM.
3522
__ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3524
// access constant pool cache entry
3525
__ load_field_entry(R2, R1);
3526
__ push_ptr(R0_tos); // save object pointer before call_VM() clobbers it
3527
__ verify_oop(R0_tos);
3529
// R1: object pointer copied above
3530
// R2: cache entry pointer
3531
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3532
__ pop_ptr(R0_tos); // restore object pointer
3537
const Register Robj = R0_tos;
3538
const Register Rcache = R2_tmp;
3539
const Register Rflags = R2_tmp;
3540
const Register Rindex = R3_tmp;
3541
const Register Roffset = R3_tmp;
3543
// access constant pool cache
3544
__ load_field_entry(Rcache, Rindex);
3545
// replace index with field offset from cache entry
3546
__ ldr(Roffset, Address(Rcache, ResolvedFieldEntry::field_offset_offset()));
3548
// load flags to test volatile
3549
__ ldrb(Rflags, Address(Rcache, ResolvedFieldEntry::flags_offset()));
3551
__ verify_oop(Robj);
3552
__ null_check(Robj);
3554
Address addr = Address(Robj, Roffset);
3556
switch (bytecode()) {
3557
case Bytecodes::_fast_bgetfield:
3558
__ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3560
case Bytecodes::_fast_sgetfield:
3561
__ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3563
case Bytecodes::_fast_cgetfield:
3564
__ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3566
case Bytecodes::_fast_igetfield:
3567
__ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3569
case Bytecodes::_fast_lgetfield:
3570
__ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3572
case Bytecodes::_fast_fgetfield:
3573
__ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3575
case Bytecodes::_fast_dgetfield:
3576
__ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3578
case Bytecodes::_fast_agetfield:
3579
do_oop_load(_masm, R0_tos, addr);
3580
__ verify_oop(R0_tos);
3583
ShouldNotReachHere();
3587
// Check for volatile load
3589
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3590
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3591
__ bind(notVolatile);
3596
void TemplateTable::fast_xaccess(TosState state) {
3597
transition(vtos, state);
3599
const Register Robj = R1_tmp;
3600
const Register Rcache = R2_tmp;
3601
const Register Rindex = R3_tmp;
3602
const Register Roffset = R3_tmp;
3603
const Register Rflags = R4_tmp;
3607
__ ldr(Robj, aaddress(0));
3609
// access constant pool cache
3610
__ load_field_entry(Rcache, Rindex, 2);
3611
__ ldr(Roffset, Address(Rcache, ResolvedFieldEntry::field_offset_offset()));
3613
// load flags to test volatile
3614
__ ldrb(Rflags, Address(Rcache, ResolvedFieldEntry::flags_offset()));
3616
// make sure exception is reported in correct bcp range (getfield is next instruction)
3617
__ add(Rbcp, Rbcp, 1);
3618
__ null_check(Robj, Rtemp);
3619
__ sub(Rbcp, Rbcp, 1);
3622
if (state == itos) {
3623
__ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3624
} else if (state == atos) {
3625
do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3626
__ verify_oop(R0_tos);
3627
} else if (state == ftos) {
3629
__ ldr(R0_tos, Address(Robj, Roffset));
3631
__ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg);
3634
ShouldNotReachHere();
3638
// Check for volatile load
3640
__ tbz(Rflags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3641
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3642
__ bind(notVolatile);
3650
//----------------------------------------------------------------------------------------------------
3653
void TemplateTable::prepare_invoke(Register Rcache, Register recv) {
3655
const Register ret_type = R1_tmp;
3657
const Bytecodes::Code code = bytecode();
3658
const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
3660
// save 'interpreter return address'
3663
// Load TOS state for later
3664
__ ldrb(ret_type, Address(Rcache, in_bytes(ResolvedMethodEntry::type_offset())));
3666
// load receiver if needed (after extra argument is pushed so parameter size is correct)
3667
if (load_receiver) {
3668
__ ldrh(recv, Address(Rcache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
3669
__ add(recv, Rstack_top, AsmOperand(recv, lsl, Interpreter::logStackElementSize));
3670
__ ldr(recv, Address(recv, -Interpreter::stackElementSize));
3671
__ verify_oop(recv);
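// recv now holds the receiver oop (the deepest of the outgoing parameters on the expression stack).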
3674
// load return address
3675
{ const address table = (address) Interpreter::invoke_return_entry_table_for(code);
3676
__ mov_slow(LR, table);
3677
__ ldr(LR, Address::indexed_ptr(LR, ret_type));
3682
void TemplateTable::invokevirtual_helper(Register index,
3686
const Register recv_klass = R2_tmp;
3688
assert_different_registers(index, recv, flags, Rtemp);
3689
assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
3691
// Test for an invoke of a final method
3693
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);
3695
assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
3697
// do the call - the index is actually the method to call
3699
// It's final, need a null check here!
3700
__ null_check(recv, Rtemp);
3702
// profile this call
3703
__ profile_final_call(R0_tmp);
3705
__ jump_from_interpreted(Rmethod);
3709
// get receiver klass
3710
__ load_klass(recv_klass, recv);
3712
// profile this call
3713
__ profile_virtual_call(R0_tmp, recv_klass);
3715
// get target Method* & entry point
3716
const ByteSize base = Klass::vtable_start_offset();
3717
assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
3718
__ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
3719
__ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset()));
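// Rmethod now holds the target Method* taken from the receiver's vtable entry.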
3720
__ jump_from_interpreted(Rmethod);
3723
void TemplateTable::invokevirtual(int byte_no) {
3724
transition(vtos, vtos);
3725
assert(byte_no == f2_byte, "use this argument");
3727
const Register Rrecv = R2_tmp;
3728
const Register Rflags = R3_tmp;
3730
load_resolved_method_entry_virtual(Rrecv, // ResolvedMethodEntry*
3731
Rmethod, // Method* or itable index
3733
prepare_invoke(Rrecv, Rrecv);
3738
// LR: return address
3740
invokevirtual_helper(Rmethod, Rrecv, Rflags);
3744
void TemplateTable::invokespecial(int byte_no) {
3745
transition(vtos, vtos);
3746
assert(byte_no == f1_byte, "use this argument");
3748
const Register Rrecv = R2_tmp;
3749
const Register Rflags = R3_tmp;
3751
load_resolved_method_entry_special_or_static(Rrecv, // ResolvedMethodEntry*
3754
prepare_invoke(Rrecv, Rrecv);
3755
__ verify_oop(Rrecv);
3756
__ null_check(Rrecv, Rtemp);
3758
__ profile_call(Rrecv);
3759
__ jump_from_interpreted(Rmethod);
3763
void TemplateTable::invokestatic(int byte_no) {
3764
transition(vtos, vtos);
3765
assert(byte_no == f1_byte, "use this argument");
3767
const Register Rrecv = R2_tmp;
3768
const Register Rflags = R3_tmp;
3770
load_resolved_method_entry_special_or_static(Rrecv, // ResolvedMethodEntry*
3773
prepare_invoke(Rrecv, Rrecv);
3775
__ profile_call(Rrecv);
3776
__ jump_from_interpreted(Rmethod);
3780
void TemplateTable::fast_invokevfinal(int byte_no) {
3781
transition(vtos, vtos);
3782
assert(byte_no == f2_byte, "use this argument");
3783
__ stop("fast_invokevfinal is not used on ARM");
3787
void TemplateTable::invokeinterface(int byte_no) {
3788
transition(vtos, vtos);
3789
assert(byte_no == f1_byte, "use this argument");
3791
const Register Ritable = R1_tmp;
3792
const Register Rrecv = R2_tmp;
3793
const Register Rinterf = R5_tmp;
3794
const Register Rindex = R4_tmp;
3795
const Register Rflags = R3_tmp;
3796
const Register Rklass = R2_tmp; // Note! Same register with Rrecv
3798
load_resolved_method_entry_interface(Rrecv, // ResolvedMethodEntry*
3800
Rmethod, // Method* or itable/vtable index
3802
prepare_invoke(Rrecv, Rrecv);
3804
// First check for Object case, then private interface method,
3805
// then regular interface method.
3807
// Special case of invokeinterface called for virtual method of
3808
// java.lang.Object. See cpCache.cpp for details.
3809
Label notObjectMethod;
3810
__ tbz(Rflags, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);
3811
invokevirtual_helper(Rmethod, Rrecv, Rflags);
3812
__ bind(notObjectMethod);
3814
// Get receiver klass into Rklass - also a null check
3815
__ load_klass(Rklass, Rrecv);
3817
// Check for private method invocation - indicated by vfinal
3818
Label no_such_interface;
3821
__ tbz(Rflags, ResolvedMethodEntry::is_vfinal_shift, notVFinal);
3824
__ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
3825
// If we get here the typecheck failed
3826
__ b(no_such_interface);
3830
__ profile_final_call(R0_tmp);
3831
__ jump_from_interpreted(Rmethod);
3835
// Receiver subtype check against REFC.
3836
__ lookup_interface_method(// inputs: rec. class, interface
3837
Rklass, Rinterf, noreg,
3838
// outputs: scan temp. reg1, scan temp. reg2
3839
noreg, Ritable, Rtemp,
3842
// profile this call
3843
__ profile_virtual_call(R0_tmp, Rklass);
3845
// Get declaring interface class from method
3846
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
3847
__ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
3848
__ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset()));
3850
// Get itable index from method
3851
__ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
3852
__ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
3853
__ neg(Rindex, Rtemp);
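// Rindex now holds the decoded itable index (itable_index_max minus the stored value).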
3855
__ lookup_interface_method(// inputs: rec. class, interface
3856
Rklass, Rinterf, Rindex,
3857
// outputs: scan temp. reg1, scan temp. reg2
3858
Rmethod, Ritable, Rtemp,
3861
// Rmethod: Method* to call
3863
// Check for abstract method error
3864
// Note: This should be done more efficiently via a throw_abstract_method_error
3865
// interpreter entry point and a conditional jump to it in case of a null method.
3868
__ cbnz(Rmethod, L);
3870
// note: must restore interpreter registers to canonical
3871
// state for exception handling to work correctly!
3872
__ restore_method();
3873
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3874
// the call_VM checks for exception, so we should never return here.
3875
__ should_not_reach_here();
3880
__ jump_from_interpreted(Rmethod);
3883
__ bind(no_such_interface);
3884
__ restore_method();
3885
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3886
// the call_VM checks for exception, so we should never return here.
3887
__ should_not_reach_here();
3890
void TemplateTable::invokehandle(int byte_no) {
3891
transition(vtos, vtos);
3893
const Register Rrecv = R2_tmp;
3894
const Register Rmtype = R4_tmp;
3896
load_resolved_method_entry_handle(R2_tmp, // ResolvedMethodEntry*
3898
Rmtype, // Resolved Reference
3900
prepare_invoke(Rrecv, Rrecv);
3901
__ null_check(Rrecv, Rtemp);
3903
// Rmtype: MethodType object (from cpool->resolved_references[f1], if necessary)
3904
// Rmethod: MH.invokeExact_MT method (from f2)
3906
// Note: Rmtype is already pushed (if necessary) by prepare_invoke
3909
__ profile_final_call(R3_tmp); // FIXME: profile the LambdaForm also
3910
__ jump_from_interpreted(Rmethod);
3913
void TemplateTable::invokedynamic(int byte_no) {
3914
transition(vtos, vtos);
3916
const Register Rcallsite = R4_tmp;
3917
const Register R5_method = R5_tmp; // can't reuse Rmethod!
3919
load_invokedynamic_entry(R5_method);
3921
// Rcallsite: CallSite object (from cpool->resolved_references[f1])
3922
// Rmethod: MH.linkToCallSite method (from f2)
3924
// Note: Rcallsite is already pushed by prepare_invoke
3926
if (ProfileInterpreter) {
3927
__ profile_call(R2_tmp);
3931
__ mov(Rmethod, R5_method);
3932
__ jump_from_interpreted(Rmethod);
3935
//----------------------------------------------------------------------------------------------------
3938
void TemplateTable::_new() {
3939
transition(vtos, atos);
3941
const Register Robj = R0_tos;
3942
const Register Rcpool = R1_tmp;
3943
const Register Rindex = R2_tmp;
3944
const Register Rtags = R3_tmp;
3945
const Register Rsize = R3_tmp;
3947
Register Rklass = R4_tmp;
3948
assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
3949
assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
3953
Label initialize_header;
3955
__ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
3956
__ get_cpool_and_tags(Rcpool, Rtags);
3958
// Make sure the class we're about to instantiate has been resolved.
3959
// This is done before loading the InstanceKlass to be consistent with the order
// in which the constant pool is updated (see ConstantPool::klass_at_put).
3961
const int tags_offset = Array<u1>::base_offset_in_bytes();
3962
__ add(Rtemp, Rtags, Rindex);
3964
__ ldrb(Rtemp, Address(Rtemp, tags_offset));
3966
// use Rklass as a scratch
3967
volatile_barrier(MacroAssembler::LoadLoad, Rklass);
3969
// get InstanceKlass
3970
__ cmp(Rtemp, JVM_CONSTANT_Class);
3971
__ b(slow_case, ne);
3972
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
3974
// make sure klass is fully initialized
3976
__ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
3977
__ cmp(Rtemp, InstanceKlass::fully_initialized);
3978
__ b(slow_case, ne);
3980
// get instance_size in InstanceKlass (scaled to a count of bytes)
3981
__ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
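// For an instance klass the layout helper encodes the object size in bytes;
// the slow-path bit checked below flags the cases that must go to the runtime.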
3983
// test to see if it is malformed in some way
3984
// Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
3985
__ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3987
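
  // Note: for instance klasses the layout helper is a positive value holding
  // the instance size in bytes (already aligned); its low-order
  // _lh_instance_slow_path_bit is set when allocation has to take the slow
  // path (e.g. the class has a finalizer), so the single tbnz above covers
  // both the "malformed" and the "must go slow" cases.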

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //    Initialize the allocation.
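  //
  // Rough shape of the TLAB fast path emitted below (pseudo-C sketch, not the
  // actual emitted code):
  //
  //   obj = thread->tlab().top();
  //   if (obj + size > thread->tlab().end()) goto slow_case;  // refill/GC via runtime
  //   thread->tlab().set_top(obj + size);
  //   // then zero the body and install mark word + klass below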

  const Register Rtlab_top = R1_tmp;
  const Register Rtlab_end = R2_tmp;
  assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);

  __ tlab_allocate(Robj, Rtlab_top, Rtlab_end, Rsize, slow_case);
  if (ZeroTLAB) {
    // the fields have already been cleared
    __ b(initialize_header);
  }

  const Register Rzero0    = R1_tmp;
  const Register Rzero1    = R2_tmp;
  const Register Rzero_end = R5_tmp;
  const Register Rzero_cur = Rtemp;
  assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);

  // The object is initialized before the header. If the object size is
  // zero, go directly to the header initialization.
  __ subs(Rsize, Rsize, sizeof(oopDesc));
  __ add(Rzero_cur, Robj, sizeof(oopDesc));
  __ b(initialize_header, eq);

#ifdef ASSERT
  // make sure Rsize is a multiple of 8
  Label L;
  __ tst(Rsize, 0x07);
  __ b(L, eq);
  __ stop("object size is not multiple of 8 - adjust this code");
  __ bind(L);
#endif

  __ mov(Rzero0, 0);
  __ mov(Rzero1, 0);
  __ add(Rzero_end, Rzero_cur, Rsize);

  // initialize remaining object fields: Rsize was a multiple of 8
  // loop is unrolled 2 times
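  //
  // What the unrolled loop below amounts to (pseudo-C sketch, not the actual
  // emitted code); Rsize is a multiple of 8, so it terminates exactly at the
  // end of the object:
  //
  //   for (p = obj + sizeof(oopDesc); p != end; p += 8) {
  //     *(p + 0) = 0; *(p + 4) = 0;   // one stmia of Rzero0/Rzero1
  //   }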
  Label loop;
  __ bind(loop);
  __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
  __ cmp(Rzero_cur, Rzero_end);
  __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
  __ cmp(Rzero_cur, Rzero_end, ne);
  __ b(loop, ne);

  // initialize object header only.
  __ bind(initialize_header);
  __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
  __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
  __ store_klass(Rklass, Robj); // blows Rklass:
  Rklass = noreg;

  // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
  if (DTraceAllocProbes) {
    // Trigger dtrace event for fastpath
    Label Lcontinue;
    __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
    __ cbz(Rtemp, Lcontinue);
    __ push(atos);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), Robj);
    __ pop(atos);
    __ bind(Lcontinue);
  }

  // jump over literals
  __ b(done);

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(Rcpool);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
  __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  // continue
  __ bind(done);

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  __ membar(MacroAssembler::StoreStore, R1_tmp);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldrb(R1, at_bcp(1));
  __ mov(R2, R0_tos);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(R2, 1);
  __ get_constant_pool(R1);
  __ mov(R3, R0_tos);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, resolved, throw_exception;

  const Register Robj   = R0_tos;
  const Register Rcpool = R2_tmp;
  const Register Rtags  = R3_tmp;
  const Register Rindex = R4_tmp;
  const Register Rsuper = R3_tmp;
  const Register Rsub   = R4_tmp;
  const Register Rsubtype_check_tmp1 = R1_tmp;
  const Register Rsubtype_check_tmp2 = LR_tmp;

  __ cbz(Robj, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(Rcpool, Rtags);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  // See if bytecode has already been quicked
  __ add(Rtemp, Rtags, Rindex);
  __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
  __ cmp(Rtemp, JVM_CONSTANT_Class);

  volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
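
  // If the tag is already JVM_CONSTANT_Class, the entry has been resolved
  // ("quickened") and the Klass* can be loaded directly from the constant pool;
  // otherwise InterpreterRuntime::quicken_io_cc resolves it and returns the
  // Klass* in vm_result_2. The LoadLoad barrier above keeps the tag load
  // ordered before the load of the resolved klass.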
  __ b(quicked, eq);

  __ push(atos);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(Rsuper, Robj);
  __ pop_ptr(Robj);
  __ b(resolved);

  __ bind(throw_exception);
  // Come here on failure of subtype check
  __ profile_typecheck_failed(R1_tmp);
  __ mov(R2_ClassCastException_obj, Robj); // convention with generate_ClassCastException_handler()
  __ b(Interpreter::_throw_ClassCastException_entry);

  __ bind(quicked);
  // Get superklass in Rsuper and subklass in Rsub
  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);

  // Generate subtype check. Blows both tmps and Rtemp.
  assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
  __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);

  // Come here on success

  // Collect counts on whether this check-cast sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(R1_tmp);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  // result = 0: obj == nullptr or obj is not an instance of the specified klass
  // result = 1: obj != nullptr and obj is an instance of the specified klass
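  //
  // i.e. the bytecode leaves ((obj != nullptr) && (obj's klass is a subtype of
  // the resolved klass)) ? 1 : 0 in R0_tos.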

  transition(atos, itos);
  Label done, is_null, not_subtype, quicked, resolved;

  const Register Robj   = R0_tos;
  const Register Rcpool = R2_tmp;
  const Register Rtags  = R3_tmp;
  const Register Rindex = R4_tmp;
  const Register Rsuper = R3_tmp;
  const Register Rsub   = R4_tmp;
  const Register Rsubtype_check_tmp1 = R0_tmp;
  const Register Rsubtype_check_tmp2 = R1_tmp;

  __ cbz(Robj, is_null);

  __ load_klass(Rsub, Robj);

  // Get cpool & tags index
  __ get_cpool_and_tags(Rcpool, Rtags);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  // See if bytecode has already been quicked
  __ add(Rtemp, Rtags, Rindex);
  __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
  __ cmp(Rtemp, JVM_CONSTANT_Class);

  volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
  __ b(quicked, eq);

  __ push(atos);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(Rsuper, Robj);
  __ pop_ptr(Robj);
  __ b(resolved);

  __ bind(quicked);
  // Get superklass in Rsuper and subklass in Rsub
  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);

  // Generate subtype check. Blows both tmps and Rtemp.
  __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);

  // Come here on success
  __ mov(R0_tos, 1);
  __ b(done);

  __ bind(not_subtype);
  // Come here on failure
  __ profile_typecheck_failed(R1_tmp);
  __ mov(R0_tos, 0);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(R1_tmp);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
}

//----------------------------------------------------------------------------------------------------
// Breakpoints

void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
  __ mov(Rtmp_save0, R0);

  // post the breakpoint event
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);

  // complete the execution of original bytecode
  __ mov(R3_bytecode, Rtmp_save0);
  __ dispatch_only_normal(vtos);
}

//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ mov(Rexception_obj, R0_tos);
  __ null_check(Rexception_obj, Rtemp);
  __ b(Interpreter::throw_exception_entry());
}

//----------------------------------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- Rstack_top        = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved FP     ] <--- FP

void TemplateTable::monitorenter() {
  transition(atos, vtos);

  const Register Robj   = R0_tos;
  const Register Rentry = R1_tmp;

  // check for null object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size_in_bytes());
  assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
  Label allocate_monitor, allocated;

  // initialize entry pointer
  __ mov(Rentry, 0); // points to free slot or null

  // find a free slot in the monitor block (result in Rentry)
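
  // Rough equivalent of the search emitted below (pseudo-C sketch, not the
  // actual emitted code):
  //
  //   entry = nullptr;
  //   for (cur = monitor_block_top; cur != monitor_block_bottom; cur += entry_size) {
  //     if (cur->obj() == nullptr) entry = cur;   // remember a free slot
  //     if (cur->obj() == obj) break;             // same object: stop searching
  //   }
  //   if (entry == nullptr) entry = allocate_new_monitor();   // see below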
  { Label loop, exit;
    const Register Rcur     = R2_tmp;
    const Register Rcur_obj = Rtemp;
    const Register Rbottom  = R3_tmp;
    assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
    // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);                        // check if there are no monitors
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
    // prefetch monitor's object for the first iteration
    __ b(allocate_monitor, eq);                   // there are no monitors, skip searching

    __ bind(loop);
    __ cmp(Rcur_obj, 0);                          // check if current entry is used
    __ mov(Rentry, Rcur, eq);                     // if not used then remember entry

    __ cmp(Rcur_obj, Robj);                       // check if current entry is for same object
    __ b(exit, eq);                               // if same object then stop searching

    __ add(Rcur, Rcur, entry_size);               // otherwise advance to next entry

    __ cmp(Rcur, Rbottom);                        // check if bottom reached
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
    // prefetch monitor's object for the next iteration
    __ b(loop, ne);                               // if not at bottom then check this entry

    __ bind(exit);
  }

  __ cbnz(Rentry, allocated);                     // check if a slot has been found; if found, continue with that one

  __ bind(allocate_monitor);

  // allocate one if there's no free slot
  { Label loop;
    assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);

    // 1. compute new pointers

    __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    // old monitor block top / expression stack bottom

    __ sub(Rstack_top, Rstack_top, entry_size);   // move expression stack top
    __ check_stack_top_on_expansion();

    __ sub(Rentry, Rentry, entry_size);           // move expression stack bottom

    __ mov(R2_tmp, Rstack_top);                   // set start value for copy loop

    __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    // set new monitor block top

    // 2. move expression stack contents
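    //
    // i.e. every expression stack word is copied down by one monitor entry
    // (pseudo-C sketch, not the actual emitted code):
    //
    //   for (p = new_stack_top; p != new_stack_bottom; p += wordSize) {
    //     *p = *(p + entry_size);
    //   }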
    __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
    __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
    __ b(allocated, eq);

    __ bind(loop);
    __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
                                                            // and advance to next word
    __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
    __ ldr(Rtemp, Address(R2, entry_size), ne);             // load expression stack word from old location
    __ b(loop, ne);                                         // if not at bottom then copy next word
  }

  // call run-time routine
  __ bind(allocated);

  // Rentry: points to monitor entry

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ add(Rbcp, Rbcp, 1);

  __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset())); // store object
  __ lock_object(Rentry);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ arm_stack_overflow_check(0, Rtemp);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);

  const Register Robj     = R0_tos;
  const Register Rcur     = R1_tmp;
  const Register Rbottom  = R2_tmp;
  const Register Rcur_obj = Rtemp;
  const Register Rmonitor = R0; // fixed in unlock_object()

  // check for null object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size_in_bytes());
  Label found, throw_exception;

  // find matching slot
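
  // Scan the monitor block for the entry whose obj matches the value on tos;
  // if no entry is found the unlocking is not block-structured and
  // IllegalMonitorStateException is thrown below.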
  { Label loop;
    assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
    // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);                        // check if bottom reached
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
    // prefetch monitor's object for the first iteration
    __ b(throw_exception, eq);                    // throw exception if there are no monitors

    __ bind(loop);
    // check if current entry is for same object
    __ cmp(Rcur_obj, Robj);
    __ b(found, eq);                              // if same object then stop searching
    __ add(Rcur, Rcur, entry_size);               // otherwise advance to next entry
    __ cmp(Rcur, Rbottom);                        // check if bottom reached
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
    __ b(loop, ne);                               // if not at bottom then check this entry
  }

  // error handling. Unlocking was not block-structured
  __ bind(throw_exception);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);

  // Rcur: points to monitor entry
  __ push_ptr(Robj);                              // make sure object is on stack (contract with oopMaps)
  __ mov(Rmonitor, Rcur);
  __ unlock_object(Rmonitor);
  __ pop_ptr(Robj);                               // discard object
}

//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
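
  // 'wide' modifies the following bytecode to use a 16-bit local variable index
  // (and a 16-bit increment for iinc). Dispatch goes through the separate table
  // of wide entry points, Interpreter::_wentry_point, indexed by that following
  // bytecode.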
  __ ldrb(R3_bytecode, at_bcp(1));

  InlinedAddress Ltable((address)Interpreter::_wentry_point);
  __ ldr_literal(Rtemp, Ltable);
  __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions

  __ bind_literal(Ltable);
}

//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions

  // last dim is on top of stack; we want address of first one:
  //   first_addr = last_addr + ndims * stackElementSize - 1*wordSize
  // the trailing wordSize is subtracted so it points at the beginning of the
  // dimensions array.
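  //
  // Worked example (sketch): with ndims == 2 the last dimension sits in the
  // slot at Rstack_top and the first one stack element above it, so
  //   first_addr = Rstack_top + 2*stackElementSize - wordSize
  // is the address of the first (outermost) dimension; it is passed to the
  // runtime in R1.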
  __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  __ sub(R1, Rtemp, wordSize);

  call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}