/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

#define STOP(str) stop(str);

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
extern "C" void disnm(intptr_t p);

// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//    1b) Compare & branch (immediate):
//    1c) Test & branch (immediate):
//    1d) Conditional branch (immediate):
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//        adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//        adr/adrp Rx imm21; add Ry Rx #imm12
//        adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//        adr/adrp Rx imm21; movk Rx #imm16<<32
//        adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//        The latter form can only happen when the target is an
//        ExternalAddress, and (by definition) ExternalAddresses don't
//        move. Because of that property, there is never any need to
//        patch the last of the three instructions. However,
//        MacroAssembler::target_addr_for_insn takes all three
//        instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//        movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//    1a)  00___x Unconditional branch (immediate)
//    1b)  01___0 Compare & branch (immediate)
//    1c)  01___1 Test & branch (immediate)
//    1d)  10___0 Conditional branch (immediate)
//    other       Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//    2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//    2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                 strictly should be 64 bit non-FP/SIMD i.e.
//                 0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//    3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//    3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//                 110___10100 (i.e. requires insn[31:21] == 11010010100)
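
// Illustrative sketch (not part of the original file): how a dispatch on a
// subset of the instruction bits can classify an instruction word, mirroring
// the insn[28:26] table above. `classify` and `extract_bits` are
// hypothetical stand-alone helpers, not HotSpot APIs.
#if 0
#include <cstdint>
#include <cstdio>

// Extract bits [hi:lo] of a 32-bit instruction word.
static uint32_t extract_bits(uint32_t insn, int hi, int lo) {
  return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
}

static const char* classify(uint32_t insn) {
  switch (extract_bits(insn, 28, 26)) {          // main group
    case 0b100: return "Data Processing Immediate";
    case 0b101: return "Branch, Exception and System";
    default:
      if (extract_bits(insn, 27, 27)) {          // x1x -> bit 27 set
        return "Loads and Stores";
      }
      return "other";
  }
}

int main() {
  uint32_t b_insn = 0x14000000;                  // an unconditional 'b'
  printf("%s\n", classify(b_insn));              // Branch, Exception and System
}
#endif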
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}
  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch (dispatch) {
      case 0b001010: case 0b001011: { // Unconditional branch (immediate)
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: { // Test & branch (immediate)
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b000100: case 0b000110: case 0b010100: case 0b010110: // Loads and Stores
      case 0b001100: case 0b001110: case 0b011100: case 0b011110:
      case 0b100100: case 0b100110: case 0b110100: case 0b110110:
      case 0b101100: case 0b101110: case 0b111100: case 0b111110: {
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000: case 0b011000: case 0b101000: case 0b111000: { // PC-rel. addressing
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          // adrp: inspect the following instruction to find the target
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001: case 0b011001: case 0b101001: case 0b111001: { // Move wide (immediate)
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};
class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
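    // (adr() returns 1, closing brace elided in this listing)

// Illustrative sketch (not part of the original file): how adr()'s two
// patches above place a byte offset into the ADR encoding. immlo carries
// the low two bits of the offset in insn[30:29]; immhi carries the
// remaining 19 bits in insn[23:5]. `encode_adr_offset` is a hypothetical
// helper.
#if 0
#include <cstdint>

static uint32_t encode_adr_offset(uint32_t insn, int64_t offset) {
  uint32_t immlo = (uint32_t)(offset & 3);               // -> insn[30:29]
  uint32_t immhi = (uint32_t)((offset >> 2) & 0x7ffff);  // -> insn[23:5]
  insn &= ~((3u << 29) | (0x7ffffu << 5));               // clear both fields
  insn |= (immlo << 29) | (immhi << 5);
  return insn;
}
#endif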
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    ptrdiff_t offset = target - insn_addr;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
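
// Illustrative sketch (not part of the original file): how a 48-bit address
// is split into the three 16-bit halfwords carried by the movz/movk/movk
// sequence patched above; each halfword lands in bits [20:5] of its
// instruction. `split_imm48` is a hypothetical helper.
#if 0
#include <cstdint>
#include <cassert>

static void split_imm48(uint64_t dest, uint16_t out[3]) {
  assert((dest >> 48) == 0);       // only 48-bit values are expected
  out[0] = dest & 0xffff;          // movz Rx, #out[0]
  out[1] = (dest >> 16) & 0xffff;  // movk Rx, #out[1], lsl #16
  out[2] = (dest >> 32) & 0xffff;  // movk Rx, #out[2], lsl #32
}
#endif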
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction which tells us the offset within that page.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    }
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};
address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
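
// Illustrative sketch (not part of the original file): the opcode test that
// separates the two oop encodings above. A narrow (32-bit) oop is emitted as
//   movz Rx, #(n >> 16), lsl #16 ; movk Rx, #(n & 0xffff)
// whereas a wide (48-bit) oop uses the movz/movk/movk sequence shown
// earlier. `is_narrow_oop_movz` is a hypothetical helper; 0b11010010101 is
// the 64-bit movz-with-hw=01 pattern tested above.
#if 0
#include <cstdint>

static bool is_narrow_oop_movz(uint32_t insn) {
  return ((insn >> 21) & 0x7ff) == 0b11010010101;  // movz Rx, #imm16, lsl #16
}
#endif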
int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}
void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
// When entering C land, the rfp, & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {
  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}
void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}
static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)
  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}
// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
          !CodeCache::find_blob(target)->is_nmethod(),
          "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}
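
// Illustrative sketch (not part of the original file): what a reachability
// check like Assembler::reachable_from_branch_at boils down to. An AArch64
// b/bl instruction carries a signed 26-bit word offset, i.e. +/-128MB of
// byte range from the branch site. `branch_reachable` is a hypothetical
// stand-in, not the HotSpot API.
#if 0
#include <cstdint>

static bool branch_reachable(const char* branch_pc, const char* target) {
  int64_t offset = target - branch_pc;
  // signed 26-bit immediate, scaled by 4 bytes -> +/- 2^27 bytes (128MB)
  return -(1LL << 27) <= offset && offset < (1LL << 27);
}
#endif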
// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}
void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  tst(x, 0xff);
  cset(x, Assembler::NE);
}
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}
int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}
int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}
void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
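// (closing brace of align() elided in this listing)

// Illustrative sketch (not part of the original file): align(16, offset() + 8)
// pads with nops until a point 8 bytes ahead of the current position (for
// example, the end of an IC check) lands on a 16-byte boundary.
// `nops_needed` is a hypothetical model of the loop above.
#if 0
static int nops_needed(int current_offset, int delta, int modulus) {
  int n = 0;
  while ((current_offset + delta) % modulus != 0) {
    current_offset += 4;   // one AArch64 nop
    n++;
  }
  return n;
}
// nops_needed(36, 8, 16) == 1: 36 + 8 = 44; one nop moves the target to 48.
#endif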
void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler
void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  // A plain C++ model of this loop is sketched after this function.
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);

  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}
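
// Illustrative sketch (not part of the original file): the pseudocode
// comment inside lookup_interface_method, expressed as a plain C++ loop
// over a hypothetical itable layout. `ItableOffsetEntry` and `scan_itable`
// are stand-ins for HotSpot's itableOffsetEntry scan, not real APIs.
#if 0
#include <cstdint>

struct ItableOffsetEntry {
  const void* interface;  // nullptr terminates the scan
  uint32_t    offset;     // offset of this interface's method block
};

static const ItableOffsetEntry* scan_itable(const ItableOffsetEntry* scan,
                                            const void* intf) {
  for (; scan->interface != nullptr; scan++) {
    if (scan->interface == intf) {
      return scan;        // found: caller reads scan->offset
    }
  }
  return nullptr;         // receiver doesn't implement intf
}
#endif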
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  // - if (holder_klass != resolved_klass), go to "scan for resolved"
  // - if (itable[0] == holder_klass), shortcut to "holder found"
  // - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::EQ, L_holder_found);
    cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
    cbz(temp_itbl_klass, L_no_such_interface);
    cmp(resolved_klass, temp_itbl_klass);
    br(Assembler::EQ, L_resolved_found);
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::NE, L_loop_search_resolved);
    mov(holder_offset, scan_temp);
    b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
    cbz(holder_offset, L_search_holder);
    mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
    ldrw(method_result, Address(scan_temp, ooffset - ioffset));
    add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
    ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}
// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}
// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  // NB! Callers may assume that, when temp2_reg is a valid register,
  // this code sets it to a nonzero value.

  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  RegSet pushed_registers;
  if (!IS_A_TEMP(r2))    pushed_registers += r2;
  if (!IS_A_TEMP(r5))    pushed_registers += r5;

  if (super_klass != r0) {
    if (!IS_A_TEMP(r0))   pushed_registers += r0;
  }

  push(pushed_registers, sp);

  // Get super_klass value into r0 (even if it was in r5 or r2).
  if (super_klass != r0) {
    mov(r0, super_klass);
  }

#ifndef PRODUCT
  incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}
// Ensure that the inline code and the stub are using the same registers.
#define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                    \
do {                                                               \
  assert(r_super_klass  == r0                                   && \
         r_array_base   == r1                                   && \
         r_array_length == r2                                   && \
         (r_array_index == r3        || r_array_index == noreg) && \
         (r_sub_klass   == r4        || r_sub_klass   == noreg) && \
         (r_bitmap      == rscratch2 || r_bitmap      == noreg) && \
         (result        == r5        || result        == noreg), "registers must match aarch64.ad"); \
} while(0)

// Return true: we succeeded in generating this code
bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass,
                                                   Register r_super_klass,
                                                   Register temp1,
                                                   Register temp2,
                                                   Register temp3,
                                                   FloatRegister vtemp,
                                                   Register result,
                                                   u1 super_klass_slot,
                                                   bool stub_is_near) {
  assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);

  Label L_fallthrough;

  BLOCK_COMMENT("lookup_secondary_supers_table {");

  const Register
    r_array_base   = temp1, // r1
    r_array_length = temp2, // r2
    r_array_index  = temp3, // r3
    r_bitmap       = rscratch2;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  u1 bit = super_klass_slot;

  // Make sure that result is nonzero if the TBZ below misses.
  mov(result, 1);

  // We're going to need the bitmap in a vector reg and in a core reg,
  // so load both now.
  ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset()));
  if (bit != 0) {
    ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset()));
  }
  // First check the bitmap to see if super_klass might be present. If
  // the bit is zero, we are certain that super_klass is not one of
  // the secondary supers.
  tbz(r_bitmap, bit, L_fallthrough);

  // Get the first array index that can contain super_klass into r_array_index.
  if (bit != 0) {
    shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
    cnt(vtemp, T8B, vtemp);
    addv(vtemp, T8B, vtemp);
    fmovd(r_array_index, vtemp);
  } else {
    mov(r_array_index, (u1)1);
  }
  // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.

  // We will consult the secondary-super array.
  ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));

  // The value i in r_array_index is >= 1, so even though r_array_base
  // points to the length, we don't need to adjust it to point to the
  // data.
  assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
  assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");

  ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
  eor(result, result, r_super_klass);
  cbz(result, L_fallthrough); // Found a match

  // Is there another entry to check? Consult the bitmap.
  tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);

  // Linear probe.
  if (bit != 0) {
    ror(r_bitmap, r_bitmap, bit);
  }

  // The slot we just inspected is at secondary_supers[r_array_index - 1].
  // The next slot to be inspected, by the stub we're about to call,
  // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
  // have been checked.
  Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
  if (stub_is_near) {
    bl(stub);
  } else {
    address call = trampoline_call(stub);
    if (call == nullptr) {
      return false; // trampoline allocation failed
    }
  }

  BLOCK_COMMENT("} lookup_secondary_supers_table");

  bind(L_fallthrough);

  if (VerifySecondarySupers) {
    verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
                                  temp1, temp2, result);      // r1, r2, r5
  }
  return true;
}
1704
// Called by code generated by check_klass_subtype_slow_path
1705
// above. This is called when there is a collision in the hashed
1706
// lookup in the secondary supers array.
1707
void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1708
Register r_array_base,
1709
Register r_array_index,
1713
assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1716
const Register r_array_length = temp1,
1717
r_sub_klass = noreg; // unused
1719
LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1721
Label L_fallthrough, L_huge;
1723
// Load the array length.
1724
ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1725
// And adjust the array base to point to the data.
1726
// NB! Effectively increments current slot index by 1.
1727
assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1728
add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1730
// The bitmap is full to bursting.
1731
// Implicit invariant: BITMAP_FULL implies (length > 0)
1732
assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1733
cmn(r_bitmap, (u1)1);
1736
// NB! Our caller has checked bits 0 and 1 in the bitmap. The
1737
// current slot (at secondary_supers[r_array_index]) has not yet
1738
// been inspected, and r_array_index may be out of bounds if we
1739
// wrapped around the end of the array.
1741
{ // This is conventional linear probing, but instead of terminating
1742
// when a null entry is found in the table, we maintain a bitmap
1743
// in which a 0 indicates missing entries.
1744
// The check above guarantees there are 0s in the bitmap, so the loop
1745
// eventually terminates.
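// Roughly, the loop below behaves like this C sketch (illustrative
// only; bit 2 is the look-ahead position after the rotation above):
//
//   for (;;) {
//     if (index >= length) index = 0;          // wraparound
//     if (table[index] == super_klass) break;  // hit
//     if (((bitmap >> 2) & 1) == 0) break;     // miss: next probe bit clear
//     bitmap = rotate_right(bitmap, 1);        // hypothetical helper
//     index++;
//   }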
1749
// Check for wraparound.
1750
cmp(r_array_index, r_array_length);
1751
csel(r_array_index, zr, r_array_index, GE);
1753
ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1754
eor(result, rscratch1, r_super_klass);
1755
cbz(result, L_fallthrough);
1757
tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1759
ror(r_bitmap, r_bitmap, 1);
1760
add(r_array_index, r_array_index, 1);
1764
{ // Degenerate case: more than 64 secondary supers.
1765
// FIXME: We could do something smarter here, maybe a vectorized
1766
// comparison or a binary search, but is that worth any added complexity?
1769
cmp(sp, zr); // Clear Z flag; SP is never zero
1770
repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1771
cset(result, NE); // result == 0 iff we got a match.
1774
bind(L_fallthrough);
1777
// Make sure that the hashed lookup and a linear scan agree.
1778
void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1779
Register r_super_klass,
1783
assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1786
const Register r_array_base = temp1,
1787
r_array_length = temp2,
1788
r_array_index = noreg, // unused
1789
r_bitmap = noreg; // unused
1791
LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1793
BLOCK_COMMENT("verify_secondary_supers_table {");
1795
// We will consult the secondary-super array.
1796
ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1798
// Load the array length.
1799
ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1800
// And adjust the array base to point to the data.
1801
add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1803
cmp(sp, zr); // Clear Z flag; SP is never zero
1804
// Scan R2 words at [R5] for an occurrence of R0.
1805
// Set NZ/Z based on last compare.
1806
repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
1807
// rscratch1 == 0 iff we got a match.
1808
cset(rscratch1, NE);
1812
cset(result, NE); // normalize result to 0/1 for comparison
1814
cmp(rscratch1, result);
1817
mov(r0, r_super_klass); // r0 <- r0
1818
mov(r1, r_sub_klass); // r1 <- r4
1819
mov(r2, /*expected*/rscratch1); // r2 <- r8
1820
mov(r3, result); // r3 <- r5
1821
mov(r4, (address)("mismatch")); // r4 <- const
1822
rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
1823
should_not_reach_here();
1827
BLOCK_COMMENT("} verify_secondary_supers_table");
1830
void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
1831
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
1832
assert_different_registers(klass, rthread, scratch);
1834
Label L_fallthrough, L_tmp;
1835
if (L_fast_path == nullptr) {
1836
L_fast_path = &L_fallthrough;
1837
} else if (L_slow_path == nullptr) {
1838
L_slow_path = &L_fallthrough;
1840
// Fast path check: class is fully initialized
1841
ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1842
subs(zr, scratch, InstanceKlass::fully_initialized);
1843
br(Assembler::EQ, *L_fast_path);
1845
// Fast path check: current thread is initializer thread
1846
ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1847
cmp(rthread, scratch);
1849
if (L_slow_path == &L_fallthrough) {
1850
br(Assembler::EQ, *L_fast_path);
1852
} else if (L_fast_path == &L_fallthrough) {
1853
br(Assembler::NE, *L_slow_path);
1860
void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1861
if (!VerifyOops) return;
1863
// Pass register number to verify_oop_subroutine
1864
const char* b = nullptr;
1868
ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1869
b = code_string(ss.as_string());
1871
BLOCK_COMMENT("verify_oop {");
1873
strip_return_address(); // This might happen within a stack frame.
1874
protect_return_address();
1875
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1876
stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1879
movptr(rscratch1, (uintptr_t)(address)b);
1881
// call indirectly to solve generation ordering problem
1882
lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1883
ldr(rscratch2, Address(rscratch2));
1886
ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1887
ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1888
authenticate_return_address();
1890
BLOCK_COMMENT("} verify_oop");
1893
void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
1894
if (!VerifyOops) return;
1896
const char* b = nullptr;
1900
ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
1901
b = code_string(ss.as_string());
1903
BLOCK_COMMENT("verify_oop_addr {");
1905
strip_return_address(); // This might happen within a stack frame.
1906
protect_return_address();
1907
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1908
stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1910
// addr may contain sp so we will have to adjust it based on the
1911
// pushes that we just did.
1912
if (addr.uses(sp)) {
1914
ldr(r0, Address(r0, 4 * wordSize));
1918
movptr(rscratch1, (uintptr_t)(address)b);
1920
// call indirectly to solve generation ordering problem
1921
lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1922
ldr(rscratch2, Address(rscratch2));
1925
ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1926
ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1927
authenticate_return_address();
1929
BLOCK_COMMENT("} verify_oop_addr");
1932
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1933
int extra_slot_offset) {
1934
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
1935
int stackElementSize = Interpreter::stackElementSize;
1936
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
1938
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
1939
assert(offset1 - offset == stackElementSize, "correct arithmetic");
1941
if (arg_slot.is_constant()) {
1942
return Address(esp, arg_slot.as_constant() * stackElementSize + offset);
1945
add(rscratch1, esp, arg_slot.as_register(),
1946
ext::uxtx, exact_log2(stackElementSize));
1947
return Address(rscratch1, offset);
1951
void MacroAssembler::call_VM_leaf_base(address entry_point,
1952
int number_of_arguments,
1956
stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
1958
mov(rscratch1, entry_point);
1963
ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
1966
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1967
call_VM_leaf_base(entry_point, number_of_arguments);
1970
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1971
pass_arg0(this, arg_0);
1972
call_VM_leaf_base(entry_point, 1);
1975
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1976
assert_different_registers(arg_1, c_rarg0);
1977
pass_arg0(this, arg_0);
1978
pass_arg1(this, arg_1);
1979
call_VM_leaf_base(entry_point, 2);
1982
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1983
Register arg_1, Register arg_2) {
1984
assert_different_registers(arg_1, c_rarg0);
1985
assert_different_registers(arg_2, c_rarg0, c_rarg1);
1986
pass_arg0(this, arg_0);
1987
pass_arg1(this, arg_1);
1988
pass_arg2(this, arg_2);
1989
call_VM_leaf_base(entry_point, 3);
1992
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1993
pass_arg0(this, arg_0);
1994
MacroAssembler::call_VM_leaf_base(entry_point, 1);
1997
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1999
assert_different_registers(arg_0, c_rarg1);
2000
pass_arg1(this, arg_1);
2001
pass_arg0(this, arg_0);
2002
MacroAssembler::call_VM_leaf_base(entry_point, 2);
2005
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2006
assert_different_registers(arg_0, c_rarg1, c_rarg2);
2007
assert_different_registers(arg_1, c_rarg2);
2008
pass_arg2(this, arg_2);
2009
pass_arg1(this, arg_1);
2010
pass_arg0(this, arg_0);
2011
MacroAssembler::call_VM_leaf_base(entry_point, 3);
2014
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2015
assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2016
assert_different_registers(arg_1, c_rarg2, c_rarg3);
2017
assert_different_registers(arg_2, c_rarg3);
2018
pass_arg3(this, arg_3);
2019
pass_arg2(this, arg_2);
2020
pass_arg1(this, arg_1);
2021
pass_arg0(this, arg_0);
2022
MacroAssembler::call_VM_leaf_base(entry_point, 4);
2025
void MacroAssembler::null_check(Register reg, int offset) {
2026
if (needs_explicit_null_check(offset)) {
2027
// provoke OS null exception if reg is null by
2028
// accessing M[reg] w/o changing any registers
2029
// NOTE: this is plenty to provoke a segv
2030
ldr(zr, Address(reg));
2032
// nothing to do, (later) access of M[reg + offset]
2033
// will provoke OS null exception if reg is null
2037
// MacroAssembler protected routines needed to implement public methods
2040
void MacroAssembler::mov(Register r, Address dest) {
2041
code_section()->relocate(pc(), dest.rspec());
2042
uint64_t imm64 = (uint64_t)dest.target();
2046
// Move a constant pointer into r. In AArch64 mode the virtual
2047
// address space is 48 bits in size, so we only need three
2048
// instructions to create a patchable instruction sequence that can reach anywhere.
2050
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2054
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2055
block_comment(buffer);
2058
assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2059
movz(r, imm64 & 0xffff);
2060
imm64 >>= 16;
2061
movk(r, imm64 & 0xffff, 16);
2062
imm64 >>= 16;
2063
movk(r, imm64 & 0xffff, 32);
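// For example (illustrative): movptr(r0, 0x123456789abc) emits
//   movz x0, #0x9abc
//   movk x0, #0x5678, lsl #16
//   movk x0, #0x1234, lsl #32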
2066
// Macro to mov replicated immediate to vector register.
2067
// imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2068
// the upper 56/48/32 bits must be zeros for B/H/S type.
2069
// Vd will get the following values for different arrangements in T
2070
// imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh
2071
// imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh
2072
// imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh
2073
// imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh
2074
// imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
2075
// imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
2076
// imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh
2077
// imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh
2078
// Clobbers rscratch1
2079
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2080
assert(T != T1Q, "unsupported");
2081
if (T == T1D || T == T2D) {
2082
int imm = operand_valid_for_movi_immediate(imm64, T);
2086
mov(rscratch1, imm64);
2087
dup(Vd, T, rscratch1);
2093
if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2094
if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2095
if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2097
int shift = operand_valid_for_movi_immediate(imm64, T);
2098
uint32_t imm32 = imm64 & 0xffffffffULL;
2100
movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2102
movw(rscratch1, imm32);
2103
dup(Vd, T, rscratch1);
2107
void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2112
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2113
block_comment(buffer);
2116
if (operand_valid_for_logical_immediate(false, imm64)) {
2117
orr(dst, zr, imm64);
2119
// we can use a combination of MOVZ or MOVN with
2120
// MOVK to build up the constant
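// Worked example (illustrative): imm64 == 0xffffffff1234ffff has three
// 0xffff halfwords (neg_count == 3), so a single MOVN suffices:
//   movn dst, #0xedcb, lsl #16   // dst = ~(0xedcb << 16) == 0xffffffff1234ffff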
2125
for (i = 0; i < 4; i++) {
2126
imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2127
if (imm_h[i] == 0) {
2129
} else if (imm_h[i] == 0xffffL) {
2133
if (zero_count == 4) {
2136
} else if (neg_count == 4) {
2139
} else if (zero_count == 3) {
2140
for (i = 0; i < 4; i++) {
2141
if (imm_h[i] != 0L) {
2142
movz(dst, (uint32_t)imm_h[i], (i << 4));
2146
} else if (neg_count == 3) {
2148
for (int i = 0; i < 4; i++) {
2149
if (imm_h[i] != 0xffffL) {
2150
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2154
} else if (zero_count == 2) {
2155
// one MOVZ and one MOVK will do
2156
for (i = 0; i < 3; i++) {
2157
if (imm_h[i] != 0L) {
2158
movz(dst, (uint32_t)imm_h[i], (i << 4));
2164
if (imm_h[i] != 0L) {
2165
movk(dst, (uint32_t)imm_h[i], (i << 4));
2168
} else if (neg_count == 2) {
2169
// one MOVN and one MOVK will do
2170
for (i = 0; i < 4; i++) {
2171
if (imm_h[i] != 0xffffL) {
2172
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2178
if (imm_h[i] != 0xffffL) {
2179
movk(dst, (uint32_t)imm_h[i], (i << 4));
2182
} else if (zero_count == 1) {
2183
// one MOVZ and two MOVKs will do
2184
for (i = 0; i < 4; i++) {
2185
if (imm_h[i] != 0L) {
2186
movz(dst, (uint32_t)imm_h[i], (i << 4));
2192
if (imm_h[i] != 0x0L) {
2193
movk(dst, (uint32_t)imm_h[i], (i << 4));
2196
} else if (neg_count == 1) {
2197
// one MOVN and two MOVKs will do
2198
for (i = 0; i < 4; i++) {
2199
if (imm_h[i] != 0xffffL) {
2200
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2206
if (imm_h[i] != 0xffffL) {
2207
movk(dst, (uint32_t)imm_h[i], (i << 4));
2211
// use a MOVZ and 3 MOVKs (makes it easier to debug)
2212
movz(dst, (uint32_t)imm_h[0], 0);
2213
for (i = 1; i < 4; i++) {
2214
movk(dst, (uint32_t)imm_h[i], (i << 4));
2220
void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2225
snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2226
block_comment(buffer);
2229
if (operand_valid_for_logical_immediate(true, imm32)) {
2230
orrw(dst, zr, imm32);
2232
// we can use MOVZ, MOVN or two calls to MOVK to build up the constant
2235
imm_h[0] = imm32 & 0xffff;
2236
imm_h[1] = ((imm32 >> 16) & 0xffff);
2237
if (imm_h[0] == 0) {
2238
movzw(dst, imm_h[1], 16);
2239
} else if (imm_h[0] == 0xffff) {
2240
movnw(dst, imm_h[1] ^ 0xffff, 16);
2241
} else if (imm_h[1] == 0) {
2242
movzw(dst, imm_h[0], 0);
2243
} else if (imm_h[1] == 0xffff) {
2244
movnw(dst, imm_h[0] ^ 0xffff, 0);
2246
// use a MOVZ and MOVK (makes it easier to debug)
2247
movzw(dst, imm_h[0], 0);
2248
movkw(dst, imm_h[1], 16);
2253
// Form an address from base + offset in Rd. Rd may or may
2254
// not actually be used: you must use the Address that is returned.
2255
// It is up to you to ensure that the shift provided matches the size of the operand.
2257
Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2258
if (Address::offset_ok_for_immed(byte_offset, shift))
2259
// It fits; no need for any heroics
2260
return Address(base, byte_offset);
2262
// Don't do anything clever with negative or misaligned offsets
2263
unsigned mask = (1 << shift) - 1;
2264
if (byte_offset < 0 || byte_offset & mask) {
2265
mov(Rd, byte_offset);
2270
// See if we can do this with two 12-bit offsets
2272
uint64_t word_offset = byte_offset >> shift;
2273
uint64_t masked_offset = word_offset & 0xfff000;
2274
if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2275
&& Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2276
add(Rd, base, masked_offset << shift);
2277
word_offset -= masked_offset;
2278
return Address(Rd, word_offset << shift);
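// Worked example (illustrative): with shift == 3 and byte_offset ==
// 0x40010 we get word_offset == 0x8002 and masked_offset == 0x8000,
// so we emit "add Rd, base, #0x40000" and return Address(Rd, 0x10).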
2282
// Do it the hard way
2283
mov(Rd, byte_offset);
2288
int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2289
bool want_remainder, Register scratch)
2291
// Full implementation of Java idiv and irem. The function
2292
// returns the (pc) offset of the div instruction - may be needed
2293
// for implicit exceptions.
2295
// constraint : ra/rb =/= scratch
2298
// input : ra: dividend
2302
// quotient (= ra idiv rb)
2303
// remainder (= ra irem rb)
2305
assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2307
int idivl_offset = offset();
2308
if (! want_remainder) {
2309
sdivw(result, ra, rb);
2311
sdivw(scratch, ra, rb);
2312
Assembler::msubw(result, scratch, rb, ra);
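// In C terms (an illustrative sketch), the remainder case computes
//   int q = dividend / divisor;       // sdivw
//   result = dividend - q * divisor;  // msubw
// which matches Java irem, e.g. irem(Integer.MIN_VALUE, -1) == 0.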
2315
return idivl_offset;
2318
int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2319
bool want_remainder, Register scratch)
2321
// Full implementation of Java ldiv and lrem. The function
2322
// returns the (pc) offset of the div instruction - may be needed
2323
// for implicit exceptions.
2325
// constraint : ra/rb =/= scratch
2328
// input : ra: dividend
2332
// quotient (= ra idiv rb)
2333
// remainder (= ra irem rb)
2335
assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2337
int idivq_offset = offset();
2338
if (! want_remainder) {
2339
sdiv(result, ra, rb);
2341
sdiv(scratch, ra, rb);
2342
Assembler::msub(result, scratch, rb, ra);
2345
return idivq_offset;
2348
void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2349
address prev = pc() - NativeMembar::instruction_size;
2350
address last = code()->last_insn();
2351
if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2352
NativeMembar *bar = NativeMembar_at(prev);
2353
if (AlwaysMergeDMB) {
2354
bar->set_kind(bar->get_kind() | order_constraint);
2355
BLOCK_COMMENT("merged membar(always)");
2358
// Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2359
// doing so would introduce a StoreLoad which the caller did not intend.
2361
if (bar->get_kind() == order_constraint
2362
|| bar->get_kind() == AnyAny
2363
|| order_constraint == AnyAny) {
2364
// We are merging two memory barrier instructions. On AArch64 we
2365
// can do this simply by ORing them together.
2366
bar->set_kind(bar->get_kind() | order_constraint);
2367
BLOCK_COMMENT("merged membar");
2370
// In a special case like "DMB ST; DMB LD; DMB ST", the last DMB can be skipped.
2371
// We need to check the last two instructions.
2372
address prev2 = prev - NativeMembar::instruction_size;
2373
if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2374
NativeMembar *bar2 = NativeMembar_at(prev2);
2375
assert(bar2->get_kind() == order_constraint, "it should be merged before");
2376
BLOCK_COMMENT("merged membar(elided)");
2381
code()->set_last_insn(pc());
2382
dmb(Assembler::barrier(order_constraint));
2385
bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2386
if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2387
merge_ldst(rt, adr, size_in_bytes, is_store);
2388
code()->clear_last_insn();
2391
assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte loads/stores are supported.");
2392
const uint64_t mask = size_in_bytes - 1;
2393
if (adr.getMode() == Address::base_plus_offset &&
2394
(adr.offset() & mask) == 0) { // only supports base_plus_offset.
2395
code()->set_last_insn(pc());
2401
void MacroAssembler::ldr(Register Rx, const Address &adr) {
2402
// We always try to merge two adjacent loads into one ldp.
2403
if (!try_merge_ldst(Rx, adr, 8, false)) {
2404
Assembler::ldr(Rx, adr);
2408
void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2409
// We always try to merge two adjacent loads into one ldp.
2410
if (!try_merge_ldst(Rw, adr, 4, false)) {
2411
Assembler::ldrw(Rw, adr);
2415
void MacroAssembler::str(Register Rx, const Address &adr) {
2416
// We always try to merge two adjacent stores into one stp.
2417
if (!try_merge_ldst(Rx, adr, 8, true)) {
2418
Assembler::str(Rx, adr);
2422
void MacroAssembler::strw(Register Rw, const Address &adr) {
2423
// We always try to merge two adjacent stores into one stp.
2424
if (!try_merge_ldst(Rw, adr, 4, true)) {
2425
Assembler::strw(Rw, adr);
2429
// MacroAssembler routines found actually to be needed
2431
void MacroAssembler::push(Register src)
2433
str(src, Address(pre(esp, -1 * wordSize)));
2436
void MacroAssembler::pop(Register dst)
2438
ldr(dst, Address(post(esp, 1 * wordSize)));
2441
// Note: load_unsigned_short used to be called load_unsigned_word.
2442
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2448
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2454
int MacroAssembler::load_signed_short(Register dst, Address src) {
2460
int MacroAssembler::load_signed_byte(Register dst, Address src) {
2466
int MacroAssembler::load_signed_short32(Register dst, Address src) {
2472
int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2478
void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2479
switch (size_in_bytes) {
2480
case 8: ldr(dst, src); break;
2481
case 4: ldrw(dst, src); break;
2482
case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2483
case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2484
default: ShouldNotReachHere();
2488
void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2489
switch (size_in_bytes) {
2490
case 8: str(src, dst); break;
2491
case 4: strw(src, dst); break;
2492
case 2: strh(src, dst); break;
2493
case 1: strb(src, dst); break;
2494
default: ShouldNotReachHere();
2498
void MacroAssembler::decrementw(Register reg, int value)
2500
if (value < 0) { incrementw(reg, -value); return; }
2501
if (value == 0) { return; }
2502
if (value < (1 << 12)) { subw(reg, reg, value); return; }
2504
guarantee(reg != rscratch2, "invalid dst for register decrement");
2505
movw(rscratch2, (unsigned)value);
2506
subw(reg, reg, rscratch2);
2510
void MacroAssembler::decrement(Register reg, int value)
2512
if (value < 0) { increment(reg, -value); return; }
2513
if (value == 0) { return; }
2514
if (value < (1 << 12)) { sub(reg, reg, value); return; }
2516
assert(reg != rscratch2, "invalid dst for register decrement");
2517
mov(rscratch2, (uint64_t)value);
2518
sub(reg, reg, rscratch2);
2522
void MacroAssembler::decrementw(Address dst, int value)
2524
assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2525
if (dst.getMode() == Address::literal) {
2526
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2527
lea(rscratch2, dst);
2528
dst = Address(rscratch2);
2530
ldrw(rscratch1, dst);
2531
decrementw(rscratch1, value);
2532
strw(rscratch1, dst);
2535
void MacroAssembler::decrement(Address dst, int value)
2537
assert(!dst.uses(rscratch1), "invalid address for decrement");
2538
if (dst.getMode() == Address::literal) {
2539
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2540
lea(rscratch2, dst);
2541
dst = Address(rscratch2);
2543
ldr(rscratch1, dst);
2544
decrement(rscratch1, value);
2545
str(rscratch1, dst);
2548
void MacroAssembler::incrementw(Register reg, int value)
2550
if (value < 0) { decrementw(reg, -value); return; }
2551
if (value == 0) { return; }
2552
if (value < (1 << 12)) { addw(reg, reg, value); return; }
2554
assert(reg != rscratch2, "invalid dst for register increment");
2555
movw(rscratch2, (unsigned)value);
2556
addw(reg, reg, rscratch2);
2560
void MacroAssembler::increment(Register reg, int value)
2562
if (value < 0) { decrement(reg, -value); return; }
2563
if (value == 0) { return; }
2564
if (value < (1 << 12)) { add(reg, reg, value); return; }
2566
assert(reg != rscratch2, "invalid dst for register increment");
2567
movw(rscratch2, (unsigned)value);
2568
add(reg, reg, rscratch2);
2572
void MacroAssembler::incrementw(Address dst, int value)
2574
assert(!dst.uses(rscratch1), "invalid dst for address increment");
2575
if (dst.getMode() == Address::literal) {
2576
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2577
lea(rscratch2, dst);
2578
dst = Address(rscratch2);
2580
ldrw(rscratch1, dst);
2581
incrementw(rscratch1, value);
2582
strw(rscratch1, dst);
2585
void MacroAssembler::increment(Address dst, int value)
2587
assert(!dst.uses(rscratch1), "invalid dst for address increment");
2588
if (dst.getMode() == Address::literal) {
2589
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2590
lea(rscratch2, dst);
2591
dst = Address(rscratch2);
2593
ldr(rscratch1, dst);
2594
increment(rscratch1, value);
2595
str(rscratch1, dst);
2598
// Push lots of registers in the bit set supplied. Don't push sp.
2599
// Return the number of words pushed
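// For example (illustrative), push(RegSet::range(r19, r22).bits(), sp)
// saves r19..r22 with two stp instructions and returns 4.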
2600
int MacroAssembler::push(unsigned int bitset, Register stack) {
2601
int words_pushed = 0;
2603
// Scan bitset to accumulate register pairs
2604
unsigned char regs[32];
2606
for (int reg = 0; reg <= 30; reg++) {
2608
regs[count++] = reg;
2611
regs[count++] = zr->raw_encoding();
2612
count &= ~1; // Only push an even number of regs
2615
stp(as_Register(regs[0]), as_Register(regs[1]),
2616
Address(pre(stack, -count * wordSize)));
2619
for (int i = 2; i < count; i += 2) {
2620
stp(as_Register(regs[i]), as_Register(regs[i+1]),
2621
Address(stack, i * wordSize));
2625
assert(words_pushed == count, "oops, pushed != count");
2630
int MacroAssembler::pop(unsigned int bitset, Register stack) {
2631
int words_pushed = 0;
2633
// Scan bitset to accumulate register pairs
2634
unsigned char regs[32];
2636
for (int reg = 0; reg <= 30; reg++) {
2638
regs[count++] = reg;
2641
regs[count++] = zr->raw_encoding();
2644
for (int i = 2; i < count; i += 2) {
2645
ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2646
Address(stack, i * wordSize));
2650
ldp(as_Register(regs[0]), as_Register(regs[1]),
2651
Address(post(stack, count * wordSize)));
2655
assert(words_pushed == count, "oops, pushed != count");
2660
// Push lots of registers in the bit set supplied. Don't push sp.
2661
// Return the number of dwords pushed
2662
int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
2663
int words_pushed = 0;
2664
bool use_sve = false;
2665
int sve_vector_size_in_bytes = 0;
2668
use_sve = Matcher::supports_scalable_vector();
2669
sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2672
// Scan bitset to accumulate register pairs
2673
unsigned char regs[32];
2675
for (int reg = 0; reg <= 31; reg++) {
2677
regs[count++] = reg;
2685
if (mode == PushPopFull) {
2686
if (use_sve && sve_vector_size_in_bytes > 16) {
2696
if (mode == PushPopSVE) {
2697
snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
2698
} else if (mode == PushPopNeon) {
2699
snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
2701
snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
2703
block_comment(buffer);
2707
if (mode == PushPopSVE) {
2708
sub(stack, stack, sve_vector_size_in_bytes * count);
2709
for (int i = 0; i < count; i++) {
2710
sve_str(as_FloatRegister(regs[i]), Address(stack, i));
2712
return count * sve_vector_size_in_bytes / 8;
2715
if (mode == PushPopNeon) {
2717
strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
2721
bool odd = (count & 1) == 1;
2722
int push_slots = count + (odd ? 1 : 0);
2724
// Always pushing full 128 bit registers.
2725
stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
2728
for (int i = 2; i + 1 < count; i += 2) {
2729
stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2734
strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2738
assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2742
if (mode == PushPopFp) {
2743
bool odd = (count & 1) == 1;
2744
int push_slots = count + (odd ? 1 : 0);
2747
// Stack pointer must be 16 bytes aligned
2748
strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
2752
stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
2755
for (int i = 2; i + 1 < count; i += 2) {
2756
stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
2761
// Stack pointer must be 16 bytes aligned
2762
strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
2766
assert(words_pushed == count, "oops, pushed != count");
2774
// Return the number of dwords popped
2775
int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
2776
int words_pushed = 0;
2777
bool use_sve = false;
2778
int sve_vector_size_in_bytes = 0;
2781
use_sve = Matcher::supports_scalable_vector();
2782
sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2784
// Scan bitset to accumulate register pairs
2785
unsigned char regs[32];
2787
for (int reg = 0; reg <= 31; reg++) {
2789
regs[count++] = reg;
2797
if (mode == PushPopFull) {
2798
if (use_sve && sve_vector_size_in_bytes > 16) {
2808
if (mode == PushPopSVE) {
2809
snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
2810
} else if (mode == PushPopNeon) {
2811
snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
2813
snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
2815
block_comment(buffer);
2819
if (mode == PushPopSVE) {
2820
for (int i = count - 1; i >= 0; i--) {
2821
sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
2823
add(stack, stack, sve_vector_size_in_bytes * count);
2824
return count * sve_vector_size_in_bytes / 8;
2827
if (mode == PushPopNeon) {
2829
ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
2833
bool odd = (count & 1) == 1;
2834
int push_slots = count + (odd ? 1 : 0);
2837
ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2841
for (int i = 2; i + 1 < count; i += 2) {
2842
ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2846
ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
2849
assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2854
if (mode == PushPopFp) {
2855
bool odd = (count & 1) == 1;
2856
int push_slots = count + (odd ? 1 : 0);
2859
ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize)));
2864
ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
2868
for (int i = 2; i + 1 < count; i += 2) {
2869
ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
2873
ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize)));
2876
assert(words_pushed == count, "oops, pushed != count");
2884
// Return the number of dwords pushed
2885
int MacroAssembler::push_p(unsigned int bitset, Register stack) {
2886
bool use_sve = false;
2887
int sve_predicate_size_in_slots = 0;
2890
use_sve = Matcher::supports_scalable_vector();
2892
sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
2900
unsigned char regs[PRegister::number_of_registers];
2902
for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
2904
regs[count++] = reg;
2912
int total_push_bytes = align_up(sve_predicate_size_in_slots *
2913
VMRegImpl::stack_slot_size * count, 16);
2914
sub(stack, stack, total_push_bytes);
2915
for (int i = 0; i < count; i++) {
2916
sve_str(as_PRegister(regs[i]), Address(stack, i));
2918
return total_push_bytes / 8;
2921
// Return the number of dwords popped
2922
int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
2923
bool use_sve = false;
2924
int sve_predicate_size_in_slots = 0;
2927
use_sve = Matcher::supports_scalable_vector();
2929
sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
2937
unsigned char regs[PRegister::number_of_registers];
2939
for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
2941
regs[count++] = reg;
2949
int total_pop_bytes = align_up(sve_predicate_size_in_slots *
2950
VMRegImpl::stack_slot_size * count, 16);
2951
for (int i = count - 1; i >= 0; i--) {
2952
sve_ldr(as_PRegister(regs[i]), Address(stack, i));
2954
add(stack, stack, total_pop_bytes);
2955
return total_pop_bytes / 8;
2959
void MacroAssembler::verify_heapbase(const char* msg) {
2961
assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
2962
assert (Universe::heap() != nullptr, "java heap should be initialized");
2963
if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
2964
// rheapbase is allocated as general register
2967
if (CheckCompressedOops) {
2969
push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2970
cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
2971
br(Assembler::EQ, ok);
2974
pop(1 << rscratch1->encoding(), sp);
2980
void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
2981
assert_different_registers(value, tmp1, tmp2);
2982
Label done, tagged, weak_tagged;
2984
cbz(value, done); // Use null as-is.
2985
tst(value, JNIHandles::tag_mask); // Test for tag.
2986
br(Assembler::NE, tagged);
2988
// Resolve local handle
2989
access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
2994
STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
2995
tbnz(value, 0, weak_tagged); // Test for weak tag.
2997
// Resolve global handle
2998
access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3004
access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3005
value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
3011
void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
3012
assert_different_registers(value, tmp1, tmp2);
3015
cbz(value, done); // Use null as-is.
3019
STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3020
Label valid_global_tag;
3021
tbnz(value, 1, valid_global_tag); // Test for global tag
3022
stop("non global jobject using resolve_global_jobject");
3023
bind(valid_global_tag);
3027
// Resolve global handle
3028
access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3034
void MacroAssembler::stop(const char* msg) {
3037
emit_int64((uintptr_t)msg);
3040
void MacroAssembler::unimplemented(const char* what) {
3041
const char* buf = nullptr;
3045
ss.print("unimplemented: %s", what);
3046
buf = code_string(ss.as_string());
3051
void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3060
// If a constant does not fit in an immediate field, generate some
3061
// number of MOV instructions and then perform the operation.
3062
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
3063
add_sub_imm_insn insn1,
3064
add_sub_reg_insn insn2,
3066
assert(Rd != zr, "Rd = zr and not setting flags?");
3067
bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3069
(this->*insn1)(Rd, Rn, imm);
3071
if (uabs(imm) < (1 << 24)) {
3072
(this->*insn1)(Rd, Rn, imm & -(1 << 12));
3073
(this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
3075
assert_different_registers(Rd, Rn);
3077
(this->*insn2)(Rd, Rn, Rd, LSL, 0);
3082
// Separate version which sets the flags. Optimisations are more restricted
3083
// because we must set the flags correctly.
3084
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
3085
add_sub_imm_insn insn1,
3086
add_sub_reg_insn insn2,
3088
bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3090
(this->*insn1)(Rd, Rn, imm);
3092
assert_different_registers(Rd, Rn);
3093
assert(Rd != zr, "overflow in immediate operand");
3095
(this->*insn2)(Rd, Rn, Rd, LSL, 0);
3100
void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
3101
if (increment.is_register()) {
3102
add(Rd, Rn, increment.as_register());
3104
add(Rd, Rn, increment.as_constant());
3108
void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
3109
if (increment.is_register()) {
3110
addw(Rd, Rn, increment.as_register());
3112
addw(Rd, Rn, increment.as_constant());
3116
void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3117
if (decrement.is_register()) {
3118
sub(Rd, Rn, decrement.as_register());
3120
sub(Rd, Rn, decrement.as_constant());
3124
void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3125
if (decrement.is_register()) {
3126
subw(Rd, Rn, decrement.as_register());
3128
subw(Rd, Rn, decrement.as_constant());
3132
void MacroAssembler::reinit_heapbase()
3134
if (UseCompressedOops) {
3135
if (Universe::is_fully_initialized()) {
3136
mov(rheapbase, CompressedOops::ptrs_base());
3138
lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
3139
ldr(rheapbase, Address(rheapbase));
3144
// this simulates the behaviour of the x86 cmpxchg instruction using a
3145
// load linked/store conditional pair. we use the acquire/release
3146
// versions of these instructions so that we flush pending writes as
3147
// per Java semantics.
3149
// n.b the x86 version assumes the old value to be compared against is
3150
// in rax and updates rax with the value located in memory if the
3151
// cmpxchg fails. we supply a register for the old value explicitly
3153
// the aarch64 load linked/store conditional instructions do not
3154
// accept an offset. so, unlike x86, we must provide a plain register
3155
// to identify the memory word to be compared/exchanged rather than a
3156
// register+offset Address.
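// Illustrative sketch of the LL/SC fallback below (not generated code):
//
//   retry:
//     tmp = load_acquire_exclusive(addr);             // ldaxr
//     if (tmp != oldv) { oldv = tmp; goto fail; }     // word differs
//     if (store_release_exclusive(addr, newv) != 0)   // stlxr
//       goto retry;                                   // lost a race
//     goto succeed;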
3158
void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
3159
Label &succeed, Label *fail) {
3160
// oldv holds comparison value
3161
// newv holds value to write in exchange
3162
// addr identifies memory word to compare against/update
3165
casal(Assembler::xword, oldv, newv, addr);
3167
br(Assembler::EQ, succeed);
3170
Label retry_load, nope;
3171
prfm(Address(addr), PSTL1STRM);
3173
// flush and load exclusive from the memory location
3174
// and fail if it is not what we expect
3177
br(Assembler::NE, nope);
3178
// if we store+flush with no intervening write tmp will be zero
3179
stlxr(tmp, newv, addr);
3181
// retry so we only ever return after a load fails to compare
3182
// this ensures we don't return a stale value after a failed write.
3184
// if the memory word differs we return it in oldv and signal a fail
3193
void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3194
Label &succeed, Label *fail) {
3195
assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3196
cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3199
void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3200
Label &succeed, Label *fail) {
3201
// oldv holds comparison value
3202
// newv holds value to write in exchange
3203
// addr identifies memory word to compare against/update
3204
// tmp returns 0/1 for success/failure
3207
casal(Assembler::word, oldv, newv, addr);
3209
br(Assembler::EQ, succeed);
3212
Label retry_load, nope;
3213
prfm(Address(addr), PSTL1STRM);
3215
// flush and load exclusive from the memory location
3216
// and fail if it is not what we expect
3219
br(Assembler::NE, nope);
3220
// if we store+flush with no intervening write tmp will be zero
3221
stlxrw(tmp, newv, addr);
3223
// retry so we only ever return after a load fails to compare
3224
// this ensures we don't return a stale value after a failed write.
3226
// if the memory word differs we return it in oldv and signal a fail
3235
// A generic CAS; success or failure is in the EQ flag. A weak CAS
3236
// doesn't retry and may fail spuriously. If the oldval is wanted,
3237
// pass a register for the result; otherwise pass noreg.
3239
// Clobbers rscratch1
3240
void MacroAssembler::cmpxchg(Register addr, Register expected,
3242
enum operand_size size,
3243
bool acquire, bool release,
3246
if (result == noreg) result = rscratch1;
3247
BLOCK_COMMENT("cmpxchg {");
3249
mov(result, expected);
3250
lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3251
compare_eq(result, expected, size);
3253
// Poison rscratch1 which is written on !UseLSE branch
3254
mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3257
Label retry_load, done;
3258
prfm(Address(addr), PSTL1STRM);
3260
load_exclusive(result, addr, size, acquire);
3261
compare_eq(result, expected, size);
3262
br(Assembler::NE, done);
3263
store_exclusive(rscratch1, new_val, addr, size, release);
3265
cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3267
cbnzw(rscratch1, retry_load);
3271
BLOCK_COMMENT("} cmpxchg");
3274
// A generic comparison. Only compares for equality, clobbers rscratch1.
3275
void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
3276
if (size == xword) {
3278
} else if (size == word) {
3280
} else if (size == halfword) {
3281
eorw(rscratch1, rm, rn);
3282
ands(zr, rscratch1, 0xffff);
3283
} else if (size == byte) {
3284
eorw(rscratch1, rm, rn);
3285
ands(zr, rscratch1, 0xff);
3287
ShouldNotReachHere();
3292
static bool different(Register a, RegisterOrConstant b, Register c) {
3293
if (b.is_constant())
3296
return a != b.as_register() && a != c && b.as_register() != c;
3299
#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
3300
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
3302
prev = prev->is_valid() ? prev : zr; \
3303
if (incr.is_register()) { \
3304
AOP(sz, incr.as_register(), prev, addr); \
3306
mov(rscratch2, incr.as_constant()); \
3307
AOP(sz, rscratch2, prev, addr); \
3311
Register result = rscratch2; \
3312
if (prev->is_valid()) \
3313
result = different(prev, incr, addr) ? prev : rscratch2; \
3316
prfm(Address(addr), PSTL1STRM); \
3318
LDXR(result, addr); \
3319
OP(rscratch1, result, incr); \
3320
STXR(rscratch2, rscratch1, addr); \
3321
cbnzw(rscratch2, retry_load); \
3322
if (prev->is_valid() && prev != result) { \
3323
IOP(prev, rscratch1, incr); \
3327
ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
3328
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
3329
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
3330
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
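// So, for example (illustrative), atomic_add(prev, incr, addr) performs
// an atomic "*addr += incr" and leaves the old value in prev, using an
// LSE ldadd when UseLSE and an ldxr/stxr retry loop otherwise.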
3334
#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \
3335
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
3337
prev = prev->is_valid() ? prev : zr; \
3338
AOP(sz, newv, prev, addr); \
3341
Register result = rscratch2; \
3342
if (prev->is_valid()) \
3343
result = different(prev, newv, addr) ? prev : rscratch2; \
3346
prfm(Address(addr), PSTL1STRM); \
3348
LDXR(result, addr); \
3349
STXR(rscratch1, newv, addr); \
3350
cbnzw(rscratch1, retry_load); \
3351
if (prev->is_valid() && prev != result) \
3352
mov(prev, result); \
3355
ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3356
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3357
ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3358
ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3359
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3360
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3365
extern "C" void findpc(intptr_t x);
3368
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3370
// In order to get locks to work, we need to fake an in_VM state
3371
if (ShowMessageBoxOnError ) {
3372
JavaThread* thread = JavaThread::current();
3373
JavaThreadState saved_state = thread->thread_state();
3374
thread->set_thread_state(_thread_in_vm);
3376
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3378
BytecodeCounter::print();
3381
if (os::message_box(msg, "Execution stopped, print registers?")) {
3383
tty->print_cr(" pc = 0x%016" PRIx64, pc);
3389
tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3390
tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3391
tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3392
tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3393
tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3394
tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3395
tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3396
tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3397
tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3398
tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3399
tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3400
tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3401
tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3402
tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3403
tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3404
tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3405
tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3406
tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3407
tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3408
tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3409
tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3410
tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3411
tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3412
tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3413
tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3414
tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3415
tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3416
tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3417
tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3418
tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3419
tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3423
fatal("DEBUG MESSAGE: %s", msg);
3426
RegSet MacroAssembler::call_clobbered_gp_registers() {
3427
RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3434
void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3435
int step = 4 * wordSize;
3436
push(call_clobbered_gp_registers() - exclude, sp);
3438
mov(rscratch1, -step);
3439
// Push v0-v7, v16-v31.
3440
for (int i = 31; i>= 4; i -= 4) {
3441
if (i <= v7->encoding() || i >= v16->encoding())
3442
st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3443
as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3445
st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3446
as_FloatRegister(3), T1D, Address(sp));
3449
void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3450
for (int i = 0; i < 32; i += 4) {
3451
if (i <= v7->encoding() || i >= v16->encoding())
3452
ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3453
as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3456
reinitialize_ptrue();
3458
pop(call_clobbered_gp_registers() - exclude, sp);
3461
void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3462
int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3463
push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3464
if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3465
sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3466
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3467
sve_str(as_FloatRegister(i), Address(sp, i));
3470
int step = (save_vectors ? 8 : 4) * wordSize;
3471
mov(rscratch1, -step);
3473
for (int i = 28; i >= 4; i -= 4) {
3474
st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3475
as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3477
st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3479
if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3480
sub(sp, sp, total_predicate_in_bytes);
3481
for (int i = 0; i < PRegister::number_of_registers; i++) {
3482
sve_str(as_PRegister(i), Address(sp, i));
3487
void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3488
int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3489
if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3490
for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3491
sve_ldr(as_PRegister(i), Address(sp, i));
3493
add(sp, sp, total_predicate_in_bytes);
3495
if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3496
for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3497
sve_ldr(as_FloatRegister(i), Address(sp, i));
3499
add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3501
int step = (restore_vectors ? 8 : 4) * wordSize;
3502
for (int i = 0; i <= 28; i += 4)
3503
ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3504
as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3507
// We may use predicate registers and rely on ptrue with SVE,
3508
// regardless of wide vector (> 8 bytes) used or not.
3510
reinitialize_ptrue();
3513
// integer registers except lr & sp
3514
pop(RegSet::range(r0, r17), sp);
3516
ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3517
pop(RegSet::range(r20, r29), sp);
3519
pop(RegSet::range(r18_tls, r29), sp);
3524
* Helpers for multiply_to_len().
3526
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3527
Register src1, Register src2) {
3528
adds(dest_lo, dest_lo, src1);
3529
adc(dest_hi, dest_hi, zr);
3530
adds(dest_lo, dest_lo, src2);
3531
adc(final_dest_hi, dest_hi, zr);
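// In effect (an illustrative sketch): treating the pairs as 128-bit values,
//   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
// with each carry out of dest_lo propagated into the high word.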
3534
// Generate an address from (r + r1 extend offset). "size" is the
3535
// size of the operand. The result may be in rscratch2.
3536
Address MacroAssembler::offsetted_address(Register r, Register r1,
3537
Address::extend ext, int offset, int size) {
3538
if (offset || (ext.shift() % size != 0)) {
3539
lea(rscratch2, Address(r, r1, ext));
3540
return Address(rscratch2, offset);
3542
return Address(r, r1, ext);
3546
Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3548
assert(offset >= 0, "spill to negative address?");
3549
// Offset reachable ?
3550
// Not aligned - 9 bits signed offset
3551
// Aligned - 12 bits unsigned offset shifted
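// For example (illustrative): an 8-byte spill at offset 0x21008 does not
// fit the scaled 12-bit form (0x21008 / 8 > 0xfff), so we emit
// "add tmp, base, #0x21000" and return Address(tmp, 0x8).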
3553
if ((offset & (size-1)) && offset >= (1<<8)) {
3554
add(tmp, base, offset & ((1<<12)-1));
3559
if (offset >= (1<<12) * size) {
3560
add(tmp, base, offset & (((1<<12)-1)<<12));
3562
offset &= ~(((1<<12)-1)<<12);
3565
return Address(base, offset);
3568
Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3569
assert(offset >= 0, "spill to negative address?");
3573
// An immediate offset in the range 0 to 255 which is multiplied
3574
// by the current vector or predicate register size in bytes.
3575
if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3576
return Address(base, offset / sve_reg_size_in_bytes);
3579
add(tmp, base, offset);
3580
return Address(tmp);
3583
// Checks whether offset is aligned.
3584
// Returns true if it is, else false.
3585
bool MacroAssembler::merge_alignment_check(Register base,
3588
int64_t prev_offset) const {
3589
if (AvoidUnalignedAccesses) {
3591
// Checks whether the low offset is aligned to a register pair.
3592
int64_t pair_mask = size * 2 - 1;
3593
int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3594
return (offset & pair_mask) == 0;
3595
} else { // If base is not sp, we can't guarantee the access is aligned.
3599
int64_t mask = size - 1;
3600
// Load/store pair instruction only supports element size aligned offset.
3601
return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3605
// Checks whether current and previous loads/stores can be merged.
3606
// Returns true if it can be merged, else false.
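// For example (illustrative), two adjacent 8-byte loads
//   ldr x1, [x3, #8]
//   ldr x2, [x3, #16]
// satisfy these checks and are rewritten as a single
//   ldp x1, x2, [x3, #8]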
3607
bool MacroAssembler::ldst_can_merge(Register rt,
3609
size_t cur_size_in_bytes,
3610
bool is_store) const {
3611
address prev = pc() - NativeInstruction::instruction_size;
3612
address last = code()->last_insn();
3614
if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3618
if (adr.getMode() != Address::base_plus_offset || prev != last) {
3622
NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3623
size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3625
assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3626
assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3628
if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3632
int64_t max_offset = 63 * prev_size_in_bytes;
3633
int64_t min_offset = -64 * prev_size_in_bytes;
3635
assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3637
// Only same base can be merged.
3638
if (adr.base() != prev_ldst->base()) {
3642
int64_t cur_offset = adr.offset();
3643
int64_t prev_offset = prev_ldst->offset();
3644
size_t diff = abs(cur_offset - prev_offset);
3645
if (diff != prev_size_in_bytes) {
3649
// The following cases cannot be merged:
3651
// ldr x3, [x2, #16]
3654
// ldr x2, [x3, #16]
3655
// If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3656
if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3660
int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3661
// Offset range must be in ldp/stp instruction's range.
3662
if (low_offset > max_offset || low_offset < min_offset) {
3666
if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3673
// Merge current load/store with previous load/store into ldp/stp.
3674
void MacroAssembler::merge_ldst(Register rt,
3676
size_t cur_size_in_bytes,
3679
assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3681
Register rt_low, rt_high;
3682
address prev = pc() - NativeInstruction::instruction_size;
3683
NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3687
if (adr.offset() < prev_ldst->offset()) {
3688
offset = adr.offset();
3690
rt_high = prev_ldst->target();
3692
offset = prev_ldst->offset();
3693
rt_low = prev_ldst->target();
3697
Address adr_p = Address(prev_ldst->base(), offset);
3698
// Overwrite previous generated binary.
3699
code_section()->set_end(prev);
3701
const size_t sz = prev_ldst->size_in_bytes();
3702
assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3704
BLOCK_COMMENT("merged ldr pair");
3706
ldp(rt_low, rt_high, adr_p);
3708
ldpw(rt_low, rt_high, adr_p);
3711
BLOCK_COMMENT("merged str pair");
3713
stp(rt_low, rt_high, adr_p);
3715
stpw(rt_low, rt_high, adr_p);
3721
* Multiply 64 bit by 64 bit first loop.
3723
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3724
Register y, Register y_idx, Register z,
3725
Register carry, Register product,
3726
Register idx, Register kdx) {
3728
// jlong carry, x[], y[], z[];
3729
// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3730
// huge_128 product = y[idx] * x[xstart] + carry;
3731
// z[kdx] = (jlong)product;
3732
// carry = (jlong)(product >>> 64);
3734
// z[xstart] = carry;
3737
Label L_first_loop, L_first_loop_exit;
3738
Label L_one_x, L_one_y, L_multiply;
3740
subsw(xstart, xstart, 1);
3741
br(Assembler::MI, L_one_x);
3743
lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3744
ldr(x_xstart, Address(rscratch1));
3745
ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3749
br(Assembler::MI, L_first_loop_exit);
3751
br(Assembler::MI, L_one_y);
3752
lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3753
ldr(y_idx, Address(rscratch1));
3754
ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3757
// AArch64 has a multiply-accumulate instruction that we can't use
3758
// here because it has no way to process carries, so we have to use
3759
// separate add and adc instructions. Bah.
3760
umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3761
mul(product, x_xstart, y_idx);
3762
adds(product, product, carry);
3763
adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
3766
ror(product, product, 32); // back to big-endian
3767
str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3772
ldrw(y_idx, Address(y, 0));
3776
ldrw(x_xstart, Address(x, 0));
3779
bind(L_first_loop_exit);
3783
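// A minimal C sketch (added for clarity; not part of the original source) of
// one iteration of the loop above, assuming a 128-bit integer type:
//
//   unsigned __int128 p = (unsigned __int128)x_limb * y_limb + carry; // mul/umulh + adds/adc
//   z[kdx] = (uint64_t)p;                                             // str
//   carry  = (uint64_t)(p >> 64);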
/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
                                             Register carry, Register carry2,
                                             Register idx, Register jdx,
                                             Register yz_idx1, Register yz_idx2,
                                             Register tmp, Register tmp3, Register tmp4,
                                             Register tmp6, Register product_hi) {

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  lsrw(jdx, idx, 2);

  bind(L_third_loop);

  subsw(jdx, jdx, 1);
  br(Assembler::MI, L_third_loop_exit);
  subw(idx, idx, 4);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));

  ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));

  lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  ror(yz_idx2, yz_idx2, 32);

  ldp(rscratch2, rscratch1, Address(tmp6, 0));

  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);

  ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
  ror(rscratch2, rscratch2, 32);

  mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
  umulh(carry2, product_hi, yz_idx2);

  // propagate sum of both multiplications into carry:tmp4:tmp3
  adds(tmp3, tmp3, carry);
  adc(tmp4, tmp4, zr);
  adds(tmp3, tmp3, rscratch1);
  adcs(tmp4, tmp4, tmp);
  adc(carry, carry2, zr);
  adds(tmp4, tmp4, rscratch2);
  adc(carry, carry, zr);

  ror(tmp3, tmp3, 32); // convert little-endian to big-endian
  ror(tmp4, tmp4, 32);
  stp(tmp4, tmp3, Address(tmp6, 0));

  b(L_third_loop);
  bind(L_third_loop_exit);

  andw(idx, idx, 0x3);
  cbz(idx, L_post_third_loop_done);

  Label L_check_1;
  subsw(idx, idx, 2);
  br(Assembler::MI, L_check_1);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx1, Address(rscratch1, 0));
  ror(yz_idx1, yz_idx1, 32);
  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);
  lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx2, Address(rscratch1, 0));
  ror(yz_idx2, yz_idx2, 32);

  add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);

  ror(tmp3, tmp3, 32);
  str(tmp3, Address(rscratch1, 0));

  bind(L_check_1);
  andw(idx, idx, 0x1);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_post_third_loop_done);
  ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
  umulh(carry2, tmp4, product_hi);
  ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  add2_with_carry(carry2, carry2, tmp3, tmp4, carry);

  strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}
/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);       // idx = ylen;
  addw(kdx, xlen, ylen); // kdx = xlen+ylen;
  mov(carry, zr);        // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi

  const Register jdx = tmp1;

  bind(L_second_loop);
  mov(carry, zr);  // carry = 0;
  movw(jdx, ylen); // j = ystart+1

  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_done);

  str(z, Address(pre(sp, -4 * wordSize)));

  Label L_last_x;
  lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_last_x);

  lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
  ldr(product_hi, Address(rscratch1));
  ror(product_hi, product_hi, 32); // convert big-endian to little-endian

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  str(ylen, Address(sp, wordSize));
  stp(x, xstart, Address(sp, 2 * wordSize));
  multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
                          tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
  ldp(z, ylen, Address(post(sp, 2 * wordSize)));
  ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen

  addw(tmp3, xlen, 1);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  subsw(tmp3, tmp3, 1);
  br(Assembler::MI, L_done);

  lsr(carry, carry, 32);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  b(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  ldrw(product_hi, Address(x, 0));
  b(L_third_loop_prologue);

  bind(L_done);
}
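// Illustrative note (added for clarity; not from the original source): at the
// Java level this computes z = x * y over arrays of 32-bit limbs stored most
// significant first. That storage order is why the 64-bit loads above are
// followed by ror(reg, reg, 32), which swaps the two int halves into
// arithmetic order, and why results are rotated back before each store.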
// Code for BigInteger::mulAdd intrinsic
// out     = r0
// in      = r1
// offset  = r2  (already out.length-offset)
// len     = r3
// k       = r4
//
// pseudo code from java implementation:
// carry = 0;
// offset = out.length-offset - 1;
// for (int j=len-1; j >= 0; j--) {
//     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
//     out[offset--] = (int)product;
//     carry = product >>> 32;
// }
// return (int)carry;
void MacroAssembler::mul_add(Register out, Register in, Register offset,
                             Register len, Register k) {
  Label LOOP, END;
  // pre-loop
  cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches
  csel(out, zr, out, Assembler::EQ);
  br(Assembler::EQ, END);
  add(in, in, len, LSL, 2); // in[j+1] address
  add(offset, out, offset, LSL, 2); // out[offset + 1] address
  mov(out, zr); // used to keep carry now
  BIND(LOOP);
  ldrw(rscratch1, Address(pre(in, -4)));
  madd(rscratch1, rscratch1, k, out);
  ldrw(rscratch2, Address(pre(offset, -4)));
  add(rscratch1, rscratch1, rscratch2);
  strw(rscratch1, Address(offset));
  lsr(out, rscratch1, 32);
  subs(len, len, 1);
  br(Assembler::NE, LOOP);
  BIND(END);
}
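// A minimal C sketch (added for clarity; not from the original source) of one
// loop iteration, using 64-bit arithmetic on 32-bit limbs:
//
//   uint64_t p = (uint64_t)in[j] * k + carry + out[offset]; // madd + add
//   out[offset] = (uint32_t)p;                              // strw
//   carry = p >> 32;                                        // lsr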
/**
 * Emits code to update CRC-32 with a byte value according to constants in table.
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 *   val = crc_table[(val ^ crc) & 0xFF];
 *   crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  eor(val, val, crc);
  andr(val, val, 0xff);
  ldrw(val, Address(table, val, Address::lsl(2)));
  eor(crc, val, crc, Assembler::LSR, 8);
}
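// Equivalent C one-liner for the standard table-driven byte step (added for
// clarity; not part of the original source):
//
//   crc = crc_table[(crc ^ byte) & 0xff] ^ (crc >> 8);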
/**
 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3.
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]v         Register containing the 32-bit value to fold into the CRC.
 * @param [in]table0    Register containing table 0 of crc constants.
 * @param [in]table1    Register containing table 1 of crc constants.
 * @param [in]table2    Register containing table 2 of crc constants.
 * @param [in]table3    Register containing table 3 of crc constants.
 *
 * uint32_t crc;
 *   v = crc ^ v
 *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
 */
void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
        Register table0, Register table1, Register table2, Register table3,
        bool upper) {
  eor(v, crc, v, upper ? LSR : LSL, upper ? 32 : 0);
  uxtb(tmp, v);
  ldrw(crc, Address(table3, tmp, Address::lsl(2)));
  ubfx(tmp, v, 8, 8);
  ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 16, 8);
  ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 24, 8);
  ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
}
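// Note (added for clarity): this is the classic "slicing-by-4" scheme. The
// four tables let one 32-bit load replace four dependent byte-at-a-time
// steps, and the `upper` variant folds the high word of a 64-bit load by
// shifting it down first.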
void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
    Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
    assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

    subs(tmp0, len, 384);
    mvnw(crc, crc);
    br(Assembler::GE, CRC_by128_pre);
  BIND(CRC_less128);
    subs(len, len, 32);
    br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
    adds(len, len, 32 - 4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by32_loop);
    ldp(tmp0, tmp1, Address(buf));
    crc32x(crc, crc, tmp0);
    ldp(tmp2, tmp3, Address(buf, 16));
    crc32x(crc, crc, tmp1);
    add(buf, buf, 32);
    crc32x(crc, crc, tmp2);
    subs(len, len, 32);
    crc32x(crc, crc, tmp3);
    br(Assembler::GE, CRC_by32_loop);
    cmn(len, (u1)32);
    br(Assembler::NE, CRC_less32);
    b(L_exit);

  BIND(CRC_by4_loop);
    ldrw(tmp0, Address(post(buf, 4)));
    subs(len, len, 4);
    crc32w(crc, crc, tmp0);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
    ldrb(tmp0, Address(post(buf, 1)));
    subs(len, len, 1);
    crc32b(crc, crc, tmp0);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by128_pre);
    kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
      4*256*sizeof(juint) + 8*sizeof(juint));
    mov(crc, 0);
    crc32x(crc, crc, tmp0);
    crc32x(crc, crc, tmp1);

    cbnz(len, CRC_less128);

  BIND(L_exit);
    mvnw(crc, crc);
}
void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3) {
    Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
    assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);

    mvnw(crc, crc);

    subs(len, len, 128);
    br(Assembler::GE, CRC_by64_pre);
  BIND(CRC_less64);
    adds(len, len, 128-32);
    br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
    adds(len, len, 32-4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by32_loop);
    ldp(tmp0, tmp1, Address(post(buf, 16)));
    subs(len, len, 32);
    crc32x(crc, crc, tmp0);
    ldr(tmp2, Address(post(buf, 8)));
    crc32x(crc, crc, tmp1);
    ldr(tmp3, Address(post(buf, 8)));
    crc32x(crc, crc, tmp2);
    crc32x(crc, crc, tmp3);
    br(Assembler::GE, CRC_by32_loop);
    cmn(len, (u1)32);
    br(Assembler::NE, CRC_less32);
    b(L_exit);

  BIND(CRC_by4_loop);
    ldrw(tmp0, Address(post(buf, 4)));
    subs(len, len, 4);
    crc32w(crc, crc, tmp0);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
    ldrb(tmp0, Address(post(buf, 1)));
    subs(len, len, 1);
    crc32b(crc, crc, tmp0);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by64_pre);
    sub(buf, buf, 8);
    ldp(tmp0, tmp1, Address(buf, 8));
    crc32x(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 24));
    crc32x(crc, crc, tmp1);
    ldr(tmp3, Address(buf, 32));
    crc32x(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 40));
    crc32x(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 48));
    crc32x(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 56));
    crc32x(crc, crc, tmp1);
    ldr(tmp3, Address(pre(buf, 64)));

    b(CRC_by64_loop);

    align(CodeEntryAlignment);
  BIND(CRC_by64_loop);
    subs(len, len, 64);
    crc32x(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 8));
    crc32x(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 16));
    crc32x(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 24));
    crc32x(crc, crc, tmp1);
    ldr(tmp3, Address(buf, 32));
    crc32x(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 40));
    crc32x(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 48));
    crc32x(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 56));
    crc32x(crc, crc, tmp1);
    ldr(tmp3, Address(pre(buf, 64)));
    br(Assembler::GE, CRC_by64_loop);

    // post-loop
    crc32x(crc, crc, tmp2);
    crc32x(crc, crc, tmp3);

    sub(len, len, 64);
    add(buf, buf, 8);
    cmn(len, (u1)128);
    br(Assembler::NE, CRC_less64);
  BIND(L_exit);
    mvnw(crc, crc);
}
/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;

  if (UseCryptoPmullForCRC32) {
      kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
      return;
  }

  if (UseCRC32) {
      kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
      return;
  }

  mvnw(crc, crc); // ~crc

  {
    uint64_t offset;
    adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    add(table0, table0, offset);
  }
  add(table1, table0, 1*256*sizeof(juint));
  add(table2, table0, 2*256*sizeof(juint));
  add(table3, table0, 3*256*sizeof(juint));

  if (UseNeon) {
    { // Neon code start
      cmp(len, (u1)64);
      br(Assembler::LT, L_by16);
      eor(v16, T16B, v16, v16);

    Label L_fold;

      add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants

      ld1(v0, v1, T2D, post(buf, 32));
      ld1r(v4, T2D, post(tmp, 8));
      ld1r(v5, T2D, post(tmp, 8));
      ld1r(v6, T2D, post(tmp, 8));
      ld1r(v7, T2D, post(tmp, 8));
      mov(v16, S, 0, crc);

      eor(v0, T16B, v0, v16);
      sub(len, len, 64);

    BIND(L_fold);
      pmull(v22, T8H, v0, v5, T8B);
      pmull(v20, T8H, v0, v7, T8B);
      pmull(v23, T8H, v0, v4, T8B);
      pmull(v21, T8H, v0, v6, T8B);

      pmull2(v18, T8H, v0, v5, T16B);
      pmull2(v16, T8H, v0, v7, T16B);
      pmull2(v19, T8H, v0, v4, T16B);
      pmull2(v17, T8H, v0, v6, T16B);

      uzp1(v24, T8H, v20, v22);
      uzp2(v25, T8H, v20, v22);
      eor(v20, T16B, v24, v25);

      uzp1(v26, T8H, v16, v18);
      uzp2(v27, T8H, v16, v18);
      eor(v16, T16B, v26, v27);

      ushll2(v22, T4S, v20, T8H, 8);
      ushll(v20, T4S, v20, T4H, 8);

      ushll2(v18, T4S, v16, T8H, 8);
      ushll(v16, T4S, v16, T4H, 8);

      eor(v22, T16B, v23, v22);
      eor(v18, T16B, v19, v18);
      eor(v20, T16B, v21, v20);
      eor(v16, T16B, v17, v16);

      uzp1(v17, T2D, v16, v20);
      uzp2(v21, T2D, v16, v20);
      eor(v17, T16B, v17, v21);

      ushll2(v20, T2D, v17, T4S, 16);
      ushll(v16, T2D, v17, T2S, 16);

      eor(v20, T16B, v20, v22);
      eor(v16, T16B, v16, v18);

      uzp1(v17, T2D, v20, v16);
      uzp2(v21, T2D, v20, v16);
      eor(v28, T16B, v17, v21);

      pmull(v22, T8H, v1, v5, T8B);
      pmull(v20, T8H, v1, v7, T8B);
      pmull(v23, T8H, v1, v4, T8B);
      pmull(v21, T8H, v1, v6, T8B);

      pmull2(v18, T8H, v1, v5, T16B);
      pmull2(v16, T8H, v1, v7, T16B);
      pmull2(v19, T8H, v1, v4, T16B);
      pmull2(v17, T8H, v1, v6, T16B);

      ld1(v0, v1, T2D, post(buf, 32));

      uzp1(v24, T8H, v20, v22);
      uzp2(v25, T8H, v20, v22);
      eor(v20, T16B, v24, v25);

      uzp1(v26, T8H, v16, v18);
      uzp2(v27, T8H, v16, v18);
      eor(v16, T16B, v26, v27);

      ushll2(v22, T4S, v20, T8H, 8);
      ushll(v20, T4S, v20, T4H, 8);

      ushll2(v18, T4S, v16, T8H, 8);
      ushll(v16, T4S, v16, T4H, 8);

      eor(v22, T16B, v23, v22);
      eor(v18, T16B, v19, v18);
      eor(v20, T16B, v21, v20);
      eor(v16, T16B, v17, v16);

      uzp1(v17, T2D, v16, v20);
      uzp2(v21, T2D, v16, v20);
      eor(v16, T16B, v17, v21);

      ushll2(v20, T2D, v16, T4S, 16);
      ushll(v16, T2D, v16, T2S, 16);

      eor(v20, T16B, v22, v20);
      eor(v16, T16B, v16, v18);

      uzp1(v17, T2D, v20, v16);
      uzp2(v21, T2D, v20, v16);
      eor(v20, T16B, v17, v21);

      shl(v16, T2D, v28, 1);
      shl(v17, T2D, v20, 1);

      eor(v0, T16B, v0, v16);
      eor(v1, T16B, v1, v17);

      subs(len, len, 32);
      br(Assembler::GE, L_fold);

      mov(crc, 0);
      mov(tmp, v0, D, 0);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
      mov(tmp, v0, D, 1);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
      mov(tmp, v1, D, 0);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
      mov(tmp, v1, D, 1);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
      update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);

      add(len, len, 32);
    } // Neon code end
  }

  BIND(L_by16);
    subs(len, len, 16);
    br(Assembler::GE, L_by16_loop);
    adds(len, len, 16-4);
    br(Assembler::GE, L_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, L_by1_loop);
    b(L_exit);

  BIND(L_by4_loop);
    ldrw(tmp, Address(post(buf, 4)));
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
    subs(len, len, 4);
    br(Assembler::GE, L_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
  BIND(L_by1_loop);
    subs(len, len, 1);
    ldrb(tmp, Address(post(buf, 1)));
    update_byte_crc32(crc, tmp, table0);
    br(Assembler::GT, L_by1_loop);
    b(L_exit);

    align(CodeEntryAlignment);
  BIND(L_by16_loop);
    subs(len, len, 16);
    ldp(tmp, tmp3, Address(post(buf, 16)));
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
    br(Assembler::GE, L_by16_loop);
    adds(len, len, 16-4);
    br(Assembler::GE, L_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, L_by1_loop);
  BIND(L_exit);
    mvnw(crc, crc);
}
void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
    Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
    assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

    subs(tmp0, len, 384);
    br(Assembler::GE, CRC_by128_pre);
  BIND(CRC_less128);
    subs(len, len, 32);
    br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
    adds(len, len, 32 - 4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by32_loop);
    ldp(tmp0, tmp1, Address(buf));
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 16));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(buf, 24));
    crc32cx(crc, crc, tmp2);
    add(buf, buf, 32);
    subs(len, len, 32);
    crc32cx(crc, crc, tmp3);
    br(Assembler::GE, CRC_by32_loop);
    cmn(len, (u1)32);
    br(Assembler::NE, CRC_less32);
    b(L_exit);

  BIND(CRC_by4_loop);
    ldrw(tmp0, Address(post(buf, 4)));
    subs(len, len, 4);
    crc32cw(crc, crc, tmp0);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
    ldrb(tmp0, Address(post(buf, 1)));
    subs(len, len, 1);
    crc32cb(crc, crc, tmp0);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by128_pre);
    kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
      4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
    mov(crc, 0);
    crc32cx(crc, crc, tmp0);
    crc32cx(crc, crc, tmp1);

    cbnz(len, CRC_less128);

  BIND(L_exit);
}
void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3) {
    Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
    assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);

    subs(len, len, 128);
    br(Assembler::GE, CRC_by64_pre);
  BIND(CRC_less64);
    adds(len, len, 128-32);
    br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
    adds(len, len, 32-4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by32_loop);
    ldp(tmp0, tmp1, Address(post(buf, 16)));
    subs(len, len, 32);
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(post(buf, 8)));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(post(buf, 8)));
    crc32cx(crc, crc, tmp2);
    crc32cx(crc, crc, tmp3);
    br(Assembler::GE, CRC_by32_loop);
    cmn(len, (u1)32);
    br(Assembler::NE, CRC_less32);
    b(L_exit);

  BIND(CRC_by4_loop);
    ldrw(tmp0, Address(post(buf, 4)));
    subs(len, len, 4);
    crc32cw(crc, crc, tmp0);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
    ldrb(tmp0, Address(post(buf, 1)));
    subs(len, len, 1);
    crc32cb(crc, crc, tmp0);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

  BIND(CRC_by64_pre);
    sub(buf, buf, 8);
    ldp(tmp0, tmp1, Address(buf, 8));
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 24));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(buf, 32));
    crc32cx(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 40));
    crc32cx(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 48));
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 56));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(pre(buf, 64)));

    b(CRC_by64_loop);

    align(CodeEntryAlignment);
  BIND(CRC_by64_loop);
    subs(len, len, 64);
    crc32cx(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 8));
    crc32cx(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 16));
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 24));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(buf, 32));
    crc32cx(crc, crc, tmp2);
    ldr(tmp0, Address(buf, 40));
    crc32cx(crc, crc, tmp3);
    ldr(tmp1, Address(buf, 48));
    crc32cx(crc, crc, tmp0);
    ldr(tmp2, Address(buf, 56));
    crc32cx(crc, crc, tmp1);
    ldr(tmp3, Address(pre(buf, 64)));
    br(Assembler::GE, CRC_by64_loop);

    // post-loop
    crc32cx(crc, crc, tmp2);
    crc32cx(crc, crc, tmp3);

    sub(len, len, 64);
    add(buf, buf, 8);
    cmn(len, (u1)128);
    br(Assembler::NE, CRC_less64);
  BIND(L_exit);
}
/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  if (UseCryptoPmullForCRC32) {
    kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
  } else {
    kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
  }
}
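// Note (added for clarity): CRC-32C uses the Castagnoli polynomial, and the
// AArch64 crc32c{b,w,x} instructions mirror the plain crc32{b,w,x} forms used
// above, which is why the two kernel families are structured identically.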
void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
    Label CRC_by128_loop;
    assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

    sub(len, len, 256);
    Register table = tmp0;
    {
      uint64_t offset;
      adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
      add(table, table, offset);
    }
    add(table, table, table_offset);

    // Registers v0..v7 are used as data registers.
    // Registers v16..v31 are used as tmp registers.
    sub(buf, buf, 0x10);
    ldrq(v0, Address(buf, 0x10));
    ldrq(v1, Address(buf, 0x20));
    ldrq(v2, Address(buf, 0x30));
    ldrq(v3, Address(buf, 0x40));
    ldrq(v4, Address(buf, 0x50));
    ldrq(v5, Address(buf, 0x60));
    ldrq(v6, Address(buf, 0x70));
    ldrq(v7, Address(pre(buf, 0x80)));

    mov(v31, S, 0, crc);
    eor(v0, T16B, v0, v31);

    // Register v16 contains constants from the crc table.
    ldrq(v16, Address(table));

    align(OptoLoopAlignment);
  BIND(CRC_by128_loop);
    pmull (v17, T1Q, v0, v16, T1D);
    pmull2(v18, T1Q, v0, v16, T2D);
    ldrq(v0, Address(buf, 0x10));
    eor3(v0, T16B, v17, v18, v0);

    pmull (v19, T1Q, v1, v16, T1D);
    pmull2(v20, T1Q, v1, v16, T2D);
    ldrq(v1, Address(buf, 0x20));
    eor3(v1, T16B, v19, v20, v1);

    pmull (v21, T1Q, v2, v16, T1D);
    pmull2(v22, T1Q, v2, v16, T2D);
    ldrq(v2, Address(buf, 0x30));
    eor3(v2, T16B, v21, v22, v2);

    pmull (v23, T1Q, v3, v16, T1D);
    pmull2(v24, T1Q, v3, v16, T2D);
    ldrq(v3, Address(buf, 0x40));
    eor3(v3, T16B, v23, v24, v3);

    pmull (v25, T1Q, v4, v16, T1D);
    pmull2(v26, T1Q, v4, v16, T2D);
    ldrq(v4, Address(buf, 0x50));
    eor3(v4, T16B, v25, v26, v4);

    pmull (v27, T1Q, v5, v16, T1D);
    pmull2(v28, T1Q, v5, v16, T2D);
    ldrq(v5, Address(buf, 0x60));
    eor3(v5, T16B, v27, v28, v5);

    pmull (v29, T1Q, v6, v16, T1D);
    pmull2(v30, T1Q, v6, v16, T2D);
    ldrq(v6, Address(buf, 0x70));
    eor3(v6, T16B, v29, v30, v6);

    // Reuse registers v23, v24.
    // Using them won't block the first instruction of the next iteration.
    pmull (v23, T1Q, v7, v16, T1D);
    pmull2(v24, T1Q, v7, v16, T2D);
    ldrq(v7, Address(pre(buf, 0x80)));
    eor3(v7, T16B, v23, v24, v7);

    subs(len, len, 0x80);
    br(Assembler::GE, CRC_by128_loop);

    // fold into 512 bits
    // Use v31 for constants because v16 can be still in use.
    ldrq(v31, Address(table, 0x10));

    pmull (v17, T1Q, v0, v31, T1D);
    pmull2(v18, T1Q, v0, v31, T2D);
    eor3(v0, T16B, v17, v18, v4);

    pmull (v19, T1Q, v1, v31, T1D);
    pmull2(v20, T1Q, v1, v31, T2D);
    eor3(v1, T16B, v19, v20, v5);

    pmull (v21, T1Q, v2, v31, T1D);
    pmull2(v22, T1Q, v2, v31, T2D);
    eor3(v2, T16B, v21, v22, v6);

    pmull (v23, T1Q, v3, v31, T1D);
    pmull2(v24, T1Q, v3, v31, T2D);
    eor3(v3, T16B, v23, v24, v7);

    // fold into 128 bits
    // Use v17 for constants because v31 can be still in use.
    ldrq(v17, Address(table, 0x20));
    pmull (v25, T1Q, v0, v17, T1D);
    pmull2(v26, T1Q, v0, v17, T2D);
    eor3(v3, T16B, v3, v25, v26);

    // Use v18 for constants because v17 can be still in use.
    ldrq(v18, Address(table, 0x30));
    pmull (v27, T1Q, v1, v18, T1D);
    pmull2(v28, T1Q, v1, v18, T2D);
    eor3(v3, T16B, v3, v27, v28);

    // Use v19 for constants because v18 can be still in use.
    ldrq(v19, Address(table, 0x40));
    pmull (v29, T1Q, v2, v19, T1D);
    pmull2(v30, T1Q, v2, v19, T2D);
    eor3(v0, T16B, v3, v29, v30);

    add(len, len, 0x80);
    add(buf, buf, 0x10);

    mov(tmp0, v0, D, 0);
    mov(tmp1, v0, D, 1);
}
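// Background note (added for clarity): each CRC_by128_loop iteration folds
// the current 128 bytes of CRC state into the next 128 bytes of input using
// carry-less multiplies (pmull/pmull2) by precomputed constants of the form
// x^N mod P -- the standard CLMUL CRC folding technique -- and eor3 combines
// the high/low products with the freshly loaded data in one instruction.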
SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  uint64_t offset;
  _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
  _masm->ldrb(rscratch1, Address(rscratch1, offset));
  if (value) {
    _masm->cbnzw(rscratch1, _label);
  } else {
    _masm->cbzw(rscratch1, _label);
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
void MacroAssembler::addptr(const Address &dst, int32_t src) {
  Address adr;
  switch(dst.getMode()) {
  case Address::base_plus_offset:
    // This is the expected mode, although we allow all the other
    // forms below.
    adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
    break;
  default:
    lea(rscratch2, dst);
    adr = Address(rscratch2);
  }
  ldr(rscratch1, adr);
  add(rscratch1, rscratch1, src);
  str(rscratch1, adr);
}
void MacroAssembler::cmpptr(Register src1, Address src2) {
  uint64_t offset;
  adrp(rscratch1, src2, offset);
  ldr(rscratch1, Address(rscratch1, offset));
  cmp(src1, rscratch1);
}

void MacroAssembler::cmpoop(Register obj1, Register obj2) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->obj_equals(this, obj1, obj2);
}
void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  ldr(holder, Address(method, Method::const_offset()));             // ConstMethod*
  ldr(holder, Address(holder, ConstMethod::constants_offset()));    // ConstantPool*
  ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}
void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
  if (RestoreMXCSROnJNICalls) {
    Label OK;
    get_fpcr(tmp1);
    mov(tmp2, tmp1);
    // Set FPCR to the state we need. We do want Round to Nearest. We
    // don't want non-IEEE rounding modes or floating-point traps.
    bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
    bfi(tmp1, zr, 8, 5);  // Clear exception-control bits (8-12)
    bfi(tmp1, zr, 0, 2);  // Clear AH:FIZ
    eor(tmp2, tmp1, tmp2);
    cbz(tmp2, OK);        // Only reset FPCR if it's wrong
    set_fpcr(tmp1);
    bind(OK);
  }
}
// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
  // OopHandle::resolve is an indirection.
  access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
  assert_different_registers(result, tmp1, tmp2);
  Label resolved;

  // A null weak handle resolves to null.
  cbz(result, resolved);

  // Only 64 bit platforms support GCs that require a tmp register.
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}
void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume
  // klass length is valid if the klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}
// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);
  }
  /* Old algorithm: is this any worse?
  Label nonnull;
  cbnz(r, nonnull);
  sub(r, r, rheapbase);
  bind(nonnull);
  lsr(r, r, LogMinObjAlignmentInBytes);
  */
}
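// A minimal sketch (added for clarity; not from the original source) of the
// mapping these routines implement, in plain C:
//
//   uint32_t narrow = (oop == 0) ? 0 : (uint32_t)((oop - heap_base) >> shift);
//   uint64_t oop    = (narrow == 0) ? 0 : heap_base + ((uint64_t)narrow << shift);
//
// With a null heap base the subtraction/addition disappears, and with
// shift == 0 the shift disappears, which is exactly the case analysis above
// and in the decode routines below.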
void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(r, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != nullptr) {
    sub(r, r, rheapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(r, r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(src, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");

  Register data = src;
  if (CompressedOops::base() != nullptr) {
    sub(dst, src, rheapbase);
    data = dst;
  }
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(dst, data, LogMinObjAlignmentInBytes);
    data = dst;
  }
  if (data == src)
    mov(dst, src);
}
void MacroAssembler::decode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0 || d != s) {
      lsl(d, s, CompressedOops::shift());
    }
  } else {
    Label done;
    if (d != s)
      mov(d, s);
    cbz(s, done);
    add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
    bind(done);
  }
  verify_oop_msg(d, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
    if (dst != src) {
      mov(dst, src);
    }
  }
}
MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
  assert(UseCompressedClassPointers, "not using compressed class pointers");
  assert(Metaspace::initialized(), "metaspace not initialized yet");

  if (_klass_decode_mode != KlassDecodeNone) {
    return _klass_decode_mode;
  }

  assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
         || 0 == CompressedKlassPointers::shift(), "decode alg wrong");

  if (CompressedKlassPointers::base() == nullptr) {
    return (_klass_decode_mode = KlassDecodeZero);
  }

  if (operand_valid_for_logical_immediate(
        /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
    const uint64_t range_mask =
      (1ULL << log2i(CompressedKlassPointers::range())) - 1;
    if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
      return (_klass_decode_mode = KlassDecodeXor);
    }
  }

  const uint64_t shifted_base =
    (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
  guarantee((shifted_base & 0xffff0000ffffffff) == 0,
            "compressed class base bad alignment");

  return (_klass_decode_mode = KlassDecodeMovk);
}
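// Illustrative summary (added for clarity) of the three decode strategies,
// for a narrow klass value n, shift s, and base B:
//   KlassDecodeZero: klass = n << s                (B == 0)
//   KlassDecodeXor:  klass = (n << s) ^ B          (B encodable as a logical
//                    immediate and aligned with respect to the encoding range)
//   KlassDecodeMovk: insert (B >> s) >> 32 into bits 32..47 of n with movk,
//                    then shift; the guarantee above ensures the shifted base
//                    has no bits set outside 32..47, so movk suffices.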
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsr(dst, src, LogKlassAlignmentInBytes);
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
      lsr(dst, dst, LogKlassAlignmentInBytes);
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk:
    if (CompressedKlassPointers::shift() != 0) {
      ubfx(dst, src, LogKlassAlignmentInBytes, 32);
    } else {
      movw(dst, src);
    }
    break;

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::encode_klass_not_null(Register r) {
  encode_klass_not_null(r, r);
}
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, LogKlassAlignmentInBytes);
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, LogKlassAlignmentInBytes);
      eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk: {
    const uint64_t shifted_base =
      (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();

    if (dst != src) movw(dst, src);
    movk(dst, shifted_base >> 32, 32);

    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, dst, LogKlassAlignmentInBytes);
    }

    break;
  }

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::decode_klass_not_null(Register r) {
  decode_klass_not_null(r, r);
}
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert (UseCompressedOops, "should only be used for compressed oops");
    assert (Universe::heap() != nullptr, "java heap should be initialized");
    assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  InstructionMark im(this);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  code_section()->relocate(inst_mark(), rspec);
  movz(dst, 0xDEAD, 16);
  movk(dst, 0xBEEF);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(k);
  assert(! Universe::heap()->is_in(k), "should not be an oop");

  InstructionMark im(this);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  code_section()->relocate(inst_mark(), rspec);
  narrowKlass nk = CompressedKlassPointers::encode(k);
  movz(dst, (nk >> 16), 16);
  movk(dst, nk & 0xffff);
}
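// Note (added for clarity): the movz/movk pair emitted for a narrow oop uses
// a placeholder value; the relocation recorded just before it lets the
// runtime patch the real 32-bit narrow oop into the two 16-bit immediates
// later. The narrow klass variant can emit the final value immediately.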
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                    Register dst, Address src,
                                    Register tmp1, Register tmp2) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
  }
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
                                     Address dst, Register val,
                                     Register tmp1, Register tmp2, Register tmp3) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  } else {
    bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                   Register tmp2, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
}

void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register tmp2, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
}

void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
                                    Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
}

// Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return Address((address)obj, rspec);
}
// Move an oop into a register.
void MacroAssembler::movoop(Register dst, jobject obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromUnknown tiv;
      assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
    }
#endif
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
    mov(dst, Address((address)obj, rspec));
  } else {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr_constant(dst, Address(dummy, rspec));
  }
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  mov(rscratch1, (int)os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -(int)os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // this could be any sized move, but it can serve as a debugging crumb,
    // so the bigger the better.
    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
    str(size, Address(tmp));
  }
}
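// A minimal C-like sketch (added for clarity; not from the original source)
// of the banging loop:
//
//   char* p = sp;
//   for (int remaining = size; remaining > 0; remaining -= page_size) {
//     p -= page_size;
//     *(intptr_t*)p = remaining; // force a write so the OS maps/checks the page
//   }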
// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
  ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}

// Read the polling page.  The address of the polling page must
// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  address mark;
  {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), rtype);
    ldrw(zr, Address(r, 0));
    mark = inst_mark();
  }
  verify_cross_modify_fence_not_required();
  return mark;
}
void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
  uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
  uint64_t dest_page = (uint64_t)dest.target() >> 12;
  int64_t offset_low = dest_page - low_page;
  int64_t offset_high = dest_page - high_page;

  assert(is_valid_AArch64_address(dest.target()), "bad address");
  assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");

  InstructionMark im(this);
  code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}
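// Illustrative note (added for clarity): adrp computes a 4KiB-page-relative
// address, reg = (pc & ~0xfff) + (imm21 << 12), reaching +/-4GiB. The caller
// then adds the returned byte_offset (the low 12 bits of the target), as in
// the CRC table setup above:
//
//   uint64_t offset;
//   adrp(r, ExternalAddress(addr), offset); // page of addr
//   add(r, r, offset);                      // exact address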
void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
  authenticate_return_address();
}
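// Frame sketch (added for clarity) immediately after build_frame(framesize):
//
//   sp + framesize - 1*wordSize : saved lr
//   sp + framesize - 2*wordSize : saved rfp
//   sp .. sp + framesize - 2*wordSize : spill/locals area
//
// The three size tiers differ only in how sp is adjusted: one sub whose
// immediate also fits the stp offset, a pre-indexed stp followed by a sub
// with a 12-bit immediate, or a sub via rscratch1 for anything larger.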
// This method counts leading positive bytes (highest bit not set) in the
// provided byte array.
address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
    // Simple and most common case of aligned small array which is not at the
    // end of memory page is placed here. All other cases are in stub.
    Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
    const uint64_t UPPER_BIT_MASK=0x8080808080808080;
    assert_different_registers(ary1, len, result);

    mov(result, len);
    cmpw(len, 0);
    br(LE, DONE);
    cmpw(len, 4 * wordSize);
    br(GE, STUB_LONG); // size > 32 then go to stub

    int shift = 64 - exact_log2(os::vm_page_size());
    lsl(rscratch1, ary1, shift);
    mov(rscratch2, (size_t)(4 * wordSize) << shift);
    adds(rscratch2, rscratch1, rscratch2);  // At end of page?
    br(CS, STUB); // at the end of page then go to stub
    subs(len, len, wordSize);
    br(LT, END);

  BIND(LOOP);
    ldr(rscratch1, Address(post(ary1, wordSize)));
    tst(rscratch1, UPPER_BIT_MASK);
    br(NE, SET_RESULT);
    subs(len, len, wordSize);
    br(GE, LOOP);
    cmpw(len, -wordSize);
    br(EQ, DONE);

  BIND(END);
    ldr(rscratch1, Address(ary1));
    sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
    lslv(rscratch1, rscratch1, rscratch2);
    tst(rscratch1, UPPER_BIT_MASK);
    br(NE, SET_RESULT);
    b(DONE);

  BIND(STUB);
    RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
    assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
    address tpc1 = trampoline_call(count_pos);
    if (tpc1 == nullptr) {
      DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(DONE);

  BIND(STUB_LONG);
    RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
    assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
    address tpc2 = trampoline_call(count_pos_long);
    if (tpc2 == nullptr) {
      DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(DONE);

  BIND(SET_RESULT);
    add(len, len, wordSize);
    sub(result, result, len);

  BIND(DONE);
    postcond(pc() != badAddress);
    return pc();
}
// Clobbers: rscratch1, rscratch2, rflags
5553
// May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
5554
address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
5555
Register tmp4, Register tmp5, Register result,
5556
Register cnt1, int elem_size) {
5558
Register tmp1 = rscratch1;
5559
Register tmp2 = rscratch2;
5560
int elem_per_word = wordSize/elem_size;
5561
int log_elem_size = exact_log2(elem_size);
5562
int klass_offset = arrayOopDesc::klass_offset_in_bytes();
5563
int length_offset = arrayOopDesc::length_offset_in_bytes();
5565
= arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
5566
// When the length offset is not aligned to 8 bytes,
5567
// then we align it down. This is valid because the new
5568
// offset will always be the klass which is the same
5570
int start_offset = align_down(length_offset, BytesPerWord);
5571
int extra_length = base_offset - start_offset;
5572
assert(start_offset == length_offset || start_offset == klass_offset,
5573
"start offset must be 8-byte-aligned or be the klass offset");
5574
assert(base_offset != start_offset, "must include the length field");
5575
extra_length = extra_length / elem_size; // We count in elements, not bytes.
5576
int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
5578
assert(elem_size == 1 || elem_size == 2, "must be char or byte");
5579
assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
5583
const char kind = (elem_size == 2) ? 'U' : 'L';
5585
snprintf(comment, sizeof comment, "array_equals%c{", kind);
5586
BLOCK_COMMENT(comment);
5592
cmpoop(a1, a2); // May have read barriers for a1 and a2.
5595
if (UseSimpleArrayEquals) {
5596
Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
5597
// if (a1 == nullptr || a2 == nullptr)
5599
// a1 & a2 == 0 means (some-pointer is null) or
5600
// (very-rare-or-even-probably-impossible-pointer-values)
5601
// so, we can save one branch in most cases
5604
br(EQ, A_MIGHT_BE_NULL);
5605
// if (a1.length != a2.length)
5607
bind(A_IS_NOT_NULL);
5608
ldrw(cnt1, Address(a1, length_offset));
5609
// Increase loop counter by diff between base- and actual start-offset.
5610
addw(cnt1, cnt1, extra_length);
5611
lea(a1, Address(a1, start_offset));
5612
lea(a2, Address(a2, start_offset));
5613
// Check for short strings, i.e. smaller than wordSize.
5614
subs(cnt1, cnt1, elem_per_word);
5615
br(Assembler::LT, SHORT);
5616
// Main 8 byte comparison loop.
5618
ldr(tmp1, Address(post(a1, wordSize)));
5619
ldr(tmp2, Address(post(a2, wordSize)));
5620
subs(cnt1, cnt1, elem_per_word);
5621
eor(tmp5, tmp1, tmp2);
5623
} br(GT, NEXT_WORD);
5624
// Last longword. In the case where length == 4 we compare the
5625
// same longword twice, but that's still faster than another
5626
// conditional branch.
5627
// cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
5629
if (log_elem_size > 0)
5630
lsl(cnt1, cnt1, log_elem_size);
5631
ldr(tmp3, Address(a1, cnt1));
5632
ldr(tmp4, Address(a2, cnt1));
5633
eor(tmp5, tmp3, tmp4);
5636
bind(A_MIGHT_BE_NULL);
5637
// in case both a1 and a2 are not-null, proceed with loads
5643
tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
5645
ldrw(tmp1, Address(post(a1, 4)));
5646
ldrw(tmp2, Address(post(a2, 4)));
5647
eorw(tmp5, tmp1, tmp2);
5651
tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
5653
ldrh(tmp3, Address(post(a1, 2)));
5654
ldrh(tmp4, Address(post(a2, 2)));
5655
eorw(tmp5, tmp3, tmp4);
5659
if (elem_size == 1) { // Only needed when comparing byte arrays.
5660
tbz(cnt1, 0, SAME); // 0-1 bytes left.
5664
eorw(tmp5, tmp1, tmp2);
  } else {
    Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
        CSET_EQ, LAST_CHECK;
    ldrw(cnt1, Address(a1, length_offset));
    // Increase loop counter by diff between base- and actual start-offset.
    addw(cnt1, cnt1, extra_length);

    // On most CPUs a2 is still "locked" (surprisingly) by the ldrw above,
    // so it's faster to perform another branch before comparing a1 and a2.
    cmp(cnt1, (u1)elem_per_word);
    br(LE, SHORT); // short or same
    ldr(tmp3, Address(pre(a1, start_offset)));
    subs(zr, cnt1, stubBytesThreshold);
    br(GE, STUB);
    ldr(tmp4, Address(pre(a2, start_offset)));
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);

    // Main 16 byte comparison loop with 2 exits
    bind(NEXT_DWORD); {
      ldr(tmp1, Address(pre(a1, wordSize)));
      ldr(tmp2, Address(pre(a2, wordSize)));
      subs(cnt1, cnt1, 2 * elem_per_word);
      eor(tmp4, tmp3, tmp4);
      ldr(tmp3, Address(pre(a1, wordSize)));
      ldr(tmp4, Address(pre(a2, wordSize)));
      cmp(cnt1, (u1)elem_per_word);
    } br(EQ, NEXT_DWORD);

    eor(tmp4, tmp3, tmp4);
    eor(tmp2, tmp1, tmp2);
    lslv(tmp2, tmp2, tmp5);
    orr(tmp5, tmp4, tmp2);

    eor(tmp2, tmp1, tmp2);

    bind(STUB);
    ldr(tmp4, Address(pre(a2, start_offset)));
    if (elem_size == 2) { // convert to byte counter
      lsl(cnt1, cnt1, 1);
    }
    eor(tmp5, tmp3, tmp4);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }

    // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
    // so if a2 == null we return false (0); otherwise return true, which
    // is why we can simply return a2.
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    ldr(tmp3, Address(a1, start_offset));
    ldr(tmp4, Address(a2, start_offset));
    eor(tmp4, tmp3, tmp4);
    lslv(tmp5, tmp4, tmp5);
  BLOCK_COMMENT("} array_equals");
  postcond(pc() != badAddress);
  return pc();
}

// For Strings we're passed the address of the first characters in a1
// and a2 and the length in cnt1.
//
// There are two implementations. For arrays >= 8 bytes, all
// comparisons (including the final one, which may overlap) are
// performed 8 bytes at a time. For strings < 8 bytes, we compare a
// word, then a halfword, and then a byte.
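//
// Example: for length == 10, the loop handles bytes 0-7 and exits with
// cnt1 == -6, so the final 8-byte loads at offset cnt1 compare bytes
// 2-9; bytes 2-7 are simply checked twice, which is cheaper than a branch.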
void MacroAssembler::string_equals(Register a1, Register a2,
                                   Register result, Register cnt1)
{
  Label SAME, DONE, SHORT, NEXT_WORD;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  Register cnt2 = tmp2;  // cnt2 only used in array length compare
  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

  char comment[64];
  snprintf(comment, sizeof comment, "{string_equalsL");
  BLOCK_COMMENT(comment);

  // Check for short strings, i.e. smaller than wordSize.
  subs(cnt1, cnt1, wordSize);
  br(Assembler::LT, SHORT);
  // Main 8 byte comparison loop.
  bind(NEXT_WORD); {
    ldr(tmp1, Address(post(a1, wordSize)));
    ldr(tmp2, Address(post(a2, wordSize)));
    subs(cnt1, cnt1, wordSize);
    eor(tmp1, tmp1, tmp2);
  } br(GT, NEXT_WORD);
  // Last longword. In the case where length == 4 we compare the
  // same longword twice, but that's still faster than another
  // conditional branch.
  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
  // length == 4.
  ldr(tmp1, Address(a1, cnt1));
  ldr(tmp2, Address(a2, cnt1));
  eor(tmp2, tmp1, tmp2);
  bind(SHORT);
  Label TAIL03, TAIL01;

  tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
  ldrw(tmp1, Address(post(a1, 4)));
  ldrw(tmp2, Address(post(a2, 4)));
  eorw(tmp1, tmp1, tmp2);

  bind(TAIL03);
  tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
  ldrh(tmp1, Address(post(a1, 2)));
  ldrh(tmp2, Address(post(a2, 2)));
  eorw(tmp1, tmp1, tmp2);

  bind(TAIL01);
  tbz(cnt1, 0, SAME); // 0-1 bytes left.
  ldrb(tmp1, Address(a1));
  ldrb(tmp2, Address(a2));
  eorw(tmp1, tmp1, tmp2);

  bind(SAME);
  // Arrays are equal.
  mov(result, true);

  bind(DONE);
  BLOCK_COMMENT("} string_equals");
}
// The size of the blocks erased by the zero_blocks stub. We must
// handle anything smaller than this ourselves in zero_words().
const int MacroAssembler::zero_words_block_size = 8;

// zero_words() is used by C2 ClearArray patterns and by
// C1_MacroAssembler. It is as small as possible, handling small word
// counts locally and delegating anything larger to the zero_blocks
// stub. It is expanded many times in compiled code, so it is
// important to keep it short.
//
// ptr:   Address of a buffer to be zeroed.
// cnt:   Count in HeapWords.
//
// ptr, cnt, rscratch1, and rscratch2 are clobbered.
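//
// Rough shape of the expansion (a sketch, not the literal code):
//
//   if (cnt >= zero_words_block_size) {
//     call zero_blocks;        // zeroes 8-word blocks, adjusts ptr/cnt
//   }
//   // Fewer than 8 words remain: store them with the short branch-free
//   // tbz/stp ladder below.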
address MacroAssembler::zero_words(Register ptr, Register cnt)
{
  assert(is_power_of_2(zero_words_block_size), "adjust this");

  BLOCK_COMMENT("zero_words {");
  assert(ptr == r10 && cnt == r11, "mismatch in register usage");

  RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
  assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");

  subs(rscratch1, cnt, zero_words_block_size);
  Label around;
  br(LO, around);
  {
    RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
    assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
    // Make sure this is a C2 compilation. C1 allocates space only for
    // trampoline stubs generated by Call LIR ops, and in any case it
    // makes sense for a C1 compilation task to proceed as quickly as
    // possible.
    CompileTask* task;
    if (StubRoutines::aarch64::complete()
        && Thread::current()->is_Compiler_thread()
        && (task = ciEnv::current()->task())
        && is_c2_compile(task->comp_level())) {
      address tpc = trampoline_call(zero_blocks);
      if (tpc == nullptr) {
        DEBUG_ONLY(reset_labels(around));
        return nullptr;
      }
    } else {
      far_call(zero_blocks);
    }
  }
  bind(around);

  // We have a few words left to do. zero_blocks has adjusted r10 and r11
  // for us.
  for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
    Label l;
    tbz(cnt, exact_log2(i), l);
    for (int j = 0; j < i; j += 2) {
      stp(zr, zr, post(ptr, 2 * BytesPerWord));
    }
    bind(l);
  }
  {
    Label l;
    tbz(cnt, 0, l);
    str(zr, Address(ptr));
    bind(l);
  }

  BLOCK_COMMENT("} zero_words");
  return pc();
}
// base:  Address of a buffer to be zeroed, 8 bytes aligned.
// cnt:   Immediate count in HeapWords.
//
// r10, r11, rscratch1, and rscratch2 are clobbered.
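//
// Strategy (sketch): a count no larger than BlockZeroingLowLimit /
// BytesPerWord is zeroed inline with stp pairs (16 words per loop
// iteration); anything larger is handed to the register variant
// zero_words(r10, r11) above, which may use the zero_blocks stub.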
address MacroAssembler::zero_words(Register base, uint64_t cnt)
{
  assert(wordSize <= BlockZeroingLowLimit,
         "increase BlockZeroingLowLimit");
  address result = nullptr;
  if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
    char buf[64];
    snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
    uint64_t loops = cnt / 16;
    mov(rscratch2, loops - 1);
    for (int i = 0; i < 16; i += 2) {
      stp(zr, zr, Address(base, i * BytesPerWord));
    }
    add(base, base, 16 * BytesPerWord);
    subs(rscratch2, rscratch2, 1);
    int i = cnt & 1;  // store any odd word to start
    if (i) str(zr, Address(base));
    for (; i < (int)cnt; i += 2) {
      stp(zr, zr, Address(base, i * wordSize));
    }
    BLOCK_COMMENT("} zero_words");
  } else {
    mov(r10, base); mov(r11, cnt);
    result = zero_words(r10, r11);
  }
  return result;
}
// Zero blocks of memory by using DC ZVA.
//
// Aligns the base address first sufficiently for DC ZVA, then uses
// DC ZVA repeatedly for every full block. cnt is the size to be
// zeroed in HeapWords. Returns the count of words left to be zeroed
// in cnt.
//
// NOTE: This is intended to be used in the zero_blocks() stub. If
// you want to use it elsewhere, note that cnt must be >= 2*zva_length.
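//
// Alignment example: with a 64-byte ZVA block size and base % 64 == 48,
// the 16 bytes up to the next block boundary are zeroed through the
// computed branch into the stp table below; every following full block
// is then cleared with a single "dc zva".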
void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
  Register tmp = rscratch1;
  Register tmp2 = rscratch2;
  int zva_length = VM_Version::zva_length();
  Label initial_table_end, loop_zva;
  Label fini;

  // Base must be 16 byte aligned. If not just return and let caller handle it.
  tst(base, 0x0f);
  br(Assembler::NE, fini);
  // Align base with ZVA length.
  neg(tmp, base);
  andr(tmp, tmp, zva_length - 1);

  // tmp: the number of bytes to be filled to align the base with ZVA length.
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}
// base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte unit.
// value:  Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm (C-style sketch of the generated code):
//
//    if ((p & 8) != 0) {    // align p to 16 bytes
//      *p++ = v;
//    }
//    scratch1 = cnt & 14;   // words covered by the unrolled entry
//    switch (scratch1 / 2) {
//      ...                  // Duff's-device style entry into an
//    }                      // unrolled loop of 16-byte stp stores
//    if ((cnt & 1) == 1) {  // store any trailing odd word
//      *p = v;
//    }
  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  str(value, Address(post(base, 8)));

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
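  // Computed branch into the unrolled store sequence below (a
  // Duff's-device entry): each stp is a 4-byte instruction that fills
  // 2 words, so backing the entry point off by rscratch1 * 2 bytes
  // skips exactly the stores that are not needed.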
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);
  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}
// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
//     return the number of characters copied.
// - java/lang/StringUTF16.compress
//     return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-
// condition: 'res' == 'len', while an unsuccessful copy will exit with the
// post-condition: 0 <= 'res' < 'len'.
//
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
// degrade performance (on Ampere Altra - Neoverse N1), to an extent
// beyond the acceptable, even though the footprint would be smaller.
// Using 'umaxv' in the ASCII-case comes with a small penalty but does
// avoid additional bloat.
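//
// Per-character predicate (for reference): a char c is storable iff
// (c & 0xff00) == 0 in the ISO case (c <= 0xff), or (c & 0xff80) == 0
// in the ASCII case (c <= 0x7f) -- the same masks the scalar tail uses
// in tst(chr, ascii ? 0xff80 : 0xff00).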
//
// Clobbers: src, dst, res, rscratch1, rscratch2, rflags
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register res, bool ascii,
                                      FloatRegister vtmp0, FloatRegister vtmp1,
                                      FloatRegister vtmp2, FloatRegister vtmp3,
                                      FloatRegister vtmp4, FloatRegister vtmp5)
{
  Register cnt = res;
  Register max = rscratch1;
  Register chk = rscratch2;

  prfm(Address(src), PLDL1STRM);
  movw(cnt, len);

#define ASCII(insn) do { if (ascii) { insn; } } while (0)

  Label LOOP_32, DONE_32, FAIL_32;

  ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
  // Extract lower bytes.
  FloatRegister vlo0 = vtmp4;
  FloatRegister vlo1 = vtmp5;
  uzp1(vlo0, T16B, vtmp0, vtmp1);
  uzp1(vlo1, T16B, vtmp2, vtmp3);
  // OR adjacent register pairs so one uzp2 can gather all the high bytes.
  orr(vtmp0, T16B, vtmp0, vtmp1);
  orr(vtmp2, T16B, vtmp2, vtmp3);
  // Extract merged upper bytes.
  FloatRegister vhix = vtmp0;
  uzp2(vhix, T16B, vtmp0, vtmp2);
  // ISO-check on hi-parts (all zero).
  //                          ASCII-check on lo-parts (no sign).
  FloatRegister vlox = vtmp1; // Merge lower bytes.
  ASCII(orr(vlox, T16B, vlo0, vlo1));
  umov(chk, vhix, D, 1);      ASCII(cm(LT, vlox, T16B, vlox));
  fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
  orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
  ASCII(orr(chk, chk, max));
  cbnz(chk, FAIL_32);
  st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
  Label LOOP_8, SKIP_8;

  FloatRegister vhi = vtmp0;
  FloatRegister vlo = vtmp1;
  ld1(vtmp3, T8H, src);
  uzp1(vlo, T16B, vtmp3, vtmp3);
  uzp2(vhi, T16B, vtmp3, vtmp3);
  // ISO-check on hi-parts (all zero).
  //                          ASCII-check on lo-parts (no sign).
  ASCII(cm(LT, vtmp2, T16B, vlo));
  fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
  ASCII(umov(max, vtmp2, B, 0));
  ASCII(orr(chk, chk, max));
  strd(vlo, Address(post(dst, 8)));

  // Scalar tail: one char at a time.
  Register chr = rscratch1;
  ldrh(chr, Address(post(src, 2)));
  tst(chr, ascii ? 0xff80 : 0xff00);
  strb(chr, Address(post(dst, 1)));

  // Return index where we stopped.
  subw(res, len, cnt);
// Inflate byte[] array to char[].
//
// Clobbers: src, dst, len, rflags, rscratch1, v0-v6
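//
// Widening sketch: the zip1 instructions below interleave source bytes
// with a vector of zeros (vtmp1, prepared earlier and not visible in
// this excerpt), so each byte b becomes the little-endian halfword
// (0x00 << 8) | b -- i.e. the corresponding latin-1 char.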
address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
                                           FloatRegister vtmp1, FloatRegister vtmp2,
                                           FloatRegister vtmp3, Register tmp4) {
  Label big, done, after_init, to_stub;

  assert_different_registers(src, dst, len, tmp4, rscratch1);

  // Short string: less than 8 bytes.
  // Use SIMD to do 4 bytes.
  ldrs(vtmp2, post(src, 4));
  zip1(vtmp3, T8B, vtmp2, vtmp1);
  strd(vtmp3, post(dst, 8));

  // Do the remaining bytes by steam, i.e. one at a time.
  ldrb(tmp4, post(src, 1));
  strh(tmp4, post(dst, 2));
  if (SoftwarePrefetchHintDistance >= 0) {
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
    assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(big, done));
      postcond(pc() == badAddress);
      return nullptr;
    }
  }
  // Unpack the bytes 8 at a time.
  Label loop, around, loop_last, loop_start;

  if (SoftwarePrefetchHintDistance >= 0) {
    const int large_loop_threshold = (64 + 16) / 8;
    ldrd(vtmp2, post(src, 8));
    cmp(tmp4, (u1)large_loop_threshold);

    ldrd(vtmp2, post(src, 8));
    subs(tmp4, tmp4, 1);
    zip1(vtmp2, T16B, vtmp2, vtmp1);
    ldrd(vtmp3, post(src, 8));
    st1(vtmp2, T8H, post(dst, 16));
    subs(tmp4, tmp4, 1);
    zip1(vtmp3, T16B, vtmp3, vtmp1);
    st1(vtmp3, T8H, post(dst, 16));

    zip1(vtmp2, T16B, vtmp2, vtmp1);
    st1(vtmp2, T8H, post(dst, 16));

    ldrd(vtmp2, post(src, 8));
    zip1(vtmp3, T16B, vtmp2, vtmp1);
    st1(vtmp3, T8H, post(dst, 16));
  }

  // Do the tail of up to 8 bytes.
  ldrd(vtmp3, Address(src, -8));
  add(dst, dst, len, ext::uxtw, 1);
  zip1(vtmp3, T16B, vtmp3, vtmp1);
  strq(vtmp3, Address(dst, -16));

  postcond(pc() != badAddress);
  return pc();
}
// Compress char[] array to byte[].
//
// Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
//
// Return the array length if every element in array can be encoded,
// otherwise, the index of first non-latin1 (> 0xff) character.
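//
// Contract example: for {'a', 0x100, 'b'} the copy stops at index 1 and
// 1 is returned; for {'a', 'b', 'c'} every char fits in latin1 and the
// length 3 is returned.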
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         Register res,
                                         FloatRegister tmp0, FloatRegister tmp1,
                                         FloatRegister tmp2, FloatRegister tmp3,
                                         FloatRegister tmp4, FloatRegister tmp5) {
  encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
}
// java.lang.Math.round(double a)
// Returns the closest long to the argument, with ties rounding to
// positive infinity. This requires some fiddling for corner
// cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
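//
// Corner-case example: Math.round(-0.5) must be 0, but a plain
// round-to-nearest-ties-away conversion gives -1, and the naive
// (jlong)(a + 0.5) rounds twice: for a == 0x1.fffffffffffffp-2
// (the largest double below 0.5), a + 0.5 rounds up to 1.0, giving 1
// instead of 0. Hence the two paths below.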
void MacroAssembler::java_round_double(Register dst, FloatRegister src,
                                       FloatRegister ftmp) {
  BLOCK_COMMENT("java_round_double: { ");
  fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  // Test if src >= 0 || abs(src) >= 0x1.0p52
  eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
  mov(rscratch2, julong_cast(0x1.0p52));
  cmp(rscratch1, rscratch2);
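  // With the sign bit flipped, any non-negative src has bit 63 set and
  // so compares unsigned-higher than bits(0x1.0p52), while a negative
  // src compares as the raw bits of abs(src), which are >= the bits of
  // 0x1.0p52 exactly when abs(src) >= 0x1.0p52. One unsigned compare
  // therefore implements "src >= 0 || abs(src) >= 0x1.0p52".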
  // src < 0 && abs(src) < 0x1.0p52
  // src may have a fractional part, so add 0.5
  fmovd(ftmp, 0.5);
  faddd(ftmp, src, ftmp);
  // Convert double to jlong, use RoundTowardsNegative
  BLOCK_COMMENT("} java_round_double");
}
void MacroAssembler::java_round_float(Register dst, FloatRegister src,
                                      FloatRegister ftmp) {
  BLOCK_COMMENT("java_round_float: { ");
  fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  // Test if src >= 0 || abs(src) >= 0x1.0p23
  eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
  mov(rscratch2, jint_cast(0x1.0p23f));
  cmp(rscratch1, rscratch2);
  // src < 0 && abs(src) < 0x1.0p23
  // src may have a fractional part, so add 0.5
  fmovs(ftmp, 0.5f);
  fadds(ftmp, src, ftmp);
  // Convert float to jint, use RoundTowardsNegative
  fcvtmssw(dst, ftmp);
  BLOCK_COMMENT("} java_round_float");
}
// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is a usual C function.
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs =
    LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
    NOT_LINUX (RegSet::range(r0, r17) + lr - dst);

  protect_return_address();
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blr(lr);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
  authenticate_return_address();
}
void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // would like to assert this
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clear virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}
void MacroAssembler::verify_sve_vector_length(Register tmp) {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}
void MacroAssembler::safepoint_isb() {
  isb();
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
}
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::YIELD:
        yield();
        break;
      default:
        ShouldNotReachHere();
    }
  }
}
// Stack frame creation/removal
void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.
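//
// Typical pairing (sketch; see enter()/leave() above):
//
//   protect_return_address();      // sign LR before it is stored
//   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
//   ...
//   ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
//   authenticate_return_address(); // verify LR after it is reloaded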
// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return address in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    pacia(return_reg, zr);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return address in the given register. Use before updating the LR in the
// existing stack frame for the current function.
// Uses value zero as the modifier.
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autia(return_reg, zr);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - ie when the program returns to the invalid LR. At this point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
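// For example, incoming stack slot 0 maps to offset 16 above rfp: the
// saved rfp and lr occupy two words, i.e. 4 slots of 4 bytes each
// (VMRegImpl::stack_slot_size).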
static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (AArch64 ABI) even though Java would only store 32 bits for a
// parameter. On 32-bit it would simply be 32 bits, so this routine
// does 32->32 on 32-bit and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
// An oop arg. Must pass a handle, not the oop itself.
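//
// Handle logic in a nutshell (sketch):
//
//   rHandle = (oop == nullptr) ? nullptr : address_of_slot_holding_oop;
//
// i.e. the callee receives the address of a location containing the oop
// (a JNI handle), or null when the oop itself is null.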
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle
  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle
  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
// A float arg may have to do float-reg/int-reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}
// Implements lightweight-locking.
//
//  - obj: the object to be locked
//  - t1, t2, t3: temporary registers, will be destroyed
//  - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
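//
// Fast path in pseudocode (a sketch of the code below):
//
//   if (lock_stack is full)                      goto slow;
//   if (top-of-lock-stack == obj)                goto push;  // recursive
//   if (mark has monitor bits 0b10)              goto slow;
//   if (!CAS(obj->mark, mark|0b01, mark&~0b01))  goto slow;
//   push: lock_stack.push(obj);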
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmpoop(t, obj);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}
// Implements lightweight-unlocking.
//
//  - obj: the object to be unlocked
//  - t1, t2, t3: temporary registers
//  - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
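//
// Fast path in pseudocode (a sketch of the code below):
//
//   if (top-of-lock-stack != obj)                goto slow;
//   pop lock-stack entry;
//   if (next entry == obj)                       done;  // recursive
//   if (mark has monitor bits 0b10)              { re-push; goto slow; }
//   if (!CAS(obj->mark, mark&~0b01, mark|0b01))  { re-push; goto slow; }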
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmpoop(t, obj);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmpoop(t, obj);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));