/*
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "opto/c2_MacroAssembler.hpp"
#include "runtime/basicLock.hpp"
// TODO: 8 bytes at a time? pre-fetch?

// Compare char[] arrays aligned to 4 bytes.
void C2_MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                           Register limit, Register result,
                                           Register chr1, Register chr2, Label& Ldone) {
  // Emits code that compares two char arrays for equality.
  // On entry: ary1/ary2 point at the (4-byte aligned) array data and
  // limit holds the number of bytes to compare (2 * char count, non-zero).
  // Generated code: on inequality sets result = 0 and branches to Ldone;
  // on the equal path it either sets result = 1 and branches to Ldone, or
  // falls through — the caller is expected to set result = 1 after this
  // (see the trailing comment below).
  // Clobbers: ary1, ary2, limit, chr1, chr2, result.
  Label Lvector, Lloop;

  // Note: limit contains number of bytes (2*char_elements) != 0.
  tst(limit, 0x2); // trailing character ?
  b(Lvector, eq);  // even number of words -> no trailing char to compare

  // compare the trailing char
  sub(limit, limit, sizeof(jchar));
  ldrh(chr1, Address(ary1, limit));
  ldrh(chr2, Address(ary2, limit));
  cmp(chr1, chr2);
  mov(result, 0, ne);     // not equal
  b(Ldone, ne);

  // only one char ?
  tst(limit, limit);
  mov(result, 1, eq);
  b(Ldone, eq);

  // word by word compare, don't need alignment check
  bind(Lvector);

  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  // Compare one 32-bit word (two chars) per iteration, walking the
  // negative offset up toward zero.
  bind(Lloop);
  ldr_u32(chr1, Address(ary1, limit));
  ldr_u32(chr2, Address(ary2, limit));
  cmp_32(chr1, chr2);
  mov(result, 0, ne);     // not equal
  b(Ldone, ne);
  adds(limit, limit, 2*sizeof(jchar));
  b(Lloop, ne);           // loop until offset reaches zero

  // Caller should set it:
  // mov(result_reg, 1); //equal
}
// Emits the C2 inline (fast-path) monitor-enter sequence.
// On exit the flags encode the outcome: EQ -> locked, NE -> go to slow path.
// Clobbers: Rscratch, Rscratch2 (and Rbox contents via the displaced header).
void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2) {
  assert(VM_Version::supports_ldrex(), "unsupported, yet?");

  assert_different_registers(Roop, Rbox, Rscratch, Rscratch2);

  Label fast_lock, done;

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(Rscratch, Roop);
    ldr_u32(Rscratch, Address(Rscratch, Klass::access_flags_offset()));
    tst(Rscratch, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(done, ne); // value-based class: NE -> always take the slow path
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    // Branches to done (with NE) on failure; falls through on success.
    lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
                     1 /* savemask (save t1) */, done);

    // Success: set Z
    cmp(Roop, Roop);
  } else if (LockingMode == LM_LEGACY) {
    Register Rmark = Rscratch2;

    ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
    tst(Rmark, markWord::unlocked_value);
    b(fast_lock, ne); // unlocked: go try to CAS-install the lock

    // Check for recursive lock
    // See comments in InterpreterMacroAssembler::lock_object for
    // explanations on the fast recursive locking check.
    // -1- test low 2 bits
    movs(Rscratch, AsmOperand(Rmark, lsl, 30));
    // -2- test (hdr - SP) if the low two bits are 0
    sub(Rscratch, Rmark, SP, eq);
    movs(Rscratch, AsmOperand(Rscratch, lsr, exact_log2(os::vm_page_size())), eq);
    // If still 'eq' then recursive locking OK
    // set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8153107)
    str(Rscratch, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
    b(done);

    bind(fast_lock);
    // Save the (unlocked) mark word as the displaced header, then CAS it
    // against the object header to acquire the stack lock.
    str(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));

    bool allow_fallthrough_on_failure = true;
    bool one_shot = true;
    cas_for_lock_acquire(Rmark, Rbox, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
  }
  bind(done);

  // At this point flags are set as follows:
  //  EQ -> Success
  //  NE -> Failure, branch to slow path
}
// Emits the C2 inline (fast-path) monitor-exit sequence, mirroring fast_lock.
// On exit the flags encode the outcome: EQ -> unlocked, NE -> go to slow path.
// Clobbers: Rscratch, Rscratch2.
void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2) {
  assert(VM_Version::supports_ldrex(), "unsupported, yet?");

  assert_different_registers(Roop, Rbox, Rscratch, Rscratch2);

  Label done;

  if (LockingMode == LM_LIGHTWEIGHT) {
    // Branches to done (with NE) on failure; falls through on success.
    lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
                       1 /* savemask (save t1) */, done);

    cmp(Roop, Roop); // Success: Set Z
    // Fall through

  } else if (LockingMode == LM_LEGACY) {
    Register Rmark = Rscratch2;

    // Find the lock address and load the displaced header from the stack.
    ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
    // If hdr is null, we've got recursive locking and there's nothing more to do
    cmp(Rmark, 0);
    b(done, eq); // EQ -> recursive exit counts as success

    // Restore the object header
    bool allow_fallthrough_on_failure = true;
    bool one_shot = true;
    cas_for_lock_release(Rbox, Rmark, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
  }
  bind(done);

  // At this point flags are set as follows:
  //  EQ -> Success
  //  NE -> Failure, branch to slow path
}