/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "foreignGlobals.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/resourceArea.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "utilities/resourceHash.hpp"
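
// StubLocations maps the PLACEHOLDER location indices used while a calling
// convention is computed to the concrete VMStorage locations (registers or
// FRAME_DATA slots) chosen when the stub frame is laid out; placeholders are
// resolved via replace_place_holders() below.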
StubLocations::StubLocations() {
  for (uint32_t i = 0; i < LOCATION_LIMIT; i++) {
    _locs[i] = VMStorage::invalid();
  }
}

void StubLocations::set(uint32_t loc, VMStorage storage) {
  assert(loc < LOCATION_LIMIT, "oob");
  _locs[loc] = storage;
}

void StubLocations::set_frame_data(uint32_t loc, int offset) {
  set(loc, VMStorage(StorageType::FRAME_DATA, 8, offset));
}

VMStorage StubLocations::get(uint32_t loc) const {
  assert(loc < LOCATION_LIMIT, "oob");
  VMStorage storage = _locs[loc];
  assert(storage.is_valid(), "not set");
  return storage;
}

VMStorage StubLocations::get(VMStorage placeholder) const {
  assert(placeholder.type() == StorageType::PLACEHOLDER, "must be");
  return get(placeholder.index());
}

int StubLocations::data_offset(uint32_t loc) const {
  VMStorage storage = get(loc);
  assert(storage.type() == StorageType::FRAME_DATA, "must be");
  return storage.offset();
}

#define FOREIGN_ABI "jdk/internal/foreign/abi/"
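
// Parses the Java-side ABI descriptor (a jdk.internal.foreign.abi CallConv
// object) referenced by jconv into a CallRegs value holding the argument
// and return VMStorages.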
const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
  oop conv_oop = JNIHandles::resolve_non_null(jconv);
  objArrayOop arg_regs_oop = jdk_internal_foreign_abi_CallConv::argRegs(conv_oop);
  objArrayOop ret_regs_oop = jdk_internal_foreign_abi_CallConv::retRegs(conv_oop);
  int num_args = arg_regs_oop->length();
  int num_rets = ret_regs_oop->length();
  CallRegs result(num_args, num_rets);

  for (int i = 0; i < num_args; i++) {
    result._arg_regs.push(parse_vmstorage(arg_regs_oop->obj_at(i)));
  }

  for (int i = 0; i < num_rets; i++) {
    result._ret_regs.push(parse_vmstorage(ret_regs_oop->obj_at(i)));
  }

  return result;
}
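
// Converts a jdk.internal.foreign.abi.VMStorage oop into the VM's VMStorage
// representation.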
VMStorage ForeignGlobals::parse_vmstorage(oop storage) {
  jbyte type = jdk_internal_foreign_abi_VMStorage::type(storage);
  jshort segment_mask_or_size = jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(storage);
  jint index_or_offset = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);

  return VMStorage(static_cast<StorageType>(type), segment_mask_or_size, index_or_offset);
}
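
// Total number of bytes needed to spill every register in 'regs'
// (per-register sizes are platform-dependent, see pd_reg_size).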
int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
  int result_size = 0;
  for (int i = 0; i < regs.length(); i++) {
    result_size += pd_reg_size(regs.at(i));
  }
  return result_size;
}
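
// Emits code that either spills (spill == true) or reloads (spill == false)
// each register in _regs to/from the stack area starting at rsp_offset.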
void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
  assert(rsp_offset != -1, "rsp_offset should be set");
  int offset = rsp_offset;
  for (int i = 0; i < _regs.length(); i++) {
    VMStorage reg = _regs.at(i);
    if (spill) {
      pd_store_reg(masm, offset, reg);
    } else {
      pd_load_reg(masm, offset, reg);
    }
    offset += pd_reg_size(reg);
  }
}

void ArgumentShuffle::print_on(outputStream* os) const {
  os->print_cr("Argument shuffle {");
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg = move.to;

    os->print("Move from ");
    from_reg.print_on(os);
    os->print(" to ");
    to_reg.print_on(os);
    os->print_cr("");
  }
  os->print_cr("}");
}
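
// Size in bytes of the outgoing stack argument area: the highest-used stack
// offset plus the size of the argument stored there, aligned up to 8 bytes.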
int ForeignGlobals::compute_out_arg_bytes(const GrowableArray<VMStorage>& out_regs) {
  uint32_t max_stack_offset = 0;
  for (VMStorage reg : out_regs) {
    if (reg.is_stack())
      max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
  }
  return align_up(max_stack_offset, 8);
}
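
// Computes the Java calling convention for the given signature, pushing the
// selected VMStorage for each (non-void) argument, and returns the size of
// the outgoing stack argument area in bytes.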
int ForeignGlobals::java_calling_convention(const BasicType* signature, int num_args, GrowableArray<VMStorage>& out_regs) {
  VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
  int slots = align_up(SharedRuntime::java_calling_convention(signature, vm_regs, num_args), 2);
  for (int i = 0; i < num_args; i++) {
    VMRegPair pair = vm_regs[i];
    // Note: we ignore the second register here. The signature should consist of
    // register-sized values, so there should be no need for multi-register pairs.
    if (signature[i] != T_VOID) {
      out_regs.push(as_VMStorage(pair.first(), signature[i]));
    }
  }
  return slots << LogBytesPerInt;
}
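
// Replaces PLACEHOLDER storages with the concrete locations recorded in
// 'locs'; all other storages are passed through unchanged.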
GrowableArray<VMStorage> ForeignGlobals::replace_place_holders(const GrowableArray<VMStorage>& regs, const StubLocations& locs) {
  GrowableArray<VMStorage> result(regs.length());
  for (VMStorage reg : regs) {
    result.push(reg.type() == StorageType::PLACEHOLDER ? locs.get(reg) : reg);
  }
  return result;
}
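
// For upcalls: drops the first argument register, which is reserved for the
// receiver and is filled in separately.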
GrowableArray<VMStorage> ForeignGlobals::upcall_filter_receiver_reg(const GrowableArray<VMStorage>& unfiltered_regs) {
  GrowableArray<VMStorage> out(unfiltered_regs.length() - 1);
  // drop first arg reg
  for (int i = 1; i < unfiltered_regs.length(); i++) {
    out.push(unfiltered_regs.at(i));
  }
  return out;
}
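
// For downcalls with heap arguments: each oop argument is paired with an
// offset in the signature. This keeps the register of the oop itself, drops
// the register carrying the offset, and reports whether any oops were seen.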
GrowableArray<VMStorage> ForeignGlobals::downcall_filter_offset_regs(const GrowableArray<VMStorage>& regs,
                                                                     BasicType* signature, int num_args,
                                                                     bool& has_objects) {
  GrowableArray<VMStorage> result(regs.length());
  int reg_idx = 0;
  for (int sig_idx = 0; sig_idx < num_args; sig_idx++) {
    if (signature[sig_idx] == T_VOID) {
      continue; // ignore upper halves
    }

    result.push(regs.at(reg_idx++));
    if (signature[sig_idx] == T_OBJECT) {
      has_objects = true;
      sig_idx++; // skip offset
      reg_idx++;
    }
  }
  return result;
}
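
// ComputeMoveOrder orders the argument moves of an ArgumentShuffle so that no
// source is overwritten before it has been read. Moves whose destination is
// the (distinct) outgoing stack area are emitted first. The remaining
// register-to-register moves are linked into chains by recording which move
// kills which register; any cycle is broken with the temporary register.
// For example, the moves r1 -> r2 and r2 -> r1 form a cycle that is emitted
// as r2 -> tmp, r1 -> r2, tmp -> r1.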
class ArgumentShuffle::ComputeMoveOrder: public StackObj {
  class MoveOperation;

  // segment_mask_or_size is not taken into account since
  // VMStorages that differ only in mask or size can still
  // conflict.
  static inline unsigned hash(const VMStorage& vms) {
    return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
  }
  static inline bool equals(const VMStorage& a, const VMStorage& b) {
    return a.type() == b.type() && a.index_or_offset() == b.index_or_offset();
  }

  using KillerTable = ResourceHashtable<
    VMStorage, MoveOperation*,
    32, // doesn't need to be big. don't have that many argument registers (in known ABIs)
    AnyObj::RESOURCE_AREA,
    mtInternal,
    ComputeMoveOrder::hash,
    ComputeMoveOrder::equals
  >;
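
  // A single move from _src to _dst. Moves are linked into chains (or cycles)
  // through _next/_prev according to which move overwrites which source.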
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMStorage _src;
    VMStorage _dst;
    bool _processed;
    MoveOperation* _next;
    MoveOperation* _prev;

   public:
    MoveOperation(VMStorage src, VMStorage dst):
      _src(src), _dst(dst), _processed(false), _next(nullptr), _prev(nullptr) {}

    const VMStorage& src() const { return _src; }
    const VMStorage& dst() const { return _dst; }
    MoveOperation* next() const { return _next; }
    MoveOperation* prev() const { return _prev; }
    void set_processed() { _processed = true; }
    bool is_processed() const { return _processed; }

    void break_cycle(VMStorage temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(temp_register, _dst);

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = nullptr;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      _dst = temp_register;
    }

    void link(KillerTable& killer) {
      // link this store in front of the store that it depends on
      MoveOperation** n = killer.get(_src);
      if (n != nullptr) {
        MoveOperation* src_killer = *n;
        assert(_next == nullptr && src_killer->_prev == nullptr, "shouldn't have been set yet");
        _next = src_killer;
        src_killer->_prev = this;
      }
    }

    Move as_move() {
      return {_src, _dst};
    }
  };

 private:
  const GrowableArray<VMStorage>& _in_regs;
  const GrowableArray<VMStorage>& _out_regs;
  VMStorage _tmp_vmreg;
  GrowableArray<MoveOperation*> _edges;
  GrowableArray<Move> _moves;

 public:
  ComputeMoveOrder(const GrowableArray<VMStorage>& in_regs,
                   const GrowableArray<VMStorage>& out_regs,
                   VMStorage tmp_vmreg) :
      _in_regs(in_regs),
      _out_regs(out_regs),
      _tmp_vmreg(tmp_vmreg),
      _edges(in_regs.length()),
      _moves(in_regs.length()) {
    assert(in_regs.length() == out_regs.length(),
           "stray registers? %d != %d", in_regs.length(), out_regs.length());
  }

  void compute() {
    for (int i = 0; i < _in_regs.length(); i++) {
      VMStorage in_reg = _in_regs.at(i);
      VMStorage out_reg = _out_regs.at(i);

      if (out_reg.is_stack() || out_reg.is_frame_data()) {
        // Move operations where the dest is the stack can all be
        // scheduled first since they can't interfere with the other moves.
        // The input and output stack spaces are distinct from each other.
        Move move{in_reg, out_reg};
        _moves.push(move);
      } else if (in_reg == out_reg) {
        // Can skip non-stack identity moves.
        continue;
      } else {
        _edges.append(new MoveOperation(in_reg, out_reg));
      }
    }
    // Break any cycles in the register moves and emit them in the
    // proper order.
    compute_store_order();
  }

  // Walk the edges breaking cycles between moves. The result list
  // can be walked in order to produce the proper set of loads
  void compute_store_order() {
    // Record which moves kill which registers
    KillerTable killer; // a map of VMStorage -> MoveOperation*
    for (int i = 0; i < _edges.length(); i++) {
      MoveOperation* s = _edges.at(i);
      assert(!killer.contains(s->dst()),
             "multiple moves with the same register as destination");
      killer.put(s->dst(), s);
    }
    assert(!killer.contains(_tmp_vmreg),
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < _edges.length(); i++) {
      _edges.at(i)->link(killer);
    }

    // at this point, all the move operations are chained together
    // in one or more doubly linked lists. Processing them backwards finds
    // the beginning of the chain, forwards finds the end. If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    for (int e = 0; e < _edges.length(); e++) {
      MoveOperation* s = _edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != nullptr && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          start->break_cycle(_tmp_vmreg);
        }
        // walk the chain forward inserting to store list
        while (start != nullptr) {
          _moves.push(start->as_move());

          start->set_processed();
          start = start->next();
        }
      }
    }
  }

  static GrowableArray<Move> compute_move_order(const GrowableArray<VMStorage>& in_regs,
                                                const GrowableArray<VMStorage>& out_regs,
                                                VMStorage tmp_vmreg) {
    ComputeMoveOrder cmo(in_regs, out_regs, tmp_vmreg);
    cmo.compute();
    return cmo._moves;
  }
};

ArgumentShuffle::ArgumentShuffle(
    const GrowableArray<VMStorage>& in_regs,
    const GrowableArray<VMStorage>& out_regs,
    VMStorage shuffle_temp) {
  _moves = ComputeMoveOrder::compute_move_order(in_regs, out_regs, shuffle_temp);
}