/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::caller_save_regmask;
RegMask Matcher::caller_save_regmask_exclude_soe;
RegMask Matcher::mh_caller_save_regmask;
RegMask Matcher::mh_caller_save_regmask_exclude_soe;
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _new_nodes(C->comp_arena()),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
  _reused(C->comp_arena()),
  _allocation_started(false),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {

  idealreg2spillmask  [Op_RegI] = nullptr;
  idealreg2spillmask  [Op_RegN] = nullptr;
  idealreg2spillmask  [Op_RegL] = nullptr;
  idealreg2spillmask  [Op_RegF] = nullptr;
  idealreg2spillmask  [Op_RegD] = nullptr;
  idealreg2spillmask  [Op_RegP] = nullptr;
  idealreg2spillmask  [Op_VecA] = nullptr;
  idealreg2spillmask  [Op_VecS] = nullptr;
  idealreg2spillmask  [Op_VecD] = nullptr;
  idealreg2spillmask  [Op_VecX] = nullptr;
  idealreg2spillmask  [Op_VecY] = nullptr;
  idealreg2spillmask  [Op_VecZ] = nullptr;
  idealreg2spillmask  [Op_RegFlags] = nullptr;
  idealreg2spillmask  [Op_RegVectMask] = nullptr;

  idealreg2debugmask  [Op_RegI] = nullptr;
  idealreg2debugmask  [Op_RegN] = nullptr;
  idealreg2debugmask  [Op_RegL] = nullptr;
  idealreg2debugmask  [Op_RegF] = nullptr;
  idealreg2debugmask  [Op_RegD] = nullptr;
  idealreg2debugmask  [Op_RegP] = nullptr;
  idealreg2debugmask  [Op_VecA] = nullptr;
  idealreg2debugmask  [Op_VecS] = nullptr;
  idealreg2debugmask  [Op_VecD] = nullptr;
  idealreg2debugmask  [Op_VecX] = nullptr;
  idealreg2debugmask  [Op_VecY] = nullptr;
  idealreg2debugmask  [Op_VecZ] = nullptr;
  idealreg2debugmask  [Op_RegFlags] = nullptr;
  idealreg2debugmask  [Op_RegVectMask] = nullptr;

  idealreg2mhdebugmask[Op_RegI] = nullptr;
  idealreg2mhdebugmask[Op_RegN] = nullptr;
  idealreg2mhdebugmask[Op_RegL] = nullptr;
  idealreg2mhdebugmask[Op_RegF] = nullptr;
  idealreg2mhdebugmask[Op_RegD] = nullptr;
  idealreg2mhdebugmask[Op_RegP] = nullptr;
  idealreg2mhdebugmask[Op_VecA] = nullptr;
  idealreg2mhdebugmask[Op_VecS] = nullptr;
  idealreg2mhdebugmask[Op_VecD] = nullptr;
  idealreg2mhdebugmask[Op_VecX] = nullptr;
  idealreg2mhdebugmask[Op_VecY] = nullptr;
  idealreg2mhdebugmask[Op_VecZ] = nullptr;
  idealreg2mhdebugmask[Op_RegFlags] = nullptr;
  idealreg2mhdebugmask[Op_RegVectMask] = nullptr;

  debug_only(_mem_node = nullptr;) // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) { // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      // Bailout. We do not have space to represent all arguments.
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
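
// Worked example (illustrative numbers, not from any particular platform):
// with _old_SP biased to stack slot 8 and C->out_preserve_stack_slots() == 2,
// an incoming argument in VMReg stack slot 3 warps to OptoReg slot
// 8 + 3 + 2 == 13, and _in_arg_limit is bumped to 14 if that is the highest
// slot seen so far.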

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}
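
// Worked example (illustrative): with fixed_slots() == 5,
// in_preserve_stack_slots() == 4 and a 2-slot stack alignment, the old SP
// lands at OptoReg::stack2reg(align_up(5 + 4, 2)) == OptoReg::stack2reg(10).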

#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  VectorSet visited;
  Unique_Node_List worklist;

  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention. Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      _parm_regs[i].set_bad();
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0. We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots. The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine. Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
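  // Illustrative example: if _in_arg_limit ended at slot 13 and
  // RegMask::SlotsPerLong is 2, _new_SP is rounded up to slot 14 so that
  // doubleword stack slots in the spill area stay pair-aligned.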

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    // Bailout. We do not have space to represent all arguments.
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing()) return; // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees. Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING, 1);

  // Create new ideal node ConP #null even if it already exists in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena* old = C->swap_old_and_new();

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != nullptr) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, nullptr));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  Node* const n = xform(C->top(), live_nodes);
  if (C->failing()) return;
  C->set_cached_top_node(n);

  Node* xroot = xform( C->root(), 1 );
  if (C->failing()) return;
  if (xroot == nullptr) {
    Matcher::soft_match_failure(); // recursive matching process failed
    assert(false, "instruction match failed");
    C->record_method_not_compilable("instruction match failed");
  } else {
    // During matching shared constants were attached to C->root()
    // because xroot wasn't available yet, so transfer the uses to
    // the xroot.
    for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
      Node* n = C->root()->fast_out(j);
      if (C->node_arena()->contains(n)) {
        assert(n->in(0) == C->root(), "should be control user");
        n->set_req(0, xroot);
        --j;
        --jmax;
      }
    }

    // Generate new mach node for ConP #null
    assert(new_ideal_null != nullptr, "sanity");
    _mach_null = match_tree(new_ideal_null);
    // Don't set control, it will confuse GCM since there are no uses.
    // The control will be set when this node is used first time
    // in find_base_for_derived().
    assert(_mach_null != nullptr, "");

    C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr);

#ifdef ASSERT
    verify_new_nodes_only(xroot);
#endif
  }

  if (C->top() == nullptr || C->root() == nullptr) {
    // New graph lost. This is due to a compilation failure we encountered earlier.
    stringStream ss;
    if (C->failure_reason() != nullptr) {
      ss.print("graph lost: %s", C->failure_reason());
    } else {
      assert(C->failure_reason() != nullptr, "graph lost: reason unknown");
      ss.print("graph lost: reason unknown");
    }
    C->record_method_not_compilable(ss.as_string());
  }
  if (C->failing()) {
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing()) return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers. However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions. It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

int Matcher::scalable_predicate_reg_slots() {
  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
         "scalable predicate vector should be supported");
  int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth of the size of
  // scalable vector register, one mask bit per vector byte.
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute number of slots which is required when scalable predicate
  // register is spilled. E.g. if scalable vector register is 640 bits,
  // predicate register is 80 bits, which is 2.5 slots.
  // We will round up the slot number to power of 2, which is required
  // by find_first_set().
  int slots = predicate_reg_bit_size & (BitsPerInt - 1)
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : predicate_reg_bit_size >> LogBitsPerInt;
  return round_up_power_of_2(slots);
}
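
// Worked example (illustrative): a 640-bit scalable vector register gives a
// 640/8 == 80-bit predicate register. 80 & (BitsPerInt-1) != 0, so
// slots == (80 >> LogBitsPerInt) + 1 == 3, and round_up_power_of_2(3) == 4
// slots are reserved per predicate spill.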

#define NOF_STACK_MASKS (3*13)
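// Note: 3*13 covers the three mask tables built below (spill, debug and
// MH-debug), each indexed by the 13 ideal register classes handled here
// (6 scalar classes, 6 vector sizes and RegVectMask), i.e. rms[0]..rms[38].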

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecA] = &rms[18];
  idealreg2spillmask  [Op_VecS] = &rms[19];
  idealreg2spillmask  [Op_VecD] = &rms[20];
  idealreg2spillmask  [Op_VecX] = &rms[21];
  idealreg2spillmask  [Op_VecY] = &rms[22];
  idealreg2spillmask  [Op_VecZ] = &rms[23];

  idealreg2debugmask  [Op_VecA] = &rms[24];
  idealreg2debugmask  [Op_VecS] = &rms[25];
  idealreg2debugmask  [Op_VecD] = &rms[26];
  idealreg2debugmask  [Op_VecX] = &rms[27];
  idealreg2debugmask  [Op_VecY] = &rms[28];
  idealreg2debugmask  [Op_VecZ] = &rms[29];

  idealreg2mhdebugmask[Op_VecA] = &rms[30];
  idealreg2mhdebugmask[Op_VecS] = &rms[31];
  idealreg2mhdebugmask[Op_VecD] = &rms[32];
  idealreg2mhdebugmask[Op_VecX] = &rms[33];
  idealreg2mhdebugmask[Op_VecY] = &rms[34];
  idealreg2mhdebugmask[Op_VecZ] = &rms[35];

  idealreg2spillmask  [Op_RegVectMask] = &rms[36];
  idealreg2debugmask  [Op_RegVectMask] = &rms[37];
  idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
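  // At this point FIRST_STACK_mask covers the incoming-argument slots
  // [init_in, _in_arg_limit) and every representable slot from _out_arg_limit
  // up (plus the AllStack bit); the outgoing-argument area in between is
  // deliberately excluded so spills and debug info cannot land on a callee's
  // outgoing arguments.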

  // Make spill masks. Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
  RegMask scalable_stack_mask = aligned_stack_mask;

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::has_predicated_vectors()) {
    *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
     idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  } else {
    *idealreg2spillmask[Op_VecS] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecD] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecX] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecY] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
  }

  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    if (Matcher::has_predicated_vectors()) {
      // Exclude last input arg stack slots to avoid spilling vector register there,
      // otherwise RegVectMask spills could stomp over stack slots in caller frame.
      for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
        scalable_stack_mask.Remove(in);
        in = OptoReg::add(in, -1);
      }

      // For RegVectMask
      scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
      *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
       idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
    }

    // Exclude last input arg stack slots to avoid spilling vector register there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
      scalable_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }

    // For VecA
    scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
    assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
     idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecA] = RegMask::Empty;
  }

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64 bit moves that
    // will kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks. Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2debugmask  [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2debugmask  [Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2debugmask  [Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2debugmask  [Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2debugmask  [Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2debugmask  [Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2debugmask  [Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  // Prevent stub compilations from attempting to reference
  // callee-saved (SOE) registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();
  RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
  RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;

  idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);

  idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);

  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);

  idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry(int reg) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A'; // Save-on-entry register?
}
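
// Sketch of the save-policy convention assumed here (the letters come from
// the ADLC-generated tables; see also init_spill_mask() below, which treats
// 'C' and 'A' as caller-save): 'E' = save-on-entry (callee-save),
// 'A' = always-save, 'C' = save-on-call. Only 'E' and 'A' registers need the
// save-on-entry fixup performed below.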

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root(); // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism. Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism. Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;       break;
      case Op_Rethrow  : exit->_in_rms = reth_rms;      break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms;      break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers. Make projections from Start for
  // them, and give them a use at the exit points. To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD? Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2; // Skip 2 for doubles
      }
      else if( (i&1) == 1 && // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2; // Skip 2 for longs
      }
      else if( (i&1) == 1 && // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);

    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A') {
      caller_save_regmask.Insert(i);
      mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        _register_save_policy[i] == 'E') {
      caller_save_regmask_exclude_soe.Insert(i);
      mh_caller_save_regmask_exclude_soe.Insert(i);
    }
  }

  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
  mh_caller_save_regmask.OR(sp_save_mask);
  mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);

  // Grab the Frame Pointer
  Node *fp = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
  idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}
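
// Once initialized, these memoized masks are cheap to consult; e.g. (sketch)
//   const RegMask* rm = Matcher::idealreg2regmask[Op_RegI];
// yields the ADLC-derived register mask for an int value without matching a
// fresh load every time.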

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases) return; // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != nullptr) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery. Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_VectorizedHashCode:
    case Op_CountPositives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = nullptr;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space. Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, nullptr, -1); // set null as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return nullptr;
    n = mstack.node(); // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) { // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special. They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing()) return nullptr;
            if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; }
            if (n->is_MemBar()) {
              m->as_MachMemBar()->set_adr_type(n->adr_type());
            }
          } else { // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) { // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
              NOT_PRODUCT(record_new2old(m, n);)
              if (m->in(0) != nullptr) // m might be top
                collect_null_checks(m, n);
            } else { // Else just a regular 'ol guy
              m = n->clone(); // So just clone into new-space
              NOT_PRODUCT(record_new2old(m, n);)
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m); // Map old to new
          if (_old_node_note_array != nullptr) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m; // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == nullptr) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == nullptr || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info. Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i); // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
          NOT_PRODUCT(record_new2old(m, n));
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i); // Get input
        if(m != nullptr)
          mstack.push(m, Visit, n, i);
      }
    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != nullptr) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0) {
          p->set_req(i, n); // required input
        } else if (i == -1) {
          p->add_prec(n); // precedence input
        } else {
          ShouldNotReachHere();
        }
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
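
// Shape of the walk above, as a sketch: every node is pushed in state Visit,
// matched or cloned into new-space, then seen again in state Post_Visit once
// all of its inputs have been processed, at which point the parent edge is
// wired up via set_req()/add_prec(). The explicit MStack replaces recursion
// so that very deep ideal graphs cannot overflow the native stack.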

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      // Bailout. For example not enough space on stack for all arguments. Happens for methods with too many arguments.
      C->record_method_not_compilable("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
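
// Worked example (illustrative): with begin_out_arg_area at slot 14, an
// outgoing argument in VMReg stack slot 2 is biased to slot 14 + 2 == 16,
// and out_arg_limit_per_call grows to 17 if that is the largest slot used
// by this call site.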

//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions. Calls match special.
// They match alone with no children. Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = nullptr;
  MachCallNode *mcall = nullptr;
  uint cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod* method = nullptr;
  bool is_method_handle_invoke = false; // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing()) return nullptr;
    if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf( call->tf());
    mcall->set_entry_point( call->entry_point());
    mcall->set_cnt( call->cnt());
    mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java = mcall->as_MachCallJava();
      const CallJavaNode *call_java = call->as_CallJava();
      assert(call_java->validate_symbolic_info(), "inconsistent info");
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
      mcall_java->_arg_escape = call_java->arg_escape();
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
          call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
          call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
      mach_call_rt->_name = call->as_CallRuntime()->_name;
      mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    MachNode *mn = match_tree(sfpt);
    if (C->failing()) return nullptr;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }
  msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks. These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // Place where the first outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != nullptr && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());

  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) { // Skip it all if we have no args
    BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention. Really handy during
    // the initial porting effort. Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
            !parm_regs[i].second()->is_valid() ) continue;
        VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
              !parm_regs[j].second()->is_valid() ) continue;
          VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
            assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
            assert( !reg4->is_valid(), "valid halvsies" );
          } else {
            assert( reg1 != reg2, "calling conv. must produce distinct regs");
            assert( reg1 != reg3, "calling conv. must produce distinct regs");
            assert( reg1 != reg4, "calling conv. must produce distinct regs");
            assert( reg2 != reg3, "calling conv. must produce distinct regs");
            assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
            assert( reg3 != reg4, "calling conv. must produce distinct regs");
          }
        }
      }
    }
#endif
1405
    // Visit each argument.  Compute its outgoing register mask.
    // Return results can now have 2 bits returned.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      VMReg first = parm_regs[i].first();
      VMReg second = parm_regs[i].second();
      if(!first->is_valid() &&
         !second->is_valid()) {
        continue;               // Avoid Halves
      }
      // Handle case where arguments are in vector registers.
      if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
        OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
        OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
        assert(reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
        for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
          rm->Insert(r);
        }
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments
  }

  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  // Kill the outgoing argument area, including any non-argument holes and
  // any legacy C-killed slots.  Use Fat-Projections to do the killing.
  // Since the max-per-method covers the max-per-call-site and debug info
  // is excluded on the max-per-method basis, debug info cannot land in
  // this killed area.
  uint r_cnt = mcall->tf()->range()->cnt();
  MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
  if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
    // Bailout.  We do not have space to represent all arguments.
    C->record_method_not_compilable("unsupported outgoing calling sequence");
  } else {
    for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
      proj->_rout.Insert(OptoReg::Name(i));
  }
  if (proj->_rout.is_NotEmpty()) {
    push_projection(proj);
  }

  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert((mcall == nullptr) || (mcall->jvms() == nullptr) ||
         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");

  // Add additional edges.
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we cannot add MachConstantBase in expand(), as the
    // ins are not complete then.
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
      // We added an edge before jvms, so we must adapt the position of the ins.
      msfpt->jvms()->adapt_position(+1);
    }
  }

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
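//
// Illustrative sketch (not part of the build): given an Ideal subtree such as
//
//   StoreI mem addr (AddI (LoadI mem addr) con)
//
// match_tree labels every node bottom-up with its cheapest DFA rules and then
// reduces the whole tree into a single MachNode (e.g. an x86 add-to-memory
// form), provided the Store and the Load agree on the same Memory input.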
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : nullptr;

  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = nullptr;
  s->_kids[1] = nullptr;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Node* root_mem = mem;
  Label_Root(n, s, n->in(0), root_mem);
  if (C->failing()) return nullptr;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
  for (i = 0; i < NUM_OPERANDS; i++) {
    if (s->valid(i) &&               // valid entry and
        s->cost(i) < cost &&         // low cost and
        s->rule(i) >= NUM_OPERANDS) {// not an operand
      mincost = i;
      cost = s->cost(i);
    }
  }
  if (mincost == max_juint) {
    tty->print("No matching rule for:");
    Matcher::soft_match_failure();
    return nullptr;
  }
  // Reduce input tree based upon the state labels to machine Nodes
  MachNode *m = ReduceInst(s, s->rule(mincost), mem);
  // New-to-old mapping is done in ReduceInst, to cover complex instructions.
  NOT_PRODUCT(_old2new_map.map(n->_idx, m);)

  // Add any Matcher-ignored edges
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }

  debug_only( _mem_node = save_mem_node; )
  return m;
}
//------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or as part of the current
// match tree.  Return true for requiring a register and false for matching
// as part of the current match tree.
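//
// Illustrative sketch (not part of the build): in a load/op/store tree such as
//
//   StoreI ctrl  mem addr (AddI (LoadI ctrl' mem addr) con)
//
// the Load's control ctrl' may differ from the Store's ctrl.  The scan below
// still accepts the subtree when one control post-dominates the other within
// a few steps, because the Load cannot float above its Memory input anyway.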
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
  const Type *t = m->bottom_type();

  if (t->singleton()) {
    // Never force constants into registers.  Allow them to match as
    // constants or registers.  Copies of the same value will share
    // the same register.  See find_shared_node.
    return false;
  } else {                      // Not a constant
    // Stop recursion if they have different Controls.
    Node* m_control = m->in(0);
    // Control of load's memory can post-dominate load's control.
    // So use it since load can't float above its memory.
    Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr;
    if (control && m_control && control != m_control && control != mem_control) {

      // Actually, we can live with the most conservative control we
      // find, if it post-dominates the others.  This allows us to
      // pick up load/op/store trees where the load can float a little
      // above the store.
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for (j=0; j<max_scan; j++) {
        if (x->is_Region())     // Bail out at merge points
          return true;
        x = x->in(0);           // Move up the chain
        if (x == m_control)     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
        if (x == mem_control)   // Does 'control' post-dominate
          break;                // mem_control?  If so, we can use it
      }
      if (j == max_scan)        // No post-domination before scan end?
        return true;            // Then break the match tree up
    }
    if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
        (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
      // These are commonly used in address expressions and can
      // efficiently fold into them on X64 in some cases.
      return false;
    }
  }

  // Not forceable cloning.  If shared, put it into a register.
  return shared;
}
//------------------------------Instruction Selection--------------------------
// The Label method walks a "tree" of nodes, using the ADLC generated DFA to
// match ideal nodes to machine instructions.  Trees are delimited by shared
// Nodes, things the Matcher does not match (e.g., Memory), and things with
// different Controls (hence forced into different blocks).  We pass in the
// Control selected for this entire State tree.
//
// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
// Store and the Load must have identical Memories (as well as identical
// pointers).  Since the Matcher does not have anything for Memory (and
// does not handle DAGs), I have to match the Memory input myself.  If the
// Tree root is a Store or if there are multiple Loads in the tree, I require
// all Loads to have identical Memory.
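//
// Illustrative sketch (not part of the build): the x86 add-to-memory form
//
//   StoreI mem addr (AddI (LoadI mem addr) con)   =>   add [addr], con
//
// is really a DAG, because mem and addr each feed both the Load and the Store.
// Label_Root threads the shared Memory through the mem reference parameter, so
// a second, conflicting memory access inside the same tree forces a split.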
Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
  // Since Label_Root is a recursive function, it's possible that we might run
  // out of stack space.  See bugs 6272980 & 6227033 for more info.
  LabelRootDepth++;
  if (LabelRootDepth > MaxLabelRootDepth) {
    // Bailout.  Can for example be hit with a deep chain of operations.
    C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
    return nullptr;
  }
  uint care = 0;                // Edges matcher cares about
  uint cnt = n->req();
  uint i = 0;

  // Examine children for memory state
  // Can only subsume a child into your match-tree if that child's memory state
  // is not modified along the path to another input.
  // It is unsafe even if the other inputs are separate roots.
  Node *input_mem = nullptr;
  for( i = 1; i < cnt; i++ ) {
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    assert( m, "expect non-null children" );
    if( m->is_Load() ) {
      if( input_mem == nullptr ) {
        input_mem = m->in(MemNode::Memory);
        if (mem == (Node*)1) {
          // Save this memory to bail out if there's another memory access
          // to a different memory location in the same tree.
          mem = input_mem;
        }
      } else if( input_mem != m->in(MemNode::Memory) ) {
        input_mem = NodeSentinel;
      }
    }
  }

  for( i = 1; i < cnt; i++ ){   // For my children
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    // Allocate states out of a private arena
    State *s = new (&_states_arena) State;
    svec->_kids[care++] = s;
    assert( care <= 2, "binary only for now" );

    // Recursively label the State tree.
    s->_kids[0] = nullptr;
    s->_kids[1] = nullptr;
    s->_leaf = m;

    // Check for leaves of the State Tree; things that cannot be a part of
    // the current tree.  If it finds any, that value is matched as a
    // register operand.  If not, then the normal matching is used.
    if( match_into_reg(n, m, control, i, is_shared(m)) ||
        // Stop recursion if this is a LoadNode and there is another memory access
        // to a different memory location in the same tree (for example, a StoreNode
        // at the root of this tree or another LoadNode in one of the children).
        ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
        // Can NOT include the match of a subtree when its memory state
        // is used by any of the other subtrees
        (input_mem == NodeSentinel) ) {
      // Print when we exclude matching due to different memory states at input-loads
      if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
          && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
        tty->print_cr("invalid input_mem");
      }
      // Switch to a register-only opcode; this value must be in a register
      // and cannot be subsumed as part of a larger instruction.
      s->DFA( m->ideal_reg(), m );

    } else {
      // If match tree has no control and we do, adopt it for entire tree
      if( control == nullptr && m->in(0) != nullptr && m->req() > 1 )
        control = m->in(0);     // Pick up control
      // Else match as a normal part of the match tree.
      control = Label_Root(m, s, control, mem);
      if (C->failing()) return nullptr;
    }
  }

  // Call DFA to match this node, and return
  svec->DFA( n->Opcode(), n );

#ifdef ASSERT
  uint x;
  for( x = 0; x < _LAST_MACH_OPER; x++ )
    if( svec->valid(x) )
      break;

  if (x >= _LAST_MACH_OPER) {
    n->dump();
    svec->dump();
    assert( false, "bad AD file" );
  }
#endif
  return control;
}
// Con nodes reduced using the same rule can share their MachNode
// which reduces the number of copies of a constant in the final
// program.  The register allocator is free to split uses later to
// split live ranges.
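//
// Illustrative sketch (not part of the build): if two ConI #7 nodes are both
// reduced with the same loadConI rule, the second reduction simply returns the
// MachNode built for the first one, so the constant is materialized once; the
// allocator may still split the resulting live range later if that is cheaper.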
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
  if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr;

  // See if this Con has already been reduced using this rule.
  if (_shared_nodes.max() <= leaf->_idx) return nullptr;
  MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
  if (last != nullptr && rule == last->rule()) {
    // Don't expect control change for DecodeN
    if (leaf->is_DecodeNarrowPtr())
      return last;
    // Get the new space root.
    Node* xroot = new_node(C->root());
    if (xroot == nullptr) {
      // This shouldn't happen given the order of matching.
      return nullptr;
    }

    // Shared constants need to have their control be root so they
    // can be scheduled properly.
    Node* control = last->in(0);
    if (control != xroot) {
      if (control == nullptr || control == C->root()) {
        last->set_req(0, xroot);
      } else {
        assert(false, "unexpected control");
        return nullptr;
      }
    }
    return last;
  }
  return nullptr;
}
//------------------------------ReduceInst-------------------------------------
// Reduce a State tree (with given Control) into a tree of MachNodes.
// This routine (and its cohort ReduceOper) converts Ideal Nodes into
// complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
// Each MachNode has a number of complicated MachOper operands; each
// MachOper also covers a further tree of Ideal Nodes.
//
// The root of the Ideal match tree is always an instruction, so we enter
// the recursion here.  After building the MachNode, we need to recurse
// the tree checking for these cases:
// (1) Child is an instruction -
//     Build the instruction (recursively), add it as an edge.
//     Build a simple operand (register) to hold the result of the instruction.
// (2) Child is an interior part of an instruction -
//     Skip over it (do nothing)
// (3) Child is the start of an operand -
//     Build the operand, place it inside the instruction
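//
// Illustrative sketch (not part of the build): reducing the labeled tree for
//
//   AddI (LoadI mem addr) (ConI #7)
//
// with an add-from-memory rule builds one MachNode; the LoadI is folded into a
// memory operand and the ConI into an immediate operand (case 3), while a
// child that matched as its own instruction would instead be built recursively
// and attached as an input edge (case 1).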
MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
  assert( rule >= NUM_OPERANDS, "called with operand rule" );

  MachNode* shared_node = find_shared_node(s->_leaf, rule);
  if (shared_node != nullptr) {
    return shared_node;
  }

  // Build the object to represent this state & prepare for recursive calls
  MachNode *mach = s->MachNodeGenerator(rule);
  guarantee(mach != nullptr, "Missing MachNode");
  mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
  assert( mach->_opnds[0] != nullptr, "Missing result operand" );
  Node *leaf = s->_leaf;
  NOT_PRODUCT(record_new2old(mach, leaf);)
  // Check for instruction or instruction chain rule
  if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
    assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
           "duplicating node that's already been matched");
    // Instruction
    mach->add_req( leaf->in(0) ); // Set initial control
    // Reduce interior of complex instruction
    ReduceInst_Interior( s, rule, mem, mach, 1 );
  } else {
    // Instruction chain rules are data-dependent on their inputs
    mach->add_req(nullptr);     // Set initial control to none
    ReduceInst_Chain_Rule( s, rule, mem, mach );
  }

  // If a Memory was used, insert a Memory edge
  if( mem != (Node*)1 ) {
    mach->ins_req(MemNode::Memory,mem);
#ifdef ASSERT
    // Verify adr type after matching memory operation
    const MachOper* oper = mach->memory_operand();
    if (oper != nullptr && oper != (MachOper*)-1) {
      // It has a unique memory operand.  Find corresponding ideal mem node.
      Node* m = nullptr;
      if (leaf->is_Mem()) {
        m = leaf;
      } else {
        m = _mem_node;
        assert(m != nullptr && m->is_Mem(), "expecting memory node");
      }
      const Type* mach_at = mach->adr_type();
      // DecodeN node consumed by an address may have different type
      // than its input.  Don't compare types for such case.
      if (m->adr_type() != mach_at &&
          (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
        mach_at = m->adr_type();
      }
      if (m->adr_type() != mach_at) {
        m->dump();
        tty->print_cr("mach:");
        mach->dump(1);
      }
      assert(m->adr_type() == mach_at, "matcher should not change adr type");
    }
#endif
  }

  // If the _leaf is an AddP, insert the base edge
  if (leaf->is_AddP()) {
    mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
  }

  uint number_of_projections_prior = number_of_projections();

  // Perform any 1-to-many expansions required
  MachNode *ex = mach->Expand(s, _projection_list, mem);
  if (ex != mach) {
    assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
    if( ex->in(1)->is_Con() )
      ex->in(1)->set_req(0, C->root());
    // Remove old node from the graph
    for( uint i=0; i<mach->req(); i++ ) {
      mach->set_req(i,nullptr);
    }
    NOT_PRODUCT(record_new2old(ex, s->_leaf);)
  }

  // PhaseChaitin::fixup_spills will sometimes generate spill code
  // via the matcher.  By that time, nodes have been wired into the CFG,
  // and any further nodes generated by expand rules will be left hanging
  // in space, and will not get emitted as output code.  Catch this.
  // Also, catch any new register allocation constraints ("projections")
  // generated belatedly during spill code generation.
  if (_allocation_started) {
    guarantee(ex == mach, "no expand rules during spill generation");
    guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
  }

  if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
    // Record the con for sharing
    _shared_nodes.map(leaf->_idx, ex);
  }

  // Have mach nodes inherit GC barrier data
  mach->set_barrier_data(MemNode::barrier_data(leaf));

  return ex;
}
void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
  for (uint i = n->req(); i < n->len(); i++) {
    if (n->in(i) != nullptr) {
      mach->add_prec(n->in(i));
    }
  }
}

void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
  // 'op' is what I am expecting to receive
  int op = _leftOp[rule];
  // Operand type to catch child's result
  // This is what my child will give me.
  unsigned int opnd_class_instance = s->rule(op);
  // Choose between operand class or not.
  // This is what I will receive.
  int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
  // New rule for child.  Chase operand classes to get the actual rule.
  unsigned int newrule = s->rule(catch_op);

  if (newrule < NUM_OPERANDS) {
    // Chain from operand or operand class, may be output of shared node
    assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
    // Insert operand into array of operands for this instruction
    mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);

    ReduceOper(s, newrule, mem, mach);
  } else {
    // Chain from the result of an instruction
    assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
    mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
    Node *mem1 = (Node*)1;
    debug_only(Node *save_mem_node = _mem_node;)
    mach->add_req( ReduceInst(s, newrule, mem1) );
    debug_only(_mem_node = save_mem_node;)
  }
}
uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
  handle_precedence_edges(s->_leaf, mach);

  if( s->_leaf->is_Load() ) {
    Node *mem2 = s->_leaf->in(MemNode::Memory);
    assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
    debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
    mem = mem2;
  }
  if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
    if( mach->in(0) == nullptr )
      mach->set_req(0, s->_leaf->in(0));
  }

  // Now recursively walk the state tree & add operand list.
  for( uint i=0; i<2; i++ ) {   // binary tree
    State *newstate = s->_kids[i];
    if( newstate == nullptr ) break;      // Might only have 1 child
    // 'op' is what I am expecting to receive
    int op;
    if( i == 0 ) {
      op = _leftOp[rule];
    } else {
      op = _rightOp[rule];
    }
    // Operand type to catch child's result
    // This is what my child will give me.
    int opnd_class_instance = newstate->rule(op);
    // Choose between operand class or not.
    // This is what I will receive.
    int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
    // New rule for child.  Chase operand classes to get the actual rule.
    int newrule = newstate->rule(catch_op);

    if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
      // Operand/operandClass
      // Insert operand into array of operands for this instruction
      mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
      ReduceOper(newstate, newrule, mem, mach);

    } else {                    // Child is internal operand or new instruction
      if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
        // internal operand --> call ReduceInst_Interior
        // Interior of complex instruction.  Do nothing but recurse.
        num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
      } else {
        // instruction --> call build operand( ) to catch result
        //             --> ReduceInst( newrule )
        mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
        Node *mem1 = (Node*)1;
        debug_only(Node *save_mem_node = _mem_node;)
        mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
        debug_only(_mem_node = save_mem_node;)
      }
    }
    assert( mach->_opnds[num_opnds-1], "" );
  }
  return num_opnds;
}
// This routine walks the interior of possible complex operands.
// At each point we check our children in the match tree:
// (1) We are a leaf; add the _leaf field as an input to the MachNode
// (2) Child is an internal operand -
//     Skip over it ( do nothing )
// (3) Child is an instruction -
//     Call ReduceInst recursively and add the resulting
//     instruction as an input to the MachNode
void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
  assert( rule < _LAST_MACH_OPER, "called with operand rule" );
  State *kid = s->_kids[0];
  assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" );

  // Leaf?  And not subsumed?
  if( kid == nullptr && !_swallowed[rule] ) {
    mach->add_req( s->_leaf );  // Add leaf pointer
    return;                     // Bail out
  }

  if( s->_leaf->is_Load() ) {
    assert( mem == (Node*)1, "multiple Memories being matched at once?" );
    mem = s->_leaf->in(MemNode::Memory);
    debug_only(_mem_node = s->_leaf;)
  }

  handle_precedence_edges(s->_leaf, mach);

  if( s->_leaf->in(0) && s->_leaf->req() > 1) {
    if( !mach->in(0) )
      mach->set_req(0,s->_leaf->in(0));
    else {
      assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
    }
  }

  for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) {   // binary tree
    int newrule;
    if( i == 0 )
      newrule = kid->rule(_leftOp[rule]);
    else
      newrule = kid->rule(_rightOp[rule]);

    if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
      // Internal operand; recurse but do nothing else
      ReduceOper(kid, newrule, mem, mach);

    } else {                    // Child is a new instruction
      // Reduce the instruction, and add a direct pointer from this
      // machine instruction to the newly reduced one.
      Node *mem1 = (Node*)1;
      debug_only(Node *save_mem_node = _mem_node;)
      mach->add_req( ReduceInst( kid, newrule, mem1 ) );
      debug_only(_mem_node = save_mem_node;)
    }
  }
}
// -------------------------------------------------------------------------
// Java-Java calling convention
// (what you use when Java calls Java)

//------------------------------find_receiver----------------------------------
// For a given signature, return the OptoReg for parameter 0.
OptoReg::Name Matcher::find_receiver() {
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
  // Return argument 0 register.  In the LP64 build pointers
  // take 2 registers, but the VM wants only the 'main' name.
  return OptoReg::as_OptoReg(regs.first());
}

bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
  if (n != nullptr && m != nullptr) {
    return VectorNode::is_vector_shift(n) &&
           VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
  }
  return false;
}
bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Must clone all producers of flags, or we will not match correctly.
  // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
  // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
  // are also there, so we may match a float-branch to int-flags and
  // expect the allocator to haul the flags from the int-side to the
  // fp-side.  No can do.
  if (_must_clone[m->Opcode()]) {
    mstack.push(m, Visit);
    return true;
  }
  return pd_clone_node(n, m, mstack);
}

bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  Node *off = m->in(AddPNode::Offset);
  if (off->is_Con()) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    // Clone X+offset as it also folds into most addressing expressions
    mstack.push(off, Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
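// Illustrative sketch (not part of the build): cloning an address expression
// such as (AddP base addr #16) into each of its memory users lets every load
// or store fold it into an addressing mode, e.g. 'movl 16(%rbase), %dst' on
// x86, instead of keeping a separate add alive in a shared register.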
// A method-klass-holder may be passed in the inline_cache_reg
// and then expanded into the inline_cache_reg and a method_ptr register
//   defined in ad_<arch>.cpp

//------------------------------find_shared------------------------------------
// Set bits if Node is shared or otherwise a root
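//
// Illustrative sketch (not part of the build): in
//
//   t = AddI a b;   StoreI mem p t;   u = MulI t c;
//
// the AddI has two users, so find_shared marks it shared; it then matches into
// its own register-producing instruction rather than being swallowed into both
// the Store's and the Mul's match trees.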
void Matcher::find_shared(Node* n) {
  // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
  MStack mstack(C->live_nodes() * 2);
  // Mark nodes as address_visited if they are inputs to an address expression
  VectorSet address_visited;
  mstack.push(n, Visit);     // Don't need to pre-visit root node
  while (mstack.is_nonempty()) {
    n = mstack.node();       // Leave node on stack
    Node_State nstate = mstack.state();
    uint nop = n->Opcode();
    if (nstate == Pre_Visit) {
      if (address_visited.test(n->_idx)) { // Visited in address already?
        // Flag as visited and shared now.
        set_visited(n);
      }
      if (is_visited(n)) {   // Visited already?
        // Node is shared and has no reason to clone.  Flag it as shared.
        // This causes it to match into a register for the sharing.
        set_shared(n);       // Flag as shared and
        if (n->is_DecodeNarrowPtr()) {
          // Oop field/array element loads must be shared but since
          // they are shared through a DecodeN they may appear to have
          // a single use so force sharing here.
          set_shared(n->in(1));
        }
        mstack.pop();        // remove node from stack
        continue;
      }
      nstate = Visit;        // Not already visited; so visit now
    }
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      set_visited(n);        // Flag as visited now
      bool mem_op = false;
      int mem_addr_idx = MemNode::Address;
      if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
        continue;
      }
      for (int i = n->req() - 1; i >= 0; --i) { // For my children
        Node* m = n->in(i);  // Get ith input
        if (m == nullptr) {
          continue;          // Ignore nulls
        }
        if (clone_node(n, m, mstack)) {
          continue;
        }

        // Clone addressing expressions as they are "free" in memory access instructions
        if (mem_op && i == mem_addr_idx && m->is_AddP() &&
            // When there are other uses besides address expressions
            // put it on stack and mark as shared.
            !is_visited(m)) {
          // Some inputs for address expression are not put on stack
          // to avoid marking them as shared and forcing them into register
          // if they are used only in address expressions.
          // But they should be marked as shared if there are other uses
          // besides address expressions.
          if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
            continue;
          }
        }
        mstack.push(m, Pre_Visit);
      }     // for(int i = ...)
    }
    else if (nstate == Alt_Post_Visit) {
      mstack.pop();          // Remove node from stack
      // We cannot remove the Cmp input from the Bool here, as the Bool may be
      // shared and all users of the Bool need to move the Cmp in parallel.
      // This leaves both the Bool and the If pointing at the Cmp.  To
      // prevent the Matcher from trying to Match the Cmp along both paths
      // BoolNode::match_edge always returns a zero.

      // We reorder the Op_If in a pre-order manner, so we can visit without
      // accidentally sharing the Cmp (the Bool and the If make 2 users).
      n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
    }
    else if (nstate == Post_Visit) {
      mstack.pop();          // Remove node from stack

      // Now hack a few special opcodes
      uint opcode = n->Opcode();
      bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
      if (!gc_handled) {
        find_shared_post_visit(n, opcode);
      }
    }
    else {
      ShouldNotReachHere();
    }
  } // end of while (mstack.is_nonempty())
}
bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
  switch(opcode) {  // Handle some opcodes special
  case Op_Phi:             // Treat Phis as shared roots
  case Op_Proj:            // All handled specially during matching
  case Op_SafePointScalarObject:
    set_shared(n);
    set_dontcare(n);
    break;
  case Op_If:
  case Op_CountedLoopEnd:
    mstack.set_state(Alt_Post_Visit); // Alternative way
    // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
    // with matching cmp/branch in 1 instruction.  The Matcher needs the
    // Bool and CmpX side-by-side, because it can only get at constants
    // that are at the leaves of Match trees, and the Bool's condition acts
    // as a constant here.
    mstack.push(n->in(1), Visit);         // Clone the Bool
    mstack.push(n->in(0), Pre_Visit);     // Visit control input
    return true; // while (mstack.is_nonempty())
  case Op_ConvI2D:         // These forms efficiently match with a prior
  case Op_ConvI2F:         //   Load but not a following Store
    if( n->in(1)->is_Load() &&        // Prior load
        n->outcnt() == 1 &&           // Not already shared
        n->unique_out()->is_Store() ) // Following store
      set_shared(n);       // Force it to be a root
    break;
  case Op_ReverseBytesI:
  case Op_ReverseBytesL:
    if( n->in(1)->is_Load() &&        // Prior load
        n->outcnt() == 1 )            // Not already shared
      set_shared(n);                  // Force it to be a root
    break;
  case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
  case Op_NeverBranch:
    set_dontcare(n);
    break;
  case Op_Jump:
    mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
    mstack.push(n->in(0), Pre_Visit);     // Visit Control input
    return true; // while (mstack.is_nonempty())
  case Op_StrIndexOfChar:
  case Op_VectorizedHashCode:
  case Op_CountPositives:
  case Op_StrInflatedCopy:
  case Op_StrCompressedCopy:
  case Op_EncodeISOArray:
  case Op_MacroLogicV:
  case Op_VectorCmpMasked:
  case Op_VectorLoadMask:
    set_shared(n); // Force result into register (it will be anyways)
    break;
  case Op_ConP: {  // Convert pointers above the centerline to NULL
    TypeNode *tn = n->as_Type(); // Constants derive from type nodes
    const TypePtr* tp = tn->type()->is_ptr();
    if (tp->_ptr == TypePtr::AnyNull) {
      tn->set_type(TypePtr::NULL_PTR);
    }
    break;
  }
  case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
    TypeNode *tn = n->as_Type(); // Constants derive from type nodes
    const TypePtr* tp = tn->type()->make_ptr();
    if (tp && tp->_ptr == TypePtr::AnyNull) {
      tn->set_type(TypeNarrowOop::NULL_PTR);
    }
    break;
  }
  case Op_Binary:  // These are introduced in the Post_Visit state.
    ShouldNotReachHere();
    break;
  default:
    if( n->is_Store() ) {
      // Do match stores, despite no ideal reg
      mem_op = true;
      break;
    }
    if( n->is_Mem() ) { // Loads and LoadStores
      mem_op = true;
      // Loads must be root of match tree due to prior load conflict
      if( C->subsume_loads() == false )
        set_shared(n);
    }
    // Fall into default case
    if( !n->ideal_reg() )
      set_dontcare(n);  // Unmatchable Nodes
  } // end_switch
  return false;
}
void Matcher::find_shared_post_visit(Node* n, uint opcode) {
  if (n->is_predicated_vector()) {
    // Restructure into binary trees for Matching.
    if (n->req() == 4) {
      n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
      n->set_req(2, n->in(3));
      n->del_req(3);
    } else if (n->req() == 5) {
      n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
      n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
      n->del_req(4);
      n->del_req(3);
    } else if (n->req() == 6) {
      Node* b3 = new BinaryNode(n->in(4), n->in(5));
      Node* b2 = new BinaryNode(n->in(3), b3);
      Node* b1 = new BinaryNode(n->in(2), b2);
      n->set_req(2, b1);
      n->del_req(5);
      n->del_req(4);
      n->del_req(3);
    }
    return;
  }
  switch(opcode) {           // Handle some opcodes special
  case Op_CompareAndExchangeB:
  case Op_CompareAndExchangeS:
  case Op_CompareAndExchangeI:
  case Op_CompareAndExchangeL:
  case Op_CompareAndExchangeP:
  case Op_CompareAndExchangeN:
  case Op_WeakCompareAndSwapB:
  case Op_WeakCompareAndSwapS:
  case Op_WeakCompareAndSwapI:
  case Op_WeakCompareAndSwapL:
  case Op_WeakCompareAndSwapP:
  case Op_WeakCompareAndSwapN:
  case Op_CompareAndSwapB:
  case Op_CompareAndSwapS:
  case Op_CompareAndSwapI:
  case Op_CompareAndSwapL:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN: { // Convert trinary to binary-tree
    Node* newval = n->in(MemNode::ValueIn);
    Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
    Node* pair = new BinaryNode(oldval, newval);
    n->set_req(MemNode::ValueIn, pair);
    n->del_req(LoadStoreConditionalNode::ExpectedIn);
    break;
  }
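  // Illustrative sketch (not part of the build): the trinary input list
  //
  //   CompareAndSwapI mem adr oldval newval
  //
  // becomes
  //
  //   CompareAndSwapI mem adr (Binary oldval newval)
  //
  // so the matcher's binary State tree (two _kids per State) can still walk it.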
  case Op_CMoveD:            // Convert trinary to binary-tree
    // Restructure into a binary tree for Matching.  It's possible that
    // we could move this code up next to the graph reshaping for IfNodes
    // or vice-versa, but I do not want to debug this for Ladybird.
    {
      Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
      n->set_req(1, pair1);
      Node* pair2 = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair2);
      n->del_req(3);
      break;
    }
  case Op_MacroLogicV: {
    Node* pair1 = new BinaryNode(n->in(1), n->in(2));
    Node* pair2 = new BinaryNode(n->in(3), n->in(4));
    n->set_req(1, pair1);
    n->set_req(2, pair2);
    n->del_req(4);
    n->del_req(3);
    break;
  }
  case Op_StoreVectorMasked: {
    Node* pair = new BinaryNode(n->in(3), n->in(4));
    n->set_req(3, pair);
    n->del_req(4);
    break;
  }
  case Op_LoopLimit: {
    Node* pair1 = new BinaryNode(n->in(1), n->in(2));
    n->set_req(1, pair1);
    n->set_req(2, n->in(3));
    n->del_req(3);
    break;
  }
  case Op_StrIndexOfChar: {
    Node* pair1 = new BinaryNode(n->in(2), n->in(3));
    n->set_req(2, pair1);
    n->set_req(3, n->in(4));
    n->del_req(4);
    break;
  }
  case Op_VectorizedHashCode: {
    Node* pair1 = new BinaryNode(n->in(2), n->in(3));
    n->set_req(2, pair1);
    Node* pair2 = new BinaryNode(n->in(4), n->in(5));
    n->set_req(3, pair2);
    n->del_req(5);
    n->del_req(4);
    break;
  }
  case Op_EncodeISOArray:
  case Op_StrCompressedCopy:
  case Op_StrInflatedCopy: {
    // Restructure into a binary tree for Matching.
    Node* pair = new BinaryNode(n->in(3), n->in(4));
    n->set_req(3, pair);
    n->del_req(4);
    break;
  }
    {
      // Restructure into a binary tree for Matching.
      Node* pair = new BinaryNode(n->in(1), n->in(2));
      n->set_req(2, pair);
      n->set_req(1, n->in(3));
      n->del_req(3);
      break;
    }
  case Op_MulAddS2I: {
    Node* pair1 = new BinaryNode(n->in(1), n->in(2));
    Node* pair2 = new BinaryNode(n->in(3), n->in(4));
    n->set_req(1, pair1);
    n->set_req(2, pair2);
    n->del_req(4);
    n->del_req(3);
    break;
  }
  case Op_VectorCmpMasked: {
    Node* pair = new BinaryNode(n->in(2), n->in(3));
    n->set_req(2, pair);
    n->del_req(3);
    break;
  }
  case Op_VectorBlend:
  case Op_VectorInsert: {
    Node* pair = new BinaryNode(n->in(1), n->in(2));
    n->set_req(1, pair);
    n->set_req(2, n->in(3));
    n->del_req(3);
    break;
  }
  case Op_LoadVectorGather:
    if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
      Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(MemNode::ValueIn+1);
    }
    break;
  case Op_LoadVectorGatherMasked:
    if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
      Node* pair2 = new BinaryNode(n->in(MemNode::ValueIn + 1), n->in(MemNode::ValueIn + 2));
      Node* pair1 = new BinaryNode(n->in(MemNode::ValueIn), pair2);
      n->set_req(MemNode::ValueIn, pair1);
      n->del_req(MemNode::ValueIn+2);
      n->del_req(MemNode::ValueIn+1);
      break;
    } // fall-through
  case Op_StoreVectorScatter: {
    Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
    n->set_req(MemNode::ValueIn, pair);
    n->del_req(MemNode::ValueIn+1);
    break;
  }
  case Op_StoreVectorScatterMasked: {
    Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
    n->set_req(MemNode::ValueIn+1, pair);
    n->del_req(MemNode::ValueIn+2);
    pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
    n->set_req(MemNode::ValueIn, pair);
    n->del_req(MemNode::ValueIn+1);
    break;
  }
  case Op_VectorMaskCmp: {
    n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
    n->set_req(2, n->in(3));
    n->del_req(3);
    break;
  }
  case Op_PartialSubtypeCheck: {
    if (UseSecondarySupersTable && n->in(2)->is_Con()) {
      // PartialSubtypeCheck uses both constant and register operands for superclass input.
      n->set_req(2, new BinaryNode(n->in(2), n->in(2)));
    }
    break;
  }
  default:
    break;
  }
}
void Matcher::record_new2old(Node* newn, Node* old) {
  _new2old_map.map(newn->_idx, old);
  if (!_reused.test_set(old->_igv_idx)) {
    // Reuse the Ideal-level IGV identifier so that the node can be tracked
    // across matching.  If there are multiple machine nodes expanded from the
    // same Ideal node, only one will reuse its IGV identifier.
    newn->_igv_idx = old->_igv_idx;
  }
}

// Dump the mapping from the machine-independent root to the machine-dependent root.
void Matcher::dump_old2new_map() {
  _old2new_map.dump();
}
//---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph; write a machine-specific node for
// it.  Used by later implicit-null-check handling.  Actually collects
// either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
// value being tested.
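//
// Illustrative sketch (not part of the build): for
//
//   if (p != nullptr) { ... v = p->field; ... }
//
// the matcher records the not-null projection together with p; if the
// dominated load from p can later be scheduled first, the explicit test is
// dropped and the hardware trap on a null p takes over the branch's job.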
void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
  Node *iff = proj->in(0);
  if( iff->Opcode() == Op_If ) {
    // During matching If's have Bool & Cmp side-by-side
    BoolNode *b = iff->in(1)->as_Bool();
    Node *cmp = iff->in(2);
    int opc = cmp->Opcode();
    if (opc != Op_CmpP && opc != Op_CmpN) return;

    const Type* ct = cmp->in(2)->bottom_type();
    if (ct == TypePtr::NULL_PTR ||
        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {

      bool push_it = false;
      if( proj->Opcode() == Op_IfTrue ) {
#ifndef PRODUCT
        extern uint all_null_checks_found;
        all_null_checks_found++;
#endif
        if( b->_test._test == BoolTest::ne ) {
          push_it = true;
        }
      } else {
        assert( proj->Opcode() == Op_IfFalse, "" );
        if( b->_test._test == BoolTest::eq ) {
          push_it = true;
        }
      }
      if( push_it ) {
        _null_check_tests.push(proj);
        Node* val = cmp->in(1);
#ifdef _LP64
        if (val->bottom_type()->isa_narrowoop() &&
            !Matcher::narrow_oop_use_complex_address()) {
          //
          // Look for DecodeN node which should be pinned to orig_proj.
          // On platforms (Sparc) which cannot handle 2 adds
          // in addressing mode we have to keep a DecodeN node and
          // use it to do implicit null check in address.
          //
          // DecodeN node was pinned to non-null path (orig_proj) during
          // CastPP transformation in final_graph_reshaping_impl().
          //
          uint cnt = orig_proj->outcnt();
          for (uint i = 0; i < orig_proj->outcnt(); i++) {
            Node* d = orig_proj->raw_out(i);
            if (d->is_DecodeN() && d->in(1) == val) {
              val = d;
              val->set_req(0, nullptr); // Unpin now.
              // Mark this as special case to distinguish from
              // a regular case: CmpP(DecodeN, null).
              val = (Node*)(((intptr_t)val) | 1);
              break;
            }
          }
        }
#endif
        _null_check_tests.push(val);
      }
    }
  }
}
//---------------------------validate_null_checks------------------------------
// It's possible that the value being null checked is not the root of a match
// tree.  If so, I cannot use the value in an implicit null check.
void Matcher::validate_null_checks( ) {
  uint cnt = _null_check_tests.size();
  for( uint i=0; i < cnt; i+=2 ) {
    Node *test = _null_check_tests[i];
    Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity");
        // Note: new_val may have a control edge if
        // the original ideal node DecodeN was matched before
        // it was unpinned in Matcher::collect_null_checks().
        // Unpin the mach node and mark it.
        new_val->set_req(0, nullptr);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      // Is a match-tree root, so replace with the matched value
      _null_check_tests.map(i+1, new_val);
    } else {
      // Yank from candidate list
      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
      _null_check_tests.map(i,_null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
    }
  }
}
bool Matcher::gen_narrow_oop_implicit_null_checks() {
  // Advise the matcher to perform null checks on the narrow oop side.
  // Implicit checks are not possible on the uncompressed oop side anyway
  // (at least not for read accesses).
  // Performs significantly better (especially on Power 6).
  if (!os::zero_page_read_protected()) {
    return true;
  }
  return CompressedOops::use_implicit_null_checks() &&
         (narrow_oop_use_complex_address() ||
          CompressedOops::base() != nullptr);
}
// Compute RegMask for an ideal register.
const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
  const Type* t = Type::mreg2type[ideal_reg];
  if (t == nullptr) {
    assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
    return nullptr; // not supported
  }
  Node* fp  = ret->in(TypeFunc::FramePtr);
  Node* mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  MemNode::MemOrd mo = MemNode::unordered;

  Node* spill = nullptr;
  switch (ideal_reg) {
    case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break;
    case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(), mo); break;
    case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(), mo); break;
    case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t, mo); break;
    case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t, mo); break;
    case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(), mo); break;

    case Op_VecA: // fall-through
    case Op_VecS: // fall-through
    case Op_VecD: // fall-through
    case Op_VecX: // fall-through
    case Op_VecY: // fall-through
    case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break;
    case Op_RegVectMask: return Matcher::predicate_reg_mask();

    default: ShouldNotReachHere();
  }
  MachNode* mspill = match_tree(spill);
  assert(mspill != nullptr, "matching failed: %d", ideal_reg);
  // Handle generic vector operand case
  if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
    specialize_mach_node(mspill);
  }
  return &mspill->out_RegMask();
}
// Process Mach IR right after selection phase is over.
void Matcher::do_postselect_cleanup() {
  if (supports_generic_vector_operands) {
    specialize_generic_vector_operands();
    if (C->failing()) return;
  }
}

//----------------------------------------------------------------------
// Generic machine operands elision.
//----------------------------------------------------------------------

// Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
  assert(use->in(idx) == tmp, "not a user");
  assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");

  if ((uint)idx == use->two_adr()) { // DEF_TEMP case
    tmp->_opnds[0] = use->_opnds[0]->clone();
  } else {
    uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
    tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
  }
}

// Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
  assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
  Node* def = nullptr;
  if (opnd_idx == 0) { // DEF
    def = m; // use mach node itself to compute vector operand type
  } else {
    int base_idx = m->operand_index(opnd_idx);
    def = m->in(base_idx);
    if (def->is_Mach()) {
      if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
        specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
      } else if (is_reg2reg_move(def->as_Mach())) {
        def = def->in(1); // skip over generic reg-to-reg moves
      }
    }
  }
  assert(def->bottom_type()->isa_vect(), "not a vector");
  uint ideal_vreg = def->bottom_type()->ideal_reg();
  return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
}

void Matcher::specialize_mach_node(MachNode* m) {
  assert(!m->is_MachTemp(), "processed along with its user");
  // For generic use operands pull specific register class operands from
  // its def instruction's output operand (def operand).
  for (uint i = 0; i < m->num_opnds(); i++) {
    if (Matcher::is_generic_vector(m->_opnds[i])) {
      m->_opnds[i] = specialize_vector_operand(m, i);
    }
  }
}

// Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
void Matcher::specialize_generic_vector_operands() {
  assert(supports_generic_vector_operands, "sanity");

  // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
  // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
  Unique_Node_List live_nodes;
  C->identify_useful_nodes(live_nodes);

  while (live_nodes.size() > 0) {
    MachNode* m = live_nodes.pop()->isa_Mach();
    if (m != nullptr) {
      if (Matcher::is_reg2reg_move(m)) {
        // Register allocator properly handles vec <=> leg moves using register masks.
        int opnd_idx = m->operand_index(1);
        Node* def = m->in(opnd_idx);
        m->subsume_by(def, C);
      } else if (m->is_MachTemp()) {
        // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
      } else {
        specialize_mach_node(m);
      }
    }
  }
}
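// Illustrative sketch (not part of the build): on x86, a node matched with the
// generic 'vec' operand and a 16-byte vector type is rewritten here to the
// concrete 'vecX' operand, so the register allocator sees an exact register
// class instead of the generic placeholder.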
uint Matcher::vector_length(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->length();
}

uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
  int def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->length();
}

uint Matcher::vector_length_in_bytes(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->length_in_bytes();
}

uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
  uint def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->length_in_bytes();
}

BasicType Matcher::vector_element_basic_type(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->element_basic_type();
}

BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
  int def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->element_basic_type();
}

bool Matcher::is_non_long_integral_vector(const Node* n) {
  BasicType bt = vector_element_basic_type(n);
  assert(bt != T_CHAR, "char is not allowed in vector");
  return is_subword_type(bt) || bt == T_INT;
}

bool Matcher::verify_after_postselect_cleanup() {
  assert(!C->failing(), "sanity");
  if (supports_generic_vector_operands) {
    Unique_Node_List useful;
    C->identify_useful_nodes(useful);
    for (uint i = 0; i < useful.size(); i++) {
      MachNode* m = useful.at(i)->isa_Mach();
      if (m != nullptr) {
        assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
        for (uint j = 0; j < m->num_opnds(); j++) {
          assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
        }
      }
    }
  }
  return true;
}
// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
// atomic instruction acting as a store_load barrier without any
// intervening volatile load, in which case we don't need a barrier here.
// We retain the Node to act as a compiler ordering barrier.
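//
// Illustrative sketch (not part of the build): in a sequence such as
//
//   StoreI p v; MemBarVolatile; ...; CompareAndSwapI ...
//
// the CAS already serializes the store buffer, so the MemBarVolatile emitted
// for the volatile store can be elided, as long as no volatile load (which
// needs its acquire barrier) appears in between.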
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = nullptr;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != nullptr), "missing control projection");

  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();

    // We don't need current barrier if we see another or a lock
    // before seeing volatile load.
    //
    // Op_Fastunlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndExchangeB ||
        xop == Op_CompareAndExchangeS ||
        xop == Op_CompareAndExchangeI ||
        xop == Op_CompareAndExchangeL ||
        xop == Op_CompareAndExchangeP ||
        xop == Op_CompareAndExchangeN ||
        xop == Op_WeakCompareAndSwapB ||
        xop == Op_WeakCompareAndSwapS ||
        xop == Op_WeakCompareAndSwapL ||
        xop == Op_WeakCompareAndSwapP ||
        xop == Op_WeakCompareAndSwapN ||
        xop == Op_WeakCompareAndSwapI ||
        xop == Op_CompareAndSwapB ||
        xop == Op_CompareAndSwapS ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI ||
        BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    if (xop == Op_FastLock) {
      return false;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by acquire membar.
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // probably not necessary to check for these
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}
// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch costs in case of going to
// the uncommon trap.  The code must be able to be recompiled to use
// the regular, cheaper branch once the trap actually fires.
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = nullptr;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
               // Alternatively use visited set?  Seems too expensive.
  while (reg != nullptr && cnt > 0) {
    CallNode *call = nullptr;
    RegionNode *nxt_reg = nullptr;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }
  return false;
}
//=============================================================================
//---------------------------State---------------------------------------------
State::State(void) : _rule() {
#ifdef ASSERT
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
#endif
}

#ifdef ASSERT
State::~State() {
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

//---------------------------dump----------------------------------------------
void State::dump(int depth) {
  for (int j = 0; j < depth; j++) {
    tty->print("   ");
  }
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for (i = 0; i < _LAST_MACH_OPER; i++) {
    // Check for valid entry
    if (valid(i)) {
      for (int j = 0; j < depth; j++) {
        tty->print("   ");
      }
      assert(cost(i) != max_juint, "cost must be a valid value");
      assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s  %d  %s",
                    ruleName[i], cost(i), ruleName[rule(i)]);
    }
  }
  tty->cr();
  for (i = 0; i < 2; i++) {
    if (_kids[i]) {
      _kids[i]->dump(depth + 1);
    }
  }
}