/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
26
#include "compiler/compileLog.hpp"
27
#include "compiler/oopMap.hpp"
28
#include "memory/allocation.inline.hpp"
29
#include "memory/resourceArea.hpp"
30
#include "opto/addnode.hpp"
31
#include "opto/block.hpp"
32
#include "opto/callnode.hpp"
33
#include "opto/cfgnode.hpp"
34
#include "opto/chaitin.hpp"
35
#include "opto/coalesce.hpp"
36
#include "opto/connode.hpp"
37
#include "opto/idealGraphPrinter.hpp"
38
#include "opto/indexSet.hpp"
39
#include "opto/machnode.hpp"
40
#include "opto/memnode.hpp"
41
#include "opto/movenode.hpp"
42
#include "opto/opcodes.hpp"
43
#include "opto/rootnode.hpp"
44
#include "utilities/align.hpp"
47
void LRG::dump() const {
  tty->print("%d ", num_regs());
  if( mask_size() == compute_mask_size() ) tty->print(", #%d ", _mask_size);
  else tty->print(", #!!!_%d_vs_%d ", _mask_size, _mask.Size());
  tty->print(", #?(%d) ", _mask.Size());

  tty->print("EffDeg: ");
  if( _degree_valid ) tty->print( "%d ", _eff_degree );
  else tty->print("? ");

  tty->print("MultiDef ");
  if (_defs != nullptr) {
    for (int i = 0; i < _defs->length(); i++) {
      tty->print("N%d ", _defs->at(i)->_idx);
    }
  }
  else if( _def == nullptr ) tty->print("Dead ");
  else tty->print("Def: N%d ", _def->_idx);

  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ", _cost, _area, score());
  if( _is_oop ) tty->print("Oop ");
  if( _is_float ) tty->print("Float ");
  if( _is_vector ) tty->print("Vector ");
  if( _is_predicate ) tty->print("Predicate ");
  if( _is_scalable ) tty->print("Scalable ");
  if( _was_spilled1 ) tty->print("Spilled ");
  if( _was_spilled2 ) tty->print("Spilled2 ");
  if( _direct_conflict ) tty->print("Direct_conflict ");
  if( _fat_proj ) tty->print("Fat ");
  if( _was_lo ) tty->print("Lo ");
  if( _has_copy ) tty->print("Copy ");
  if( _at_risk ) tty->print("Risk ");

  if( _must_spill ) tty->print("Must_spill ");
  if( _is_bound ) tty->print("Bound ");

  if( _degree_valid && lo_degree() ) tty->print("Trivial ");
// Compute score from cost and area.  Low score is best to spill.
static double raw_score( double cost, double area ) {
  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
}
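// Editor's worked example (hypothetical values): 1.52588e-5 is 1/65536,
// so with RegisterCostAreaRatio == 1000, cost == 10.0 and area == 500.0,
//   raw_score = 10.0 - (500.0 * 1000) / 65536  ~=  10.0 - 7.63  =  2.37
// A larger area pulls the score down (a better spill candidate); a larger
// cost pushes it back up.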
double LRG::score() const {
  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
  // Bigger area lowers score, encourages spilling this live range.
  // Bigger cost raises score, prevents spilling this live range.
  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
  // to turn a divide by a constant into a multiply by the reciprocal.)
  double score = raw_score( _cost, _area);

  // Account for area.  Basically, LRGs covering large areas are better
  // to spill because more other LRGs get freed up.
  if( _area == 0.0 )               // No area?  Then no progress to spill
    return 1e35;                   // Score is set to max (see the tie-breaking comment in Simplify)

  if( _was_spilled2 )              // If spilled once before, we are unlikely
    return score + 1e30;           // to make progress again.

  if( _cost >= _area*3.0 )         // Tiny area relative to cost
    return score + 1e17;           // Probably no progress to spill

  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
    return score + 1e10;           // Likely no progress to spill

  return score;
}
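// Editor's note on the ordering above: the additive penalties form strict
// tiers -- a twice-spilled LRG (score + 1e30) always outranks a tiny-area
// one (score + 1e17), which outranks a small-area one (score + 1e10) --
// and raw_score only breaks ties within a tier.  Since low score spills
// first, the penalized live ranges are chosen last.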
// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
  uint cur = lrg;
  uint next = _uf_map.at(cur);
  while (next != cur) {            // Scan chain of equivalences
    assert( next < cur, "always union smaller");
    cur = next;                    // until find a fixed-point
    next = _uf_map.at(cur);
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while (lrg != next) {
    uint tmp = _uf_map.at(lrg);
    _uf_map.at_put(lrg, next);
    lrg = tmp;
  }
  return lrg;
}
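// Editor's sketch of the same idea as a standalone model (simplified
// types assumed; this is not the HotSpot API):
//
//   #include <vector>
//   unsigned find_compress(std::vector<unsigned>& uf, unsigned lrg) {
//     unsigned root = lrg;
//     while (uf[root] != root) {  // chase the chain to its fixed-point
//       root = uf[root];
//     }
//     while (uf[lrg] != root) {   // second pass: point every element on
//       unsigned next = uf[lrg];  // the chain directly at the root
//       uf[lrg] = root;
//       lrg = next;
//     }
//     return root;
//   }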
// Reset the Union-Find map to identity
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  _max_lrg_id = max_lrg_id;
  // Force the Union-Find mapping to be at least this large
  _uf_map.at_put_grow(_max_lrg_id, 0);
  // Initialize it to be the ID mapping.
  for (uint i = 0; i < _max_lrg_id; ++i) {
    _uf_map.at_put(i, i);
  }
}

// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
  // For all Nodes, compress mapping
  uint unique = _names.length();
  for (uint i = 0; i < unique; ++i) {
    uint lrg = _names.at(i);
    uint compressed_lrg = find(lrg);
    if (lrg != compressed_lrg) {
      _names.at_put(i, compressed_lrg);
    }
  }
}
// Like Find above, but no path compress, so bad asymptotic behavior
uint LiveRangeMap::find_const(uint lrg) const {
  if (lrg == 0) {
    return lrg; // Ignore the zero LRG
  }

  // Off the end? This happens during debugging dumps when you have
  // brand-new live ranges but have not told the allocator yet.
  if (lrg >= _max_lrg_id) {
    return lrg;
  }

  uint next = _uf_map.at(lrg);
  while (next != lrg) { // Scan chain of equivalences
    assert(next < lrg, "always union smaller");
    lrg = next;         // until find a fixed-point
    next = _uf_map.at(lrg);
  }
  return next;
}
PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool scheduling_info_generated)
  : PhaseRegAlloc(unique, cfg, matcher,
       print_chaitin_statistics
  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  , _trace_spilling(C->directive()->TraceSpillingOption)
  , _lrg_map(Thread::current()->resource_area(), unique)
  , _scheduling_info_generated(scheduling_info_generated)
  , _sched_int_pressure(0, Matcher::int_pressure_limit())
  , _sched_float_pressure(0, Matcher::float_pressure_limit())
  , _scratch_int_pressure(0, Matcher::int_pressure_limit())
  , _scratch_float_pressure(0, Matcher::float_pressure_limit())
  Compile::TracePhase tp("ctorChaitin", &timers[_t_ctorChaitin]);

  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());

  // Build a list of basic blocks, sorted by frequency
  // Experiment with sorting strategies to speed compilation
  uint nr_blocks = _cfg.number_of_blocks();
  double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
  Block **buckets[NUMBUCKS];            // Array of buckets
  uint    buckcnt[NUMBUCKS];            // Array of bucket counters
  double  buckval[NUMBUCKS];            // Array of bucket value cutoffs

  // The space which our buckets point into.
  Block** start = NEW_RESOURCE_ARRAY(Block *, nr_blocks*NUMBUCKS);
  for (uint i = 0; i < NUMBUCKS; i++) {
    buckets[i] = &start[i*nr_blocks];
    // Bump by three orders of magnitude each time

  // Sort blocks into buckets
  for (uint i = 0; i < nr_blocks; i++) {
    for (uint j = 0; j < NUMBUCKS; j++) {
      double bval = buckval[j];
      Block* blk = _cfg.get_block(i);
      if (j == NUMBUCKS - 1 || blk->_freq > bval) {
        uint cnt = buckcnt[j];
        // Assign block to end of list for appropriate bucket
        buckets[j][cnt] = blk;
        break;                  // kick out of inner loop

  // Squash the partially filled buckets together into the first one.
  static_assert(NUMBUCKS >= 2, "must"); // If this isn't true then it'll mess up the squashing.
  Block** offset = &buckets[0][buckcnt[0]];
  for (int i = 1; i < NUMBUCKS; i++) {
    ::memmove(offset, buckets[i], buckcnt[i]*sizeof(Block*));
    offset += buckcnt[i];
  }
  assert((&buckets[0][0] + nr_blocks) == offset, "should be");

  // Free the now unused memory
  FREE_RESOURCE_ARRAY(Block*, buckets[1], (NUMBUCKS-1)*nr_blocks);

  // Finally, point the _blks to our memory
  for (uint i = 0; i < NUMBUCKS; i++) {
    blkcnt += buckcnt[i];
  }
  assert(blkcnt == nr_blocks, "Block array not totally filled");
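// Editor's worked example (values illustrative, NUMBUCKS assumed >= 4):
// with cutoff c == BLOCK_FREQUENCY(1.0), "bump by three orders of
// magnitude" gives thresholds
//   buckval[0] = c, buckval[1] = c * 1e-3, buckval[2] = c * 1e-6, ...
// so a block of frequency f lands in the first bucket with buckval[j] < f,
// and the final bucket (j == NUMBUCKS - 1) unconditionally catches the rest.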
// Union two sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = _lrg_map.find(src_n);
  uint dst = _lrg_map.find(dst_n);
  assert(src < _lrg_map.max_lrg_id(), "oob");
  assert(dst < _lrg_map.max_lrg_id(), "oob");
  assert(src < dst, "always union smaller");
  _lrg_map.uf_map(dst, src);
}

void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
  // Make the Node->LRG mapping
  _lrg_map.extend(x->_idx, lrg);
  // Make the Union-Find mapping an identity function
  _lrg_map.uf_extend(lrg, lrg);
}

int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
  uint cnt = orig->outcnt();
  for (uint i = 0; i < cnt; i++) {
    Node* proj = orig->raw_out(i);
    if (proj->is_MachProj()) {
      assert(proj->outcnt() == 0, "only kill projections are expected here");
      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
      // Copy kill projections after the cloned node
      Node* kills = proj->clone();
      kills->set_req(0, copy);
      b->insert_node(kills, idx++);
      _cfg.map_node_to_block(kills, b);
      new_lrg(kills, max_lrg_id++);

// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  Compile::TracePhase tp("chaitinCompact", &timers[_t_chaitinCompact]);

  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
    uint lr = _lrg_map.uf_live_range_id(i);
    // Ignore unallocated live ranges
    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
  }

  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _lrg_map.size();
  for (i = 0; i < unique; i++) {
    uint lrg_id = _lrg_map.live_range_id(i);
    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
  }

  // Reset the Union-Find mapping
  _lrg_map.reset_uf_map(j);
}
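// Editor's worked example (hypothetical ids): with max_lrg_id == 7 and
// prior unions 3->1, 5->2 and 6->2, the wave above rewrites the map to
//   i      : 1 2 3 4 5 6
//   uf_map : 1 2 1 3 2 2
// Roots 1, 2 and 4 receive fresh compacted ids 1, 2 and 3, the unioned
// ranges inherit their root's new id, and reset_uf_map(4) re-seeds the
// Union-Find as the identity over the compacted namespace.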
void PhaseChaitin::Register_Allocate() {
  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
  // slots in this area are called "arg_slots".  Above the NEW FP (and in
  // registers) is the outgoing argument area; above that is the spill/temp
  // area.  These are all "frame_slots".  Arg_slots start at the zero
  // stack_slots and count up to the known arg_size.  Frame_slots start at
  // the stack_slot #arg_size and go up.  After allocation I map stack
  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
  // by the frame_size; stack-slots in the frame_slot area are biased by 0.

  _matcher._allocation_started = true;

  ResourceArea split_arena(mtCompiler);  // Arena for Split local resources
  ResourceArea live_arena(mtCompiler);   // Arena for liveness & IFG info
  ResourceMark rm(&live_arena);

  // Need live-ness for the IFG; need the IFG for coalescing.  If the
  // liveness is JUST for coalescing, then I can get some mileage by renaming
  // all copy-related live ranges low and then using the max copy-related
  // live range as a cut-off for LIVE and the IFG.  In other words, I can
  // build a subset of LIVE and IFG just for copies.
  PhaseLive live(_cfg, _lrg_map.names(), &live_arena, false);

  // Need IFG for coalescing and coloring
  PhaseIFG ifg(&live_arena);

  // Come out of SSA world to the Named world.  Assign (virtual) registers to
  // Nodes.  Use the same register for all inputs and the output of PhiNodes
  // - effectively ending SSA form.  This requires either coalescing live
  // ranges or inserting copies.  For the moment, we insert "virtual copies"
  // - we pretend there is a copy prior to each Phi in predecessor blocks.
  // We will attempt to coalesce such "virtual copies" before we manifest
  // them for real.

  // Verify the graph before RA.
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    _live = nullptr;                 // Mark live as being not available
    rm.reset_to_mark();              // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
    gather_lrg_masks( false );       // Collect LRG masks
    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
    _live = &live;                   // Mark LIVE as being available

  // Base pointers are currently "used" by instructions which define new
  // derived pointers.  This makes base pointers live up to where the
  // derived pointer is made, but not beyond.  Really, they need to be live
  // across any GC point where the derived value is live.  So this code looks
  // at all the GC points, and "stretches" the live range of any base pointer
  // to the GC point.
  if (stretch_base_pointer_live_ranges(&live_arena)) {
    Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]);
    // Since some live range stretched, I need to recompute live
    rm.reset_to_mark();              // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks(false);
    live.compute(_lrg_map.max_lrg_id());
  }

  // Create the interference graph using virtual copies
  build_ifg_virtual();               // Include stack slots this time

  // The IFG is/was triangular.  I am 'squaring it up' so Union can run
  // faster.  Union requires a 'for all' operation which is slow on the
  // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
  // meaning I can visit all of a Node's neighbors numbered less than the
  // Node in time O(# of neighbors), but I have to visit all the Nodes
  // numbered greater than a given Node and search them for an instance,
  // i.e., time O(#MaxLRG)).

  // Aggressive (but pessimistic) copy coalescing.
  // This pass works on virtual copies.  Any virtual copies which are not
  // coalesced get manifested as actual copies
    Compile::TracePhase tp("chaitinCoalesce1", &timers[_t_chaitinCoalesce1]);
    PhaseAggressiveCoalesce coalesce(*this);
    coalesce.coalesce_driver();
    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
    // not match the Phi itself, insert a copy.
    coalesce.insert_copies(_matcher);

  // After aggressive coalesce, attempt a first cut at coloring.
  // To color, we need the IFG and for that we need LIVE.
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm.reset_to_mark();              // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks( true );
    live.compute(_lrg_map.max_lrg_id());

  // Build physical interference graph
  must_spill = build_ifg_physical(&live_arena);
  // If we have a guaranteed spill, might as well spill now
  if(!_lrg_map.max_lrg_id()) {
    return;
  }
  // Bail out if unique gets too large (i.e. unique > MaxNodeLimit)
  C->check_node_count(10*must_spill, "out of nodes before split");
  uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
  _lrg_map.set_max_lrg_id(new_max_lrg_id);
  // Bail out if unique gets too large (i.e. unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
  // or we failed to split
  C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");

  NOT_PRODUCT(C->verify_graph_edges();)

  compact();                         // Compact LRGs; return new lower max lrg

    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm.reset_to_mark();              // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
    gather_lrg_masks( true );        // Collect intersect mask
    live.compute(_lrg_map.max_lrg_id()); // Compute LIVE

  build_ifg_physical(&live_arena);
  _ifg->Compute_Effective_Degree();
  // Only do conservative coalescing if requested
    Compile::TracePhase tp("chaitinCoalesce2", &timers[_t_chaitinCoalesce2]);
    // Conservative (and pessimistic) copy coalescing of those spills
    PhaseConservativeCoalesce coalesce(*this);
    // If max live ranges greater than cutoff, don't color the stack.
    // This cutoff can be larger than below since it is only done once.
    coalesce.coalesce_driver();
  _lrg_map.compress_uf_map_for_nodes();

  verify(&live_arena, true);

  ifg.Compute_Effective_Degree();

  // Prepare for Simplify & Select
  cache_lrg_info();                  // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.
  uint spills = Select( );

  // If we spill, split and recycle the entire thing
  if( _trip_cnt++ > 24 ) {
    DEBUG_ONLY( dump_for_spill_split_recycle(); )
    if( _trip_cnt > 27 ) {
      C->record_method_not_compilable("failed spill-split-recycle sanity check");
      return;
    }
  }

  if (!_lrg_map.max_lrg_id()) {
    return;
  }
  uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
  _lrg_map.set_max_lrg_id(new_max_lrg_id);
  // Bail out if unique gets too large (i.e. unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
  C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");

  compact();                         // Compact LRGs; return new lower max lrg

  // Nuke the live-ness and interference graph and LiveRanGe info
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm.reset_to_mark();              // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());

    // Create LiveRanGe array.
    // Intersect register masks for all USEs and DEFs
    gather_lrg_masks(true);
    live.compute(_lrg_map.max_lrg_id());

  must_spill = build_ifg_physical(&live_arena);
  _ifg->Compute_Effective_Degree();

  // Only do conservative coalescing if requested
    Compile::TracePhase tp("chaitinCoalesce3", &timers[_t_chaitinCoalesce3]);
    // Conservative (and pessimistic) copy coalescing
    PhaseConservativeCoalesce coalesce(*this);
    // The check for few live ranges determines how aggressive coalescing is.
    coalesce.coalesce_driver();
  _lrg_map.compress_uf_map_for_nodes();

  verify(&live_arena, true);

  cache_lrg_info();                  // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.

  // Count number of Simplify-Select trips per coloring success.
  _allocator_attempts += _trip_cnt + 1;
  _allocator_successes += 1;

  // Peephole remove copies
  post_allocate_copy_removal();

  // Merge multidefs if multiple defs representing the same value are used in a single block.

  // Verify the graph after RA.

  // max_reg is past the largest *register* used.
  // Convert that to a frame_slot number.
  if (_max_reg <= _matcher._new_SP) {
    _framesize = C->out_preserve_stack_slots();
  } else {
    _framesize = _max_reg - _matcher._new_SP;
  }
  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");

  // This frame must preserve the required fp alignment
  _framesize = align_up(_framesize, Matcher::stack_alignment_in_slots());
  assert(_framesize <= 1000000, "sanity check");

  _total_framesize += _framesize;
  if ((int)_framesize > _max_framesize) {
    _max_framesize = _framesize;
  }

  // Convert CISC spills

  // Log regalloc results
  CompileLog* log = Compile::current()->log();
  if (log != nullptr) {
    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
  }

  NOT_PRODUCT(C->verify_graph_edges();)

  // Move important info out of the live_arena to longer lasting storage.
  alloc_node_regs(_lrg_map.size());
  for (uint i=0; i < _lrg_map.size(); i++) {
    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
    } else if ((lrg.num_regs() == 1 && !lrg.is_scalable()) ||
               (lrg.is_scalable() && lrg.scalable_reg_slots() == 1)) {
    } else {                         // Must be a register-set
      if (!lrg._fat_proj) {          // Must be aligned adjacent register set
        // Live ranges record the highest register in their mask.
        // We want the low register for the AD file writer's convenience.
        OptoReg::Name hi = lrg.reg(); // Get hi register
        int num_regs = lrg.num_regs();
        if (lrg.is_scalable() && OptoReg::is_stack(hi)) {
          // For scalable vector registers, when they are allocated in physical
          // registers, num_regs is RegMask::SlotsPerVecA for reg mask of scalable
          // vector. If they are allocated on stack, we need to get the actual
          // num_regs, which reflects the physical length of scalable registers.
          num_regs = lrg.scalable_reg_slots();
        }
        OptoReg::Name lo = OptoReg::add(hi, (1 - num_regs)); // Find lo
        // We have to use pair [lo,lo+1] even for wide vectors/vmasks because
        // the rest of code generation works only with pairs. It is safe
        // since for registers encoding only 'lo' is used.
        // Second reg from pair is used in ScheduleAndBundle with vector max
        // size 8 which corresponds to registers pair.
        // It is also used in BuildOopMaps but oop operations are not
        // vectorized.
      } else {                       // Misaligned; extract 2 bits
        OptoReg::Name hi = lrg.reg(); // Get hi register
        lrg.Remove(hi);              // Yank from mask
        int lo = lrg.mask().find_first_elem(); // Find lo
    if( lrg._is_oop ) _node_oops.set(i);

  C->set_indexSet_arena(nullptr);    // ResourceArea is at end of scope
}

void PhaseChaitin::de_ssa() {
  // Set initial Names for all Nodes.  Most Nodes get the virtual register
  // number.  A few get the ZERO live range number.  These do not
  // get allocated, but instead rely on correct scheduling to ensure that
  // only one instance is live at a time.
  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
    }
  }

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(lr_counter);
}
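// Editor's worked example (assuming lr_counter starts at 1; its
// declaration is elided above): for nodes A, B, C where only B has an
// empty out_RegMask (say, a pure memory effect), de_ssa() produces
//   A -> 1, B -> 0, C -> 2
// and reset_uf_map(3) seeds the Union-Find as the identity map.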
void PhaseChaitin::mark_ssa() {
  // Use SSA names to populate the live range maps or, if no mask
  // is available, use the 0 entry.
  for ( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for ( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? n->_idx : 0);
      max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
    }
  }
  _lrg_map.set_max_lrg_id(max_idx+1);

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(max_idx+1);
}

// Gather LiveRanGe information, including register masks.  Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
  // Nail down the frame pointer live range
  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
  lrgs(fp_lrg)._cost += 1e12;        // Cost is infinite

  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions
    for (uint j = 1; j < block->number_of_nodes(); j++) {
      Node* n = block->get_node(j);
      uint input_edge_start = 1;     // Skip the control edge on most nodes
      bool is_machine_node = false;
      is_machine_node = true;
      input_edge_start = n->as_Mach()->oper_input_base();
      uint idx = n->is_Copy();

      // Get virtual register number, same as LiveRanGe index
      uint vreg = _lrg_map.live_range_id(n);
      LRG& lrg = lrgs(vreg);
      if (vreg) {                    // No vreg means un-allocable (e.g. memory)

        // Check for float-vs-int live range (used in register-pressure
        // calculations)
        const Type *n_type = n->bottom_type();
        if (n_type->is_floatingpoint()) {
          lrg._is_float = 1;
        }

        // Check for twice prior spilling.  Once prior spilling might have
        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
        // further spilling is unlikely to make progress.
        if (_spilled_once.test(n->_idx)) {
          lrg._was_spilled1 = 1;
          if (_spilled_twice.test(n->_idx)) {
            lrg._was_spilled2 = 1;

        // Collect bits not used by product code, but which may be useful for
        // debugging.

        // Collect has-copy bit
        uint clidx = _lrg_map.live_range_id(n->in(idx));
        LRG& copy_src = lrgs(clidx);
        copy_src._has_copy = 1;

        if (trace_spilling() && lrg._def != nullptr) {
          // collect defs for MultiDef printing
          if (lrg._defs == nullptr) {
            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, nullptr);
            lrg._defs->append(lrg._def);
          }
          lrg._defs->append(n);
        }

        // Check for a single def LRG; these can spill nicely
        // via rematerialization.  Flag as null for no def found
        // yet, or 'n' for single def or NodeSentinel for many defs.
        lrg._def = lrg._def ? NodeSentinel : n;
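        // Editor's note on the ternary above: _def only ever moves forward
        // through nullptr (no def seen) -> n (exactly one def) ->
        // NodeSentinel (many defs), so a second definition permanently
        // marks the LRG as multi-def.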
        // Limit result register mask to acceptable registers
        const RegMask &rm = n->out_RegMask();
        uint ireg = n->ideal_reg();
        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
                "oops must be in Op_RegP's" );

        // Check for vector live range (only if vector register is used).
        // On SPARC a vector uses RegD, which could be misaligned, so it is
        // not processed as a vector in RA.
        if (RegMask::is_vector(ireg)) {
          if (Matcher::implements_scalable_vector && ireg == Op_VecA) {
            assert(Matcher::supports_scalable_vector(), "scalable vector should be supported");
            lrg._is_scalable = 1;
            // For scalable vector, when it is allocated in physical register,
            // num_regs is RegMask::SlotsPerVecA for reg mask,
            // which may not be the actual physical register size.
            // If it is allocated in stack, we need to get the actual
            // physical length of scalable vector register.
            lrg.set_scalable_reg_slots(Matcher::scalable_vector_reg_size(T_FLOAT));
          }
        }
        if (ireg == Op_RegVectMask) {
          assert(Matcher::has_predicated_vectors(), "predicated vector should be supported");
          lrg._is_predicate = 1;
          if (Matcher::supports_scalable_vector()) {
            lrg._is_scalable = 1;
            // For scalable predicate, when it is allocated in physical register,
            // num_regs is RegMask::SlotsPerRegVectMask for reg mask,
            // which may not be the actual physical register size.
            // If it is allocated in stack, we need to get the actual
            // physical length of scalable predicate register.
            lrg.set_scalable_reg_slots(Matcher::scalable_predicate_reg_slots());
          }
        }
        assert(n_type->isa_vect() == nullptr || lrg._is_vector ||
               ireg == Op_RegD || ireg == Op_RegL || ireg == Op_RegVectMask,
               "vector must be in vector registers");

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        if (lrgmask.is_bound(ireg)) {
          lrg._is_bound = 1;
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

        // Check for oop-iness, or long/double
        // Check for multi-kill projection
        switch (ireg) {
        case MachProjNode::fat_proj:
          // Fat projections have size equal to number of registers killed
          lrg.set_num_regs(rm.Size());
          lrg.set_reg_pressure(lrg.num_regs());
          lrg.set_num_regs(2);       // Size is 2 stack words
          lrg.set_num_regs(1);       // Size is 1 stack word
          // Register pressure is tracked relative to the maximum values
          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
          // and relative to other types which compete for the same regs.
          //
          // The following table contains suggested values based on the
          // architectures as defined in each .ad file.
          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
          // compile-speed or performance.
          //
          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
          // since .ad registers are defined as high and low halves.
          // These reg_pressure values remain compatible with the code
          // in is_high_pressure() which relates get_invalid_mask_size(),
          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
          //
          // SPARC -d32 has 24 registers available for integral values,
          // but only 10 of these are safe for 64-bit longs.
          // Using set_reg_pressure(2) for both int and long means
          // the allocator will believe it can fit 26 longs into
          // registers.  Using 2 for longs and 1 for ints means the
          // allocator will attempt to put 52 integers into registers.
          // The settings below limit this problem to methods with
          // many long values which are being run on 32-bit SPARC.
          //
          // ------------------- reg_pressure --------------------
          // Each entry is reg_pressure_per_value,number_of_regs
          //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
          // IA32     2     1     1          1    1          6           6
          // IA64     1     1     1          1    1         50          41
          // SPARC    2     2     2          2    2         48 (24)     52 (26)
          // SPARCV9  2     2     2          2    2         48 (24)     52 (26)
          // AMD64    1     1     1          1    1         14          15
          // -----------------------------------------------------
          lrg.set_reg_pressure(1);   // normally one value per register
          if( n_type->isa_oop_ptr() ) {
        case Op_RegL:                // Check for long or double
          // Define platform specific register pressure
          lrg.set_reg_pressure(2);
          if( ireg == Op_RegL ) {
            lrg.set_reg_pressure(2);
          } else {
            lrg.set_reg_pressure(1);
          }
          lrg.set_reg_pressure(1);   // normally one value per register
          // If this def of a double forces a mis-aligned double,
          // flag as '_fat_proj' - really flag as allowing misalignment
          // AND changes how we count interferences.  A mis-aligned
          // double can interfere with TWO aligned pairs, or effectively
          // FOUR registers!
          if (rm.is_misaligned_pair()) {
          assert(Matcher::has_predicated_vectors(), "sanity");
          assert(RegMask::num_registers(Op_RegVectMask) == RegMask::SlotsPerRegVectMask, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerRegVectMask);
          lrg.set_reg_pressure(1);
        case 0:                      // not an ideal register
          lrg.set_reg_pressure(1);
          assert(Matcher::supports_scalable_vector(), "does not support scalable vector");
          assert(RegMask::num_registers(Op_VecA) == RegMask::SlotsPerVecA, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecA), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecA);
          lrg.set_reg_pressure(1);
          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerVecS);
          lrg.set_reg_pressure(1);
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecD);
          lrg.set_reg_pressure(1);
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecX);
          lrg.set_reg_pressure(1);
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecY);
          lrg.set_reg_pressure(1);
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecZ), "sanity");
          assert(RegMask::num_registers(Op_VecZ) == RegMask::SlotsPerVecZ, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecZ), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecZ);
          lrg.set_reg_pressure(1);
          ShouldNotReachHere();
      // Now do the same for inputs
      uint cnt = n->req();
      // Setup for CISC SPILLING
      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
      if( UseCISCSpill && after_aggressive ) {
        inp = n->cisc_operand();
        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
          // Convert operand number to edge index number
          inp = n->as_Mach()->operand_index(inp);
      }

      // Prepare register mask for each input
      for( uint k = input_edge_start; k < cnt; k++ ) {
        uint vreg = _lrg_map.live_range_id(n->in(k));

        // If this instruction is CISC Spillable, add the flags
        // bit to its appropriate input
        if( UseCISCSpill && after_aggressive && inp == k ) {
          if( TraceCISCSpill ) {
            tty->print("  use_cisc_RegMask: ");
          }
          n->as_Mach()->use_cisc_RegMask();
        }

        if (is_machine_node && _scheduling_info_generated) {
          MachNode* cur_node = n->as_Mach();
          // this is cleaned up by register allocation
          if (k >= cur_node->num_opnds()) continue;
        }

        LRG &lrg = lrgs(vreg);
        // // Testing for floating point code shape
        // Node *test = n->in(k);
        // if( test->is_Mach() ) {
        //   MachNode *m = test->as_Mach();
        //   int op = m->ideal_Opcode();
        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {

        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce.  This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call.  This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        uint kreg = n->in(k)->ideal_reg();
        bool is_vect = RegMask::is_vector(kreg);
        assert(n->in(k)->bottom_type()->isa_vect() == nullptr || is_vect ||
               kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask,
               "vector must be in vector registers");
        if (lrgmask.is_bound(kreg))
          lrg._is_bound = 1;

        // If this use of a double forces a mis-aligned double,
        // flag as '_fat_proj' - really flag as allowing misalignment
        // AND changes how we count interferences.  A mis-aligned
        // double can interfere with TWO aligned pairs, or effectively
        // FOUR registers!
        if (is_vect && !_scheduling_info_generated) {
          if (lrg.num_regs() != 0) {
            assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
            assert(!lrg._fat_proj, "sanity");
            assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
          } else {
            assert(n->is_Phi(), "not all inputs processed only if Phi");
          }
        }

        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {

        // if the LRG is an unaligned pair, we will have to spill
        // so clear the LRG's register mask if it is not already spilled
        if (!is_vect && !n->is_SpillCopy() &&
            (lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
            lrgmask.is_misaligned_pair()) {

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

      } // End for all allocated inputs
    } // end for all instructions
  } // end for all blocks
  // Final per-liverange setup
  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
    LRG &lrg = lrgs(i2);
    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
      lrg.clear_to_sets();
    }
    lrg.compute_set_mask_size();
    if (lrg.not_free()) {            // Handle case where we lose from the start
      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
      lrg._direct_conflict = 1;
    }
    lrg.set_degree(0);               // no neighbors in IFG yet
  }
}

// Set the was-lo-degree bit.  Conservative coalescing should not change the
// colorability of the graph.  If any live range was of low-degree before
// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
// The bit is checked in Simplify.
void PhaseChaitin::set_was_low() {
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    int size = lrgs(i).num_regs();
    uint old_was_lo = lrgs(i)._was_lo;
    lrgs(i)._was_lo = 0;
    if( lrgs(i).lo_degree() ) {
      lrgs(i)._was_lo = 1;           // Trivially of low degree
    } else {                         // Else check the Briggs assertion
      // Briggs' observation is that the lo-degree neighbors of a
      // hi-degree live range will not interfere with the color choices
      // of said hi-degree live range.  The Simplify reverse-stack-coloring
      // order takes care of the details.  Hence you do not have to count
      // low-degree neighbors when determining if this guy colors.
      int briggs_degree = 0;
      IndexSet *s = _ifg->neighbors(i);
      IndexSetIterator elements(s);
      uint lidx;
      while((lidx = elements.next()) != 0) {
        if( !lrgs(lidx).lo_degree() )
          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
      }
      if( briggs_degree < lrgs(i).degrees_of_freedom() )
        lrgs(i)._was_lo = 1;         // Low degree via the Briggs assertion
    }
    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
  }
}
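// Editor's worked example (hypothetical degrees): let a 2-register LRG
// have degrees_of_freedom() == 10 and three hi-degree neighbors with
// num_regs of 1, 2 and 2.  Then
//   briggs_degree = MAX2(2,1) + MAX2(2,2) + MAX2(2,2) = 6 < 10,
// so even in the worst case its neighbors cannot exhaust its colors and
// _was_lo is set.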
// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {
  Compile::TracePhase tp("chaitinCacheLRG", &timers[_t_chaitinCacheLRG]);

  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    LRG &lrg = lrgs(i);

    // Check for being of low degree: means we can be trivially colored.
    // Low degree, dead or must-spill guys just get to simplify right away
    if( lrg.lo_degree() ||
        !lrg.alive() ||
        lrg._must_spill ) {
      // Split low degree list into those guys that must get a
      // register and those that can go to register or stack.
      // The idea is LRGs that can go register or stack color first when
      // they have a good chance of getting a register.  The register-only
      // lo-degree live ranges always get a register.
      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
        lrg._next = _lo_stk_degree;
        _lo_stk_degree = i;
      } else {
        lrg._next = _lo_degree;
        _lo_degree = i;
      }
    } else {                         // Else high degree
      lrgs(_hi_degree)._prev = i;
      lrg._next = _hi_degree;
      _hi_degree = i;
    }
  }
}
// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {
  Compile::TracePhase tp("chaitinSimplify", &timers[_t_chaitinSimplify]);

  while( 1 ) {                       // Repeat till simplified it all
    // May want to explore simplifying lo_degree before _lo_stk_degree.
    // This might result in more spills coloring into registers during
    // Select().
    while( _lo_degree || _lo_stk_degree ) {
      // If possible, pull from lo_stk first
      uint lo;
      if( _lo_degree ) {
        lo = _lo_degree;
        _lo_degree = lrgs(lo)._next;
      } else {
        lo = _lo_stk_degree;
        _lo_stk_degree = lrgs(lo)._next;
      }

      // Put the simplified guy on the simplified list.
      lrgs(lo)._next = _simplified;
      _simplified = lo;

      // If this guy is "at risk" then mark his current neighbors
      if (lrgs(lo)._at_risk && !_ifg->neighbors(lo)->is_empty()) {
        IndexSetIterator elements(_ifg->neighbors(lo));
        uint datum;
        while ((datum = elements.next()) != 0) {
          lrgs(datum)._risk_bias = lo;
        }
      }

      // Yank this guy from the IFG.
      IndexSet *adj = _ifg->remove_node(lo);
      if (adj->is_empty()) {
        continue;
      }

      // If any neighbors' degrees fall below their number of
      // allowed registers, then put that neighbor on the low degree
      // list.  Note that 'degree' can only fall and 'numregs' is
      // unchanged by this action.  Thus the two are equal at most once,
      // so LRGs hit the lo-degree worklist at most once.
      IndexSetIterator elements(adj);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        LRG *n = &lrgs(neighbor);
        if (VerifyRegisterAllocator) {
          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
        }

        // Check for just becoming of-low-degree just counting registers.
        // _must_spill live ranges are already on the low degree list.
        if (n->just_lo_degree() && !n->_must_spill) {
          assert(!_ifg->_yanked->test(neighbor), "Cannot move to lo degree twice");
          // Pull from hi-degree list
          uint prev = n->_prev;
          uint next = n->_next;
          if (prev) {
            lrgs(prev)._next = next;
          } else {
            _hi_degree = next;
          }
          lrgs(next)._prev = prev;
          n->_next = _lo_degree;
          _lo_degree = neighbor;
        }
      }
    } // End of while lo-degree/lo_stk_degree worklist not empty

    // Check for got everything: is hi-degree list empty?
    if (!_hi_degree) break;

    // Time to pick a potential spill guy
    uint lo_score = _hi_degree;
    double score = lrgs(lo_score).score();
    double area = lrgs(lo_score)._area;
    double cost = lrgs(lo_score)._cost;
    bool bound = lrgs(lo_score)._is_bound;

    // Find cheapest guy
    debug_only( int lo_no_simplify=0; );
    for (uint i = _hi_degree; i; i = lrgs(i)._next) {
      assert(!_ifg->_yanked->test(i), "");
      // It's just vaguely possible to move hi-degree to lo-degree without
      // going through a just-lo-degree stage: If you remove a double from
      // a float live range its degree will drop by 2 and you can skip the
      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
      if( lrgs(i).lo_degree() ) {
        lo_score = i;
        break;
      }
      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
      double iscore = lrgs(i).score();
      double iarea = lrgs(i)._area;
      double icost = lrgs(i)._cost;
      bool ibound = lrgs(i)._is_bound;

      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
      // wins.  Ties happen because all live ranges in question have spilled
      // a few times before and the spill-score adds a huge number which
      // washes out the low order bits.  We are choosing the lesser of 2
      // evils; in this case pick largest area to spill.
      // Ties also happen when live ranges are defined and used only inside
      // one block.  In which case their area is 0 and score set to max.
      // In such case choose bound live range over unbound to free registers
      // or with smaller cost to spill.
      if ( iscore < score ||
           (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
           (iscore == score && iarea == area &&
            ( (ibound && !bound) || (ibound == bound && (icost < cost)) )) ) {
        lo_score = i;
        score = iscore;
        area = iarea;
        cost = icost;
        bound = ibound;
      }
    }

    LRG *lo_lrg = &lrgs(lo_score);
    // The live range we choose for spilling is either hi-degree, or very
    // rarely it can be low-degree.  If we choose a hi-degree live range
    // there better not be any lo-degree choices.
    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );

    // Pull from hi-degree list
    uint prev = lo_lrg->_prev;
    uint next = lo_lrg->_next;
    if( prev ) lrgs(prev)._next = next;
    else _hi_degree = next;
    lrgs(next)._prev = prev;
    // Jam him on the lo-degree list, despite his high degree.
    // Maybe he'll get a color, and maybe he'll spill.
    // Only Select() will know.
    lrgs(lo_score)._at_risk = true;
    _lo_degree = lo_score;
  } // End of while not simplified everything
}
// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
    // RA uses OptoReg which represent the highest element of a registers set.
    // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
    // in which XMMd is used by RA to represent such vectors. A double value
    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
    // The register mask uses largest bits set of overlapping register sets.
    // On x86 with AVX it uses 8 bits for each XMM registers set.
    //
    // The 'lrg' already has cleared-to-set register mask (done in Select()
    // before calling choose_color()). Passing mask.Member(reg) check above
    // indicates that the size (num_regs) of 'reg' set is less or equal to
    // 'lrg' set size.
    // For set size 1 any register which is member of 'lrg' mask is legal.
    if (lrg.num_regs()==1)
      return true;
    // For larger sets only an aligned register with the same set size is legal.
    int mask = lrg.num_regs()-1;
    if ((reg&mask) == mask)
      return true;
  }
  return false;
}
static OptoReg::Name find_first_set(LRG &lrg, RegMask mask, int chunk) {
  int num_regs = lrg.num_regs();
  OptoReg::Name assigned = mask.find_first_set(lrg, num_regs);

  if (lrg.is_scalable()) {
    // a physical register is found
    if (chunk == 0 && OptoReg::is_reg(assigned)) {
      return assigned;
    }

    // find available stack slots for scalable register
    if (lrg._is_vector) {
      num_regs = lrg.scalable_reg_slots();
      // if actual scalable vector register is exactly SlotsPerVecA * 32 bits
      if (num_regs == RegMask::SlotsPerVecA) {
        return assigned;
      }

      // mask has been cleared out by clear_to_sets(SlotsPerVecA) before choose_color, but it
      // does not work for scalable size. We have to find adjacent scalable_reg_slots() bits
      // instead of SlotsPerVecA bits.
      assigned = mask.find_first_set(lrg, num_regs); // find highest valid reg
      while (OptoReg::is_valid(assigned) && RegMask::can_represent(assigned)) {
        // Verify the found reg has scalable_reg_slots() bits set.
        if (mask.is_valid_reg(assigned, num_regs)) {
          return assigned;
        }
        // Remove more for each iteration
        mask.Remove(assigned - num_regs + 1);        // Unmask the lowest reg
        mask.clear_to_sets(RegMask::SlotsPerVecA);   // Align by SlotsPerVecA bits
        assigned = mask.find_first_set(lrg, num_regs);
      }
      return OptoReg::Bad; // will cause chunk change, and retry next chunk
    } else if (lrg._is_predicate) {
      assert(num_regs == RegMask::SlotsPerRegVectMask, "scalable predicate register");
      num_regs = lrg.scalable_reg_slots();
      mask.clear_to_sets(num_regs);
      return mask.find_first_set(lrg, num_regs);
    }
  }

  return assigned;
}
// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {

  // Check for "at_risk" LRG's
  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
  if (risk_lrg != 0 && !_ifg->neighbors(risk_lrg)->is_empty()) {
    // Walk the colored neighbors of the "at_risk" candidate
    // Choose a color which is both legal and already taken by a neighbor
    // of the "at_risk" candidate in order to improve the chances of the
    // "at_risk" candidate coloring
    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
    uint datum;
    while ((datum = elements.next()) != 0) {
      OptoReg::Name reg = lrgs(datum).reg();
      // If this LRG's register is legal for us, choose it
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    }
  }

  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
  if (copy_lrg != 0) {
    // If he has a color,
    if(!_ifg->_yanked->test(copy_lrg)) {
      OptoReg::Name reg = lrgs(copy_lrg).reg();
      // And it is legal for you,
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    } else if( chunk == 0 ) {
      // Choose a color which is legal for him
      RegMask tempmask = lrg.mask();
      tempmask.AND(lrgs(copy_lrg).mask());
      tempmask.clear_to_sets(lrg.num_regs());
      OptoReg::Name reg = find_first_set(lrg, tempmask, chunk);
      if (OptoReg::is_valid(reg))
        return reg;
    }
  }

  // If no bias info exists, just go with the register selection ordering
  if (lrg._is_vector || lrg.num_regs() == 2 || lrg.is_scalable()) {
    // Find an aligned set
    return OptoReg::add(find_first_set(lrg, lrg.mask(), chunk), chunk);
  }

  // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
  // copy removal to remove many more copies, by preventing a just-assigned
  // register from being repeatedly assigned.
  OptoReg::Name reg = lrg.mask().find_first_elem();
  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
    // This 'Remove; find; Insert' idiom is an expensive way to find the
    // SECOND element in the mask.
    lrg.Remove(reg);
    OptoReg::Name reg2 = lrg.mask().find_first_elem();
    lrg.Insert(reg);
    if( OptoReg::is_reg(reg2))
      reg = reg2;
  }
  return OptoReg::add( reg, chunk );
}
// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");

  if( lrg.num_regs() == 1 ||         // Common Case
      !lrg._fat_proj )               // Aligned+adjacent pairs ok
    // Use a heuristic to "bias" the color choice
    return bias_color(lrg, chunk);

  assert(!lrg._is_vector, "should not be a vector here" );
  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );

  // Fat-proj case or misaligned double argument.
  assert(lrg.compute_mask_size() == lrg.num_regs() ||
         lrg.num_regs() == 2, "fat projs exactly color" );
  assert( !chunk, "always color in 1st chunk" );
  // Return the highest element in the set.
  return lrg.mask().find_last_elem();
}
// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
// in reverse order of removal.  As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color.  Select that color.  If some
// hi-degree LRG cannot get a color then we record that we must spill.
uint PhaseChaitin::Select( ) {
  Compile::TracePhase tp("chaitinSelect", &timers[_t_chaitinSelect]);

  uint spill_reg = LRG::SPILL_REG;
  _max_reg = OptoReg::Name(0);       // Past max register used
  while( _simplified ) {
    // Pull next LRG from the simplified list - in reverse order of removal
    uint lidx = _simplified;
    LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;

    if (trace_spilling()) {
      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
                    lrg->degrees_of_freedom());
    }

    // Re-insert into the IFG
    _ifg->re_insert(lidx);
    if( !lrg->alive() ) continue;
    // capture allstackedness flag before mask is hacked
    const int is_allstack = lrg->mask().is_AllStack();

    // Yeah, yeah, yeah, I know, I know.  I can refactor this
    // to avoid the GOTO, although the refactored code will not
    // be much clearer.  We arrive here IFF we have a stack-based
    // live range that cannot color in the current chunk, and it
    // has to move into the next free stack chunk.
    int chunk = 0;                   // Current chunk is first chunk
    retry_next_chunk:

    // Remove neighbor colors
    IndexSet *s = _ifg->neighbors(lidx);
    debug_only(RegMask orig_mask = lrg->mask();)

    if (!s->is_empty()) {
      IndexSetIterator elements(s);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        // Note that neighbor might be a spill_reg.  In this case, exclusion
        // of its color will be a no-op, since the spill_reg chunk is in outer
        // space.  Also, if neighbor is in a different chunk, this exclusion
        // will be a no-op.  (Later on, if lrg runs out of possible colors in
        // its chunk, a new chunk of color may be tried, in which case
        // examination of neighbors is started again, at retry_next_chunk.)
        LRG &nlrg = lrgs(neighbor);
        OptoReg::Name nreg = nlrg.reg();
        // Only subtract masks in the same chunk
        if (nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE) {
          uint size = lrg->mask().Size();
          RegMask rm = lrg->mask();
          lrg->SUBTRACT(nlrg.mask());
          if (trace_spilling() && lrg->mask().Size() != size) {
            tty->print("L%d ", lidx);
            tty->print(" intersected L%d ", neighbor);
            tty->print(" removed ");
            rm.SUBTRACT(lrg->mask());
            tty->print(" leaving ");
          }
        }
      }
    }
    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
    // Aligned pairs need aligned masks
    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
      lrg->clear_to_sets();
    }

    // Check if a color is available and if so pick the color
    OptoReg::Name reg = choose_color( *lrg, chunk );

    // If we fail to color and the AllStack flag is set, trigger
    // a chunk-rollover event
    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
      // Bump register mask up to next stack chunk
      chunk += RegMask::CHUNK_SIZE;
      goto retry_next_chunk;
    }

    // Did we get a color?
    else if( OptoReg::is_valid(reg)) {
      RegMask avail_rm = lrg->mask();

      // Record selected register
      lrg->set_reg(reg);
      if( reg >= _max_reg )          // Compute max register limit
        _max_reg = OptoReg::add(reg,1);
      // Fold reg back into normal space
      reg = OptoReg::add(reg,-chunk);

      // If the live range is not bound, then we actually had some choices
      // to make.  In this case, the mask has more bits in it than the colors
      // chosen.  Restrict the mask to just what was picked.
      int n_regs = lrg->num_regs();
      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
      if (n_regs == 1 || !lrg->_fat_proj) {
        if (Matcher::supports_scalable_vector()) {
          assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecA, "sanity");
        } else {
          assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
        }
        lrg->Clear();                // Clear the mask
        lrg->Insert(reg);            // Set regmask to match selected reg
        // For vectors and pairs, also insert the low bit of the pair
        // We always choose the high bit, then mask the low bits by register size
        if (lrg->is_scalable() && OptoReg::is_stack(lrg->reg())) { // stack
          n_regs = lrg->scalable_reg_slots();
        }
        for (int i = 1; i < n_regs; i++) {
          lrg->Insert(OptoReg::add(reg,-i));
        }
        lrg->set_mask_size(n_regs);
      } else {                       // Else fatproj
        // mask must be equal to fatproj bits, by definition
      }

      if (trace_spilling()) {
        tty->print("L%d selected ", lidx);
        tty->print(" from ");
      }

      // Note that reg is the highest-numbered register in the newly-bound mask.
    } // end color available case

    else {
      // Live range is live and no colors available
      assert( lrg->alive(), "" );
      assert( !lrg->_fat_proj || lrg->is_multidef() ||
              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );

      // Assign the special spillreg register
      lrg->set_reg(OptoReg::Name(spill_reg++));
      // Do not empty the regmask; leave mask_size lying around
      // for use during Spilling

      if( trace_spilling() ) {
        tty->print("L%d spilling with neighbors: ", lidx);
        debug_only(tty->print(" original mask: "));
        debug_only(orig_mask.dump());
      }
    }
  }

  return spill_reg-LRG::SPILL_REG;   // Return number of spills
}
// Set the 'spilled_once' or 'spilled_twice' flag on a node.
void PhaseChaitin::set_was_spilled( Node *n ) {
  if( _spilled_once.test_set(n->_idx) )
    _spilled_twice.set(n->_idx);
}
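// Editor's note: test_set() sets the bit and returns its previous value,
// so the first spill of a node records 'spilled once' and any later
// spill promotes it to 'spilled twice'.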
1695
// Convert Ideal spill instructions into proper FramePtr + offset Loads and
// Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
void PhaseChaitin::fixup_spills() {
// This function does only cisc spill work.
if( !UseCISCSpill ) return;
Compile::TracePhase tp("fixupSpills", &timers[_t_fixupSpills]);
// Grab the Frame Pointer
Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
// For all instructions in block
uint last_inst = block->end_idx();
for (uint j = 1; j <= last_inst; j++) {
Node* n = block->get_node(j);
// Dead instruction???
assert( n->outcnt() != 0 ||// Nothing dead after post alloc
C->top() == n || // Or the random TOP node
n->is_Proj(), // Or a fat-proj kill node
"No dead instructions after post-alloc" );
int inp = n->cisc_operand();
if( inp != AdlcVMDeps::Not_cisc_spillable ) {
// Convert operand number to edge index number
MachNode *mach = n->as_Mach();
inp = mach->operand_index(inp);
Node *src = n->in(inp); // Value to load or store
LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
OptoReg::Name src_reg = lrg_cisc.reg();
// Doubles record the HIGH register of an adjacent pair.
src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
if( OptoReg::is_stack(src_reg) ) { // If input is on stack
// This is a CISC Spill, get stack offset and construct new node
if( TraceCISCSpill ) {
tty->print(" reg-instr: ");
int stk_offset = reg2offset(src_reg);
// Bailout if we might exceed node limit when spilling this instruction
C->check_node_count(0, "out of nodes fixing spills");
if (C->failing()) return;
MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
cisc->set_req(inp,fp); // Base register is frame pointer
if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
assert( cisc->oper_input_base() == 2, "Only adding one edge");
cisc->ins_req(1,src); // Requires a memory edge
// There is no space reserved for a memory edge before the inputs for
// instructions which have "stackSlotX" parameter instead of "memory".
// For example, "MoveF2I_stack_reg". We always need a memory edge from
// src to cisc, else we might schedule cisc before src, loading from a
// spill location before storing the spill. On some platforms, we land
// in this else case because mach->oper_input_base() > 1, i.e. we have
// multiple inputs. In some rare cases there are even multiple memory
// operands, before and after spilling.
// (e.g. spilling "addFPR24_reg_mem" to "addFPR24_mem_cisc")
// In either case, there is no space in the inputs for the memory edge
// so we add an additional precedence / memory edge.
cisc->add_prec(src);
block->map_node(cisc, j); // Insert into basic block
n->subsume_by(cisc, C); // Correct graph
++_used_cisc_instructions;
if( TraceCISCSpill ) {
tty->print(" cisc-instr: ");
if( TraceCISCSpill ) {
tty->print(" using reg-instr: ");
++_unused_cisc_instructions; // input can be on stack
} // End of for all instructions
} // End of for all blocks
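// Sketch of the slot-to-offset arithmetic a CISC spill rewrite relies on:
// a value in stack slot N becomes a [FP + offset] memory operand. The 4-byte
// slot size is an assumption (it matches VMRegImpl::stack_slot_size); the
// real reg2offset() also folds in the frame layout.
static int toy_slot_to_fp_offset(int stack_slot_index) {
  const int slot_size_in_bytes = 4;              // assumed slot size
  return stack_slot_index * slot_size_in_bytes;  // byte offset from frame base
}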
// Helper to stretch above; recursively discover the base Node for a
// given derived Node. Easy for AddP-related machine nodes, but needs
// to be recursive for derived Phis.
Node* PhaseChaitin::find_base_for_derived(Node** derived_base_map, Node* derived, uint& maxlrg) {
// See if already computed; if so return it
if (derived_base_map[derived->_idx]) {
return derived_base_map[derived->_idx];
if (derived->is_Mach() && derived->as_Mach()->ideal_Opcode() == Op_VerifyVectorAlignment) {
// Bypass the verification node
Node* base = find_base_for_derived(derived_base_map, derived->in(1), maxlrg);
derived_base_map[derived->_idx] = base;
// See if this happens to be a base.
// NOTE: we use TypePtr instead of TypeOopPtr because we can have
// pointers derived from null! These are always along paths that
// can't happen at run-time but the optimizer cannot deduce it so
// we have to handle it gracefully.
assert(!derived->bottom_type()->isa_narrowoop() ||
derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
const TypePtr *tj = derived->bottom_type()->isa_ptr();
// If it's an OOP with a non-zero offset, then it is derived.
if( tj == nullptr || tj->_offset == 0 ) {
derived_base_map[derived->_idx] = derived;
// Derived is null+offset? Base is null!
if( derived->is_Con() ) {
Node *base = _matcher.mach_null();
assert(base != nullptr, "sanity");
if (base->in(0) == nullptr) {
// Initialize it once and make it shared:
// set control to _root and place it into Start block
// (where top() node is placed).
base->init_req(0, _cfg.get_root_node());
Block *startb = _cfg.get_block_for_node(C->top());
uint node_pos = startb->find_node(C->top());
startb->insert_node(base, node_pos);
_cfg.map_node_to_block(base, startb);
assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
// The loadConP0 might have projection nodes depending on architecture
// Add the projection nodes to the CFG
for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
Node* use = base->fast_out(i);
if (use->is_MachProj()) {
startb->insert_node(use, ++node_pos);
_cfg.map_node_to_block(use, startb);
new_lrg(use, maxlrg++);
if (_lrg_map.live_range_id(base) == 0) {
new_lrg(base, maxlrg++);
assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base null should be shared");
derived_base_map[derived->_idx] = base;
// Check for AddP-related opcodes
if (!derived->is_Phi()) {
assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, "but is: %s", derived->Name());
Node *base = derived->in(AddPNode::Base);
derived_base_map[derived->_idx] = base;
// Recursively find bases for Phis.
// First check to see if we can avoid a base Phi here.
Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
for( i = 2; i < derived->req(); i++ )
if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
// Went to the end without finding any different bases?
if( i == derived->req() ) { // No need for a base Phi here
derived_base_map[derived->_idx] = base;
// Now we see we need a base-Phi here to merge the bases
const Type *t = base->bottom_type();
base = new PhiNode( derived->in(0), t );
for( i = 1; i < derived->req(); i++ ) {
base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
t = t->meet(base->in(i)->bottom_type());
base->as_Phi()->set_type(t);
// Search the current block for an existing base-Phi
Block *b = _cfg.get_block_for_node(derived);
for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
Node *phi = b->get_node(i);
if( !phi->is_Phi() ) { // Found end of Phis with no match?
b->insert_node(base, i); // Must insert created Phi here as base
_cfg.map_node_to_block(base, b);
new_lrg(base,maxlrg++);
// See if Phi matches.
for( j = 1; j < base->req(); j++ )
if( phi->in(j) != base->in(j) &&
!(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different nulls
if( j == base->req() ) { // All inputs match?
base = phi; // Then use existing 'phi' and drop 'base'
// Cache info for later passes
derived_base_map[derived->_idx] = base;
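// Toy, self-contained sketch of the memoized recursion above: a node is
// either already a base (offset == 0) or an AddP-like node whose base is
// reached through a base edge. ToyNode and the memo table are hypothetical
// stand-ins; the real code additionally builds base-Phis when the inputs of
// a derived Phi disagree on their base.
struct ToyNode {
  int id;               // index into the memo table
  int offset;           // 0 => this node is itself a base
  ToyNode* base_input;  // AddP-style base edge (null for bases)
};
static ToyNode* toy_find_base(ToyNode** memo, ToyNode* derived) {
  if (memo[derived->id] != nullptr) {
    return memo[derived->id];                     // already computed
  }
  ToyNode* base = (derived->offset == 0)
      ? derived                                   // a base already
      : toy_find_base(memo, derived->base_input); // chase the base edge
  memo[derived->id] = base;                       // cache, like derived_base_map
  return base;
}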
// At each Safepoint, insert extra debug edges for each pair of derived value/
// base pointer that is live across the Safepoint for oopmap building. The
// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
// required edge set.
bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
int must_recompute_live = false;
uint maxlrg = _lrg_map.max_lrg_id();
Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
// For all blocks in RPO do...
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
// Note use of deep-copy constructor. I cannot hammer the original
// liveout bits, because they are needed by the following coalesce pass.
IndexSet liveout(_live->live(block));
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->get_node(j - 1);
// Pre-split compares of loop-phis. Loop-phis form a cycle we would
// like to see in the same register. Compare uses the loop-phi and so
// extends its live range BUT cannot be part of the cycle. If this
// extended live range overlaps with the update of the loop-phi value
// we need both alive at the same time -- which requires at least 1
// copy. But because Intel has only 2-address registers we end up with
// at least 2 copies, one before the loop-phi update instruction and
// one after. Instead we split the input to the compare just after the
// phi.
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
Node *phi = n->in(1);
if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
Block *phi_block = _cfg.get_block_for_node(phi);
if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
Node *spill = new MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
insert_proj( phi_block, 1, spill, maxlrg++ );
n->set_req(1,spill);
must_recompute_live = true;
// Get value being defined
uint lidx = _lrg_map.live_range_id(n);
// Ignore the occasional brand-new live range
if (lidx && lidx < _lrg_map.max_lrg_id()) {
// Remove from live-out set
liveout.remove(lidx);
// Copies do not define a new value and so do not interfere.
// Remove the copy's source from the liveout set before interfering.
uint idx = n->is_Copy();
liveout.remove(_lrg_map.live_range_id(n->in(idx)));
// Found a safepoint?
JVMState *jvms = n->jvms();
if (jvms && !liveout.is_empty()) {
// Now scan for a live derived pointer
IndexSetIterator elements(&liveout);
while ((neighbor = elements.next()) != 0) {
// Find reaching DEF for base and derived values
// This works because we are still in SSA during this call.
Node *derived = lrgs(neighbor)._def;
const TypePtr *tj = derived->bottom_type()->isa_ptr();
assert(!derived->bottom_type()->isa_narrowoop() ||
derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
// If it's an OOP with a non-zero offset, then it is derived.
if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
assert(base->_idx < _lrg_map.size(), "");
// Add reaching DEFs of derived pointer and base pointer as a
// pair of inputs.
n->add_req(derived);
// See if the base pointer is already live to this point.
// Since I'm working on the SSA form, live-ness amounts to
// reaching def's. So if I find the base's live range then
// I know the base's def reaches here.
if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
!liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
(_lrg_map.live_range_id(base) > 0) && // not a constant
_cfg.get_block_for_node(base) != block) { // base not def'd in blk)
// Base pointer is not currently live. Since I stretched
// the base pointer to here and it crosses basic-block
// boundaries, the global live info is now incorrect.
must_recompute_live = true;
} // End of if base pointer is not live to debug info
} // End of scan all live data for derived ptrs crossing GC point
} // End of if found a GC point
// Make all inputs live
if (!n->is_Phi()) { // Phi function uses come from prior block
for (uint k = 1; k < n->req(); k++) {
uint lidx = _lrg_map.live_range_id(n->in(k));
if (lidx < _lrg_map.max_lrg_id()) {
liveout.insert(lidx);
} // End of forall instructions in block
liveout.clear(); // Free the memory used by liveout.
} // End of forall blocks
_lrg_map.set_max_lrg_id(maxlrg);
// If I created a new live range I need to recompute live
if (maxlrg != _ifg->_maxlrg) {
must_recompute_live = true;
return must_recompute_live != 0;
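// Minimal sketch of the backward liveness step that drives the walk above:
// scanning a block bottom-up, the value an instruction defines is killed
// from the live-out set, then its inputs are made live. std::set stands in
// for IndexSet; ToyInstr is an illustrative shape, not a HotSpot type.
#include <set>
#include <vector>
struct ToyInstr {
  int def;                // live-range id defined here (0 = none)
  std::vector<int> uses;  // live-range ids of the inputs
};
static void toy_backward_liveness_step(std::set<int>& liveout,
                                       const ToyInstr& n) {
  if (n.def != 0) liveout.erase(n.def);    // def ends the live range here
  for (int u : n.uses) liveout.insert(u);  // inputs are live before n
}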
// Extend the node to LRG mapping
void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
_lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
void PhaseChaitin::dump(const Node* n) const {
uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
tty->print("L%d",r);
if (r && n->Opcode() != Op_Phi) {
if( _node_regs ) { // Got a post-allocation copy of allocation?
OptoReg::Name second = get_reg_second(n);
if( OptoReg::is_valid(second) ) {
if( OptoReg::is_reg(second) )
tty->print("%s:",Matcher::regName[second]);
tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
OptoReg::Name first = get_reg_first(n);
if( OptoReg::is_reg(first) )
tty->print("%s]",Matcher::regName[first]);
tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
n->out_RegMask().dump();
tty->print("/N%d\t",n->_idx);
tty->print("%s === ", n->Name());
for (k = 0; k < n->req(); k++) {
uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
tty->print("L%d",r);
// Data MultiNodes can have projections with no real registers.
// Don't die while dumping them.
int op = n->Opcode();
if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
OptoReg::Name second = get_reg_second(n->in(k));
if( OptoReg::is_valid(second) ) {
if( OptoReg::is_reg(second) )
tty->print("%s:",Matcher::regName[second]);
tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
reg2offset_unchecked(second));
OptoReg::Name first = get_reg_first(n->in(k));
if( OptoReg::is_reg(first) )
tty->print("%s]",Matcher::regName[first]);
tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
reg2offset_unchecked(first));
n->in_RegMask(k).dump();
tty->print("/N%d ",m->_idx);
if( k < n->len() && n->in(k) ) tty->print("| ");
for( ; k < n->len(); k++ ) {
uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
tty->print("L%d",r);
tty->print("/N%d ",m->_idx);
if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
else n->dump_spec(tty);
if( _spilled_once.test(n->_idx ) ) {
tty->print(" Spill_1");
if( _spilled_twice.test(n->_idx ) )
tty->print(" Spill_2");
void PhaseChaitin::dump(const Block* b) const {
b->dump_head(&_cfg);
// For all instructions
for( uint j = 0; j < b->number_of_nodes(); j++ )
dump(b->get_node(j));
// Print live-out info at end of block
tty->print("Liveout: ");
IndexSet *live = _live->live(b);
IndexSetIterator elements(live);
while ((i = elements.next()) != 0) {
tty->print("L%d ", _lrg_map.find_const(i));
void PhaseChaitin::dump() const {
tty->print( "--- Chaitin -- argsize: %d framesize: %d ---\n",
_matcher._new_SP, _framesize );
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
dump(_cfg.get_block(i));
// End of per-block dump
tty->print("(No IFG.)\n");
tty->print("--- Live Range Array ---\n");
for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
tty->print("L%d: ",i2);
if (i2 < _ifg->_maxlrg) {
tty->print_cr("new LRG");
// Dump lo-degree list
tty->print("Lo degree: ");
for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
tty->print("L%d ",i3);
// Dump lo-stk-degree list
tty->print("Lo stk degree: ");
for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
tty->print("L%d ",i4);
// Dump hi-degree list
tty->print("Hi degree: ");
for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
tty->print("L%d ",i5);
void PhaseChaitin::dump_degree_lists() const {
// Dump lo-degree list
tty->print("Lo degree: ");
for( uint i = _lo_degree; i; i = lrgs(i)._next )
tty->print("L%d ",i);
// Dump lo-stk-degree list
tty->print("Lo stk degree: ");
for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
tty->print("L%d ",i2);
// Dump hi-degree list
tty->print("Hi degree: ");
for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
tty->print("L%d ",i3);
void PhaseChaitin::dump_simplified() const {
tty->print("Simplified: ");
for( uint i = _simplified; i; i = lrgs(i)._next )
tty->print("L%d ",i);
static char *print_reg(OptoReg::Name reg, const PhaseChaitin* pc, char* buf, size_t buf_size) {
os::snprintf_checked(buf, buf_size, "<OptoReg::%d>", (int)reg);
else if (OptoReg::is_reg(reg))
strcpy(buf, Matcher::regName[reg]);
os::snprintf_checked(buf, buf_size, "%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
pc->reg2offset(reg));
return buf+strlen(buf);
// Dump a register name into a buffer. Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register(const Node* n, char* buf, size_t buf_size) const {
// Post allocation, use direct mappings, no LRG info available
print_reg( get_reg_first(n), this, buf, buf_size);
uint lidx = _lrg_map.find_const(n); // Grab LRG number
os::snprintf_checked(buf, buf_size, "L%d",lidx); // No register binding yet
} else if( !lidx ) { // Special, not allocated value
strcpy(buf,"Special");
if (lrgs(lidx)._is_vector) {
if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
print_reg( lrgs(lidx).reg(), this, buf, buf_size); // a bound machine register
os::snprintf_checked(buf, buf_size, "L%d",lidx); // No register binding yet
} else if( (lrgs(lidx).num_regs() == 1)
? lrgs(lidx).mask().is_bound1()
: lrgs(lidx).mask().is_bound_pair() ) {
// Hah! We have a bound machine register
print_reg( lrgs(lidx).reg(), this, buf, buf_size);
os::snprintf_checked(buf, buf_size, "L%d",lidx); // No register binding yet
return buf+strlen(buf);
void PhaseChaitin::dump_for_spill_split_recycle() const {
if( WizardMode && (PrintCompilation || PrintOpto) ) {
// Display which live ranges need to be split and the allocator's state
tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
tty->print("L%d: ", bidx);
void PhaseChaitin::dump_frame() const {
const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
const TypeTuple *domain = C->tf()->domain();
const int argcnt = domain->cnt() - TypeFunc::Parms;
// Incoming arguments in registers dump
for( int k = 0; k < argcnt; k++ ) {
OptoReg::Name parmreg = _matcher._parm_regs[k].first();
if( OptoReg::is_reg(parmreg)) {
const char *reg_name = OptoReg::regname(parmreg);
tty->print("#r%3.3d %s", parmreg, reg_name);
parmreg = _matcher._parm_regs[k].second();
if( OptoReg::is_reg(parmreg)) {
tty->print(":%s", OptoReg::regname(parmreg));
tty->print(" : parm %d: ", k);
domain->field_at(k + TypeFunc::Parms)->dump();
// Check for un-owned padding above incoming args
OptoReg::Name reg = _matcher._new_SP;
if( reg > _matcher._in_arg_limit ) {
reg = OptoReg::add(reg, -1);
tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
// Incoming argument area dump
OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
while( reg > begin_in_arg ) {
reg = OptoReg::add(reg, -1);
tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
for( j = 0; j < argcnt; j++) {
if( _matcher._parm_regs[j].first() == reg ||
_matcher._parm_regs[j].second() == reg ) {
tty->print("parm %d: ",j);
domain->field_at(j + TypeFunc::Parms)->dump();
tty->print_cr("HOLE, owned by SELF");
// Old outgoing preserve area
while( reg > _matcher._old_SP ) {
reg = OptoReg::add(reg, -1);
tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
// Preserve area dump
int fixed_slots = C->fixed_slots();
OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
OptoReg::Name return_addr = _matcher.return_addr();
reg = OptoReg::add(reg, -1);
while (OptoReg::is_stack(reg)) {
tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
if (return_addr == reg) {
tty->print_cr("return address");
} else if (reg >= begin_in_preserve) {
// Preserved slots are present on x86
if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
tty->print_cr("saved fp register");
else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
tty->print_cr("0xBADB100D +VerifyStackAtCalls");
tty->print_cr("in_preserve");
} else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
tty->print_cr("pad2, stack alignment");
reg = OptoReg::add(reg, -1);
reg = OptoReg::add(_matcher._new_SP, _framesize );
while( reg > _matcher._out_arg_limit ) {
reg = OptoReg::add(reg, -1);
tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
// Outgoing argument area dump
while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
reg = OptoReg::add(reg, -1);
tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
// Outgoing new preserve area
while( reg > _matcher._new_SP ) {
reg = OptoReg::add(reg, -1);
tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
void PhaseChaitin::dump_bb(uint pre_order) const {
tty->print_cr("---dump of B%d---",pre_order);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
if (block->_pre_order == pre_order) {
void PhaseChaitin::dump_lrg(uint lidx, bool defs_only) const {
tty->print_cr("---dump of L%d---",lidx);
if (lidx >= _lrg_map.max_lrg_id()) {
tty->print("Attempt to print live range index beyond max live range.\n");
tty->print("L%d: ",lidx);
if (lidx < _ifg->_maxlrg) {
tty->print_cr("new LRG");
if( _ifg && lidx < _ifg->_maxlrg) {
tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
_ifg->neighbors(lidx)->dump();
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
// For all instructions
for( uint j = 0; j < block->number_of_nodes(); j++ ) {
Node *n = block->get_node(j);
if (_lrg_map.find_const(n) == lidx) {
block->dump_head(&_cfg);
uint cnt = n->req();
for( uint k = 1; k < cnt; k++ ) {
continue; // be robust in the dumper
if (_lrg_map.find_const(m) == lidx) {
block->dump_head(&_cfg);
} // End of per-block dump
#endif // not PRODUCT
// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
Unique_Node_List worklist(a);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->get_node(j-1);
// Found a safepoint?
if (n->is_MachSafePoint()) {
MachSafePointNode* sfpt = n->as_MachSafePoint();
JVMState* jvms = sfpt->jvms();
if (jvms != nullptr) {
// Now scan for a live derived pointer
if (jvms->oopoff() < sfpt->req()) {
// Check each derived/base pair
for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
Node* check = sfpt->in(idx);
bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
// search upwards through spills and spill phis for AddP
worklist.push(check);
while (k < worklist.size()) {
check = worklist.at(k);
assert(check, "Bad base or derived pointer");
// See PhaseChaitin::find_base_for_derived() for all cases.
int isc = check->is_Copy();
worklist.push(check->in(isc));
} else if (check->is_Phi()) {
for (uint m = 1; m < check->req(); m++) {
worklist.push(check->in(m));
} else if (check->is_Con()) {
if (is_derived && check->bottom_type()->is_ptr()->_offset != 0) {
// Derived is null+non-zero offset, base must be null.
assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer");
// Base either ConP(nullptr) or loadConP
if (check->is_Mach()) {
assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
assert(check->Opcode() == Op_ConP &&
check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
} else if (check->bottom_type()->is_ptr()->_offset == 0) {
if (check->is_Proj() || (check->is_Mach() &&
(check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
(UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
assert(false, "Bad base or derived pointer");
assert(is_derived, "Bad base pointer");
assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
assert(k < 100000, "Derived pointer checking in infinite loop");
} // End of check for derived pointers
} // End of check for debug info
} // End of if found a safepoint
} // End of forall instructions in block
} // End of forall blocks
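// Sketch of the push-once worklist pattern used by verify_base_ptrs(): a
// Unique_Node_List-style queue that ignores duplicate pushes, so the upward
// search through spill copies and Phis terminates even on cyclic graphs.
// Plain std containers over ints; illustrative only.
#include <unordered_set>
#include <vector>
struct ToyWorklist {
  std::vector<int> items;
  std::unordered_set<int> seen;
  void push(int n) {
    if (seen.insert(n).second) items.push_back(n); // only the first push lands
  }
  size_t size() const { return items.size(); }
  int at(size_t k) const { return items[k]; }      // stable index-based scan
};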
// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify(ResourceArea* a, bool verify_ifg) const {
if (VerifyRegisterAllocator) {
verify_base_ptrs(a);
int PhaseChaitin::_final_loads = 0;
int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves = 0;
int PhaseChaitin::_final_copies = 0;
double PhaseChaitin::_final_load_cost = 0;
double PhaseChaitin::_final_store_cost = 0;
double PhaseChaitin::_final_memove_cost = 0;
double PhaseChaitin::_final_copy_cost = 0;
int PhaseChaitin::_conserv_coalesce = 0;
int PhaseChaitin::_conserv_coalesce_pair = 0;
int PhaseChaitin::_conserv_coalesce_trie = 0;
int PhaseChaitin::_conserv_coalesce_quad = 0;
int PhaseChaitin::_post_alloc = 0;
int PhaseChaitin::_lost_opp_pp_coalesce = 0;
int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
int PhaseChaitin::_used_cisc_instructions = 0;
int PhaseChaitin::_unused_cisc_instructions = 0;
int PhaseChaitin::_allocator_attempts = 0;
int PhaseChaitin::_allocator_successes = 0;
uint PhaseChaitin::_high_pressure = 0;
uint PhaseChaitin::_low_pressure = 0;
void PhaseChaitin::print_chaitin_statistics() {
tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
tty->print_cr("Adjusted spill cost = %7.0f.",
_final_load_cost*4.0 + _final_store_cost * 2.0 +
_final_copy_cost*1.0 + _final_memove_cost*12.0);
tty->print("Conservatively coalesced %d copies, %d pairs",
_conserv_coalesce, _conserv_coalesce_pair);
if( _conserv_coalesce_trie || _conserv_coalesce_quad )
tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
tty->print_cr(", %d post alloc.", _post_alloc);
if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
_lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
if( _used_cisc_instructions || _unused_cisc_instructions )
tty->print_cr("Used cisc instructions %d, remained in register %d",
_used_cisc_instructions, _unused_cisc_instructions);
if( _allocator_successes != 0 )
tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
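// The adjusted spill cost printed above weights each kind of inserted
// operation by a rough expense factor: loads x4, stores x2, copies x1,
// memory-to-memory moves x12. A standalone restatement of that formula:
static double adjusted_spill_cost(double load_cost, double store_cost,
                                  double copy_cost, double memove_cost) {
  return load_cost * 4.0 + store_cost * 2.0 +
         copy_cost * 1.0 + memove_cost * 12.0;
}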
#endif // not PRODUCT