jdk

Форк
0
2293 строки · 80.3 Кб
1
/*
2
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "libadt/vectset.hpp"
27
#include "memory/allocation.inline.hpp"
28
#include "memory/resourceArea.hpp"
29
#include "opto/block.hpp"
30
#include "opto/c2compiler.hpp"
31
#include "opto/callnode.hpp"
32
#include "opto/cfgnode.hpp"
33
#include "opto/machnode.hpp"
34
#include "opto/opcodes.hpp"
35
#include "opto/phaseX.hpp"
36
#include "opto/rootnode.hpp"
37
#include "opto/runtime.hpp"
38
#include "opto/chaitin.hpp"
39
#include "runtime/deoptimization.hpp"
40

41
// Portions of code courtesy of Clifford Click
42

43
// Optimization - Graph Style
44

45
// To avoid float value underflow
46
#define MIN_BLOCK_FREQUENCY 1.e-35f
47

48
//----------------------------schedule_node_into_block-------------------------
49
// Insert node n into block b. Look for projections of n and make sure they
50
// are in b also.
51
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
52
  // Set basic block of n, Add n to b,
53
  map_node_to_block(n, b);
54
  b->add_inst(n);
55

56
  // After Matching, nearly any old Node may have projections trailing it.
57
  // These are usually machine-dependent flags.  In any case, they might
58
  // float to another block below this one.  Move them up.
59
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
60
    Node*  use  = n->fast_out(i);
61
    if (use->is_Proj()) {
62
      Block* buse = get_block_for_node(use);
63
      if (buse != b) {              // In wrong block?
64
        if (buse != nullptr) {
65
          buse->find_remove(use);   // Remove from wrong block
66
        }
67
        map_node_to_block(use, b);
68
        b->add_inst(use);
69
      }
70
    }
71
  }
72
}
73

74
//----------------------------replace_block_proj_ctrl-------------------------
75
// Nodes that have is_block_proj() nodes as their control need to use
76
// the appropriate Region for their actual block as their control since
77
// the projection will be in a predecessor block.
78
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
79
  const Node *in0 = n->in(0);
80
  assert(in0 != nullptr, "Only control-dependent");
81
  const Node *p = in0->is_block_proj();
82
  if (p != nullptr && p != n) {    // Control from a block projection?
83
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
84
    // Find trailing Region
85
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
86
    uint j = 0;
87
    if (pb->_num_succs != 1) {  // More then 1 successor?
88
      // Search for successor
89
      uint max = pb->number_of_nodes();
90
      assert( max > 1, "" );
91
      uint start = max - pb->_num_succs;
92
      // Find which output path belongs to projection
93
      for (j = start; j < max; j++) {
94
        if( pb->get_node(j) == in0 )
95
          break;
96
      }
97
      assert( j < max, "must find" );
98
      // Change control to match head of successor basic block
99
      j -= start;
100
    }
101
    n->set_req(0, pb->_succs[j]->head());
102
  }
103
}
104

105
bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
106
  assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
107
  if (dom_node == node) {
108
    return true;
109
  }
110
  Block* d = find_block_for_node(dom_node);
111
  Block* n = find_block_for_node(node);
112
  assert(n != nullptr && d != nullptr, "blocks must exist");
113

114
  if (d == n) {
115
    if (dom_node->is_block_start()) {
116
      return true;
117
    }
118
    if (node->is_block_start()) {
119
      return false;
120
    }
121
    if (dom_node->is_block_proj()) {
122
      return false;
123
    }
124
    if (node->is_block_proj()) {
125
      return true;
126
    }
127

128
    assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
129
    assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");
130

131
    // Neither 'node' nor 'dom_node' is a block start or block projection.
132
    // Check if 'dom_node' is above 'node' in the control graph.
133
    if (is_dominating_control(dom_node, node)) {
134
      return true;
135
    }
136

137
#ifdef ASSERT
138
    // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
139
    if (!is_dominating_control(node, dom_node)) {
140
      node->dump();
141
      dom_node->dump();
142
      assert(false, "neither dom_node nor node dominates the other");
143
    }
144
#endif
145

146
    return false;
147
  }
148
  return d->dom_lca(n) == d;
149
}
150

151
bool PhaseCFG::is_CFG(Node* n) {
152
  return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
153
}
154

155
bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
156
  bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
157
  assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
158
          || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
159
  return result;
160
}
161

162
Block* PhaseCFG::find_block_for_node(Node* n) const {
163
  if (n->is_block_start() || n->is_block_proj()) {
164
    return get_block_for_node(n);
165
  } else {
166
    // Walk the control graph up if 'n' is not a block start nor a block projection. In this case 'n' must be
167
    // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
168
    assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
169
    Node* ctrl = n->in(0);
170
    while (!ctrl->is_block_start()) {
171
      ctrl = ctrl->in(0);
172
    }
173
    return get_block_for_node(ctrl);
174
  }
175
}
176

177
// Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
178
bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
179
  Node* ctrl = n->in(0);
180
  while (!ctrl->is_block_start()) {
181
    if (ctrl == dom_ctrl) {
182
      return true;
183
    }
184
    ctrl = ctrl->in(0);
185
  }
186
  return false;
187
}
188

189

190
//------------------------------schedule_pinned_nodes--------------------------
191
// Set the basic block for Nodes pinned into blocks
192
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
193
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
194
  GrowableArray <Node*> spstack(C->live_nodes() + 8);
195
  spstack.push(_root);
196
  while (spstack.is_nonempty()) {
197
    Node* node = spstack.pop();
198
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
199
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
200
        assert(node->in(0), "pinned Node must have Control");
201
        // Before setting block replace block_proj control edge
202
        replace_block_proj_ctrl(node);
203
        Node* input = node->in(0);
204
        while (!input->is_block_start()) {
205
          input = input->in(0);
206
        }
207
        Block* block = get_block_for_node(input); // Basic block of controlling input
208
        schedule_node_into_block(node, block);
209
      }
210

211
      // If the node has precedence edges (added when CastPP nodes are
212
      // removed in final_graph_reshaping), fix the control of the
213
      // node to cover the precedence edges and remove the
214
      // dependencies.
215
      Node* n = nullptr;
216
      for (uint i = node->len()-1; i >= node->req(); i--) {
217
        Node* m = node->in(i);
218
        if (m == nullptr) continue;
219

220
        // Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
221
        if (is_CFG(m)) {
222
          node->rm_prec(i);
223
          if (n == nullptr) {
224
            n = m;
225
          } else {
226
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
227
            n = is_dominator(n, m) ? m : n;
228
          }
229
        } else {
230
          assert(node->is_Mach(), "sanity");
231
          assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
232
        }
233
      }
234
      if (n != nullptr) {
235
        assert(node->in(0), "control should have been set");
236
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
237
        if (!is_dominator(n, node->in(0))) {
238
          node->set_req(0, n);
239
        }
240
      }
241

242
      // process all inputs that are non null
243
      for (int i = node->req()-1; i >= 0; --i) {
244
        if (node->in(i) != nullptr) {
245
          spstack.push(node->in(i));
246
        }
247
      }
248
    }
249
  }
250
}
251

252
#ifdef ASSERT
253
// Assert that new input b2 is dominated by all previous inputs.
254
// Check this by by seeing that it is dominated by b1, the deepest
255
// input observed until b2.
256
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
257
  if (b1 == nullptr)  return;
258
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
259
  Block* tmp = b2;
260
  while (tmp != b1 && tmp != nullptr) {
261
    tmp = tmp->_idom;
262
  }
263
  if (tmp != b1) {
264
    // Detected an unschedulable graph.  Print some nice stuff and die.
265
    tty->print_cr("!!! Unschedulable graph !!!");
266
    for (uint j=0; j<n->len(); j++) { // For all inputs
267
      Node* inn = n->in(j); // Get input
268
      if (inn == nullptr)  continue;  // Ignore null, missing inputs
269
      Block* inb = cfg->get_block_for_node(inn);
270
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
271
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
272
      inn->dump();
273
    }
274
    tty->print("Failing node: ");
275
    n->dump();
276
    assert(false, "unscheduable graph");
277
  }
278
}
279
#endif
280

281
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
282
  // Find the last input dominated by all other inputs.
283
  Block* deepb           = nullptr;     // Deepest block so far
284
  int    deepb_dom_depth = 0;
285
  for (uint k = 0; k < n->len(); k++) { // For all inputs
286
    Node* inn = n->in(k);               // Get input
287
    if (inn == nullptr)  continue;      // Ignore null, missing inputs
288
    Block* inb = cfg->get_block_for_node(inn);
289
    assert(inb != nullptr, "must already have scheduled this input");
290
    if (deepb_dom_depth < (int) inb->_dom_depth) {
291
      // The new inb must be dominated by the previous deepb.
292
      // The various inputs must be linearly ordered in the dom
293
      // tree, or else there will not be a unique deepest block.
294
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
295
      deepb = inb;                      // Save deepest block
296
      deepb_dom_depth = deepb->_dom_depth;
297
    }
298
  }
299
  assert(deepb != nullptr, "must be at least one input to n");
300
  return deepb;
301
}
302

303

304
//------------------------------schedule_early---------------------------------
305
// Find the earliest Block any instruction can be placed in.  Some instructions
306
// are pinned into Blocks.  Unpinned instructions can appear in last block in
307
// which all their inputs occur.
308
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
309
  // Allocate stack with enough space to avoid frequent realloc
310
  Node_Stack nstack(roots.size() + 8);
311
  // _root will be processed among C->top() inputs
312
  roots.push(C->top(), 0);
313
  visited.set(C->top()->_idx);
314

315
  while (roots.size() != 0) {
316
    // Use local variables nstack_top_n & nstack_top_i to cache values
317
    // on stack's top.
318
    Node* parent_node = roots.node();
319
    uint  input_index = 0;
320
    roots.pop();
321

322
    while (true) {
323
      if (input_index == 0) {
324
        // Fixup some control.  Constants without control get attached
325
        // to root and nodes that use is_block_proj() nodes should be attached
326
        // to the region that starts their block.
327
        const Node* control_input = parent_node->in(0);
328
        if (control_input != nullptr) {
329
          replace_block_proj_ctrl(parent_node);
330
        } else {
331
          // Is a constant with NO inputs?
332
          if (parent_node->req() == 1) {
333
            parent_node->set_req(0, _root);
334
          }
335
        }
336
      }
337

338
      // First, visit all inputs and force them to get a block.  If an
339
      // input is already in a block we quit following inputs (to avoid
340
      // cycles). Instead we put that Node on a worklist to be handled
341
      // later (since IT'S inputs may not have a block yet).
342

343
      // Assume all n's inputs will be processed
344
      bool done = true;
345

346
      while (input_index < parent_node->len()) {
347
        Node* in = parent_node->in(input_index++);
348
        if (in == nullptr) {
349
          continue;
350
        }
351

352
        int is_visited = visited.test_set(in->_idx);
353
        if (!has_block(in)) {
354
          if (is_visited) {
355
            assert(false, "graph should be schedulable");
356
            return false;
357
          }
358
          // Save parent node and next input's index.
359
          nstack.push(parent_node, input_index);
360
          // Process current input now.
361
          parent_node = in;
362
          input_index = 0;
363
          // Not all n's inputs processed.
364
          done = false;
365
          break;
366
        } else if (!is_visited) {
367
          // Visit this guy later, using worklist
368
          roots.push(in, 0);
369
        }
370
      }
371

372
      if (done) {
373
        // All of n's inputs have been processed, complete post-processing.
374

375
        // Some instructions are pinned into a block.  These include Region,
376
        // Phi, Start, Return, and other control-dependent instructions and
377
        // any projections which depend on them.
378
        if (!parent_node->pinned()) {
379
          // Set earliest legal block.
380
          Block* earliest_block = find_deepest_input(parent_node, this);
381
          map_node_to_block(parent_node, earliest_block);
382
        } else {
383
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
384
        }
385

386
        if (nstack.is_empty()) {
387
          // Finished all nodes on stack.
388
          // Process next node on the worklist 'roots'.
389
          break;
390
        }
391
        // Get saved parent node and next input's index.
392
        parent_node = nstack.node();
393
        input_index = nstack.index();
394
        nstack.pop();
395
      }
396
    }
397
  }
398
  return true;
399
}
400

401
//------------------------------dom_lca----------------------------------------
402
// Find least common ancestor in dominator tree
403
// LCA is a current notion of LCA, to be raised above 'this'.
404
// As a convenient boundary condition, return 'this' if LCA is null.
405
// Find the LCA of those two nodes.
406
Block* Block::dom_lca(Block* LCA) {
407
  if (LCA == nullptr || LCA == this)  return this;
408

409
  Block* anc = this;
410
  while (anc->_dom_depth > LCA->_dom_depth)
411
    anc = anc->_idom;           // Walk up till anc is as high as LCA
412

413
  while (LCA->_dom_depth > anc->_dom_depth)
414
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc
415

416
  while (LCA != anc) {          // Walk both up till they are the same
417
    LCA = LCA->_idom;
418
    anc = anc->_idom;
419
  }
420

421
  return LCA;
422
}
423

424
//--------------------------raise_LCA_above_use--------------------------------
425
// We are placing a definition, and have been given a def->use edge.
426
// The definition must dominate the use, so move the LCA upward in the
427
// dominator tree to dominate the use.  If the use is a phi, adjust
428
// the LCA only with the phi input paths which actually use this def.
429
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
430
  Block* buse = cfg->get_block_for_node(use);
431
  if (buse == nullptr) return LCA;   // Unused killing Projs have no use block
432
  if (!use->is_Phi())  return buse->dom_lca(LCA);
433
  uint pmax = use->req();       // Number of Phi inputs
434
  // Why does not this loop just break after finding the matching input to
435
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
436
  // chains.  Means I cannot distinguish, from the def-use direction, which
437
  // of many use-defs lead from the same use to the same def.  That is, this
438
  // Phi might have several uses of the same def.  Each use appears in a
439
  // different predecessor block.  But when I enter here, I cannot distinguish
440
  // which use-def edge I should find the predecessor block for.  So I find
441
  // them all.  Means I do a little extra work if a Phi uses the same value
442
  // more than once.
443
  for (uint j=1; j<pmax; j++) { // For all inputs
444
    if (use->in(j) == def) {    // Found matching input?
445
      Block* pred = cfg->get_block_for_node(buse->pred(j));
446
      LCA = pred->dom_lca(LCA);
447
    }
448
  }
449
  return LCA;
450
}
451

452
//----------------------------raise_LCA_above_marks----------------------------
453
// Return a new LCA that dominates LCA and any of its marked predecessors.
454
// Search all my parents up to 'early' (exclusive), looking for predecessors
455
// which are marked with the given index.  Return the LCA (in the dom tree)
456
// of all marked blocks.  If there are none marked, return the original
457
// LCA.
458
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
459
  Block_List worklist;
460
  worklist.push(LCA);
461
  while (worklist.size() > 0) {
462
    Block* mid = worklist.pop();
463
    if (mid == early)  continue;  // stop searching here
464

465
    // Test and set the visited bit.
466
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited
467

468
    // Don't process the current LCA, otherwise the search may terminate early
469
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
470
      // Raise the LCA.
471
      LCA = mid->dom_lca(LCA);
472
      if (LCA == early)  break;   // stop searching everywhere
473
      assert(early->dominates(LCA), "early is high enough");
474
      // Resume searching at that point, skipping intermediate levels.
475
      worklist.push(LCA);
476
      if (LCA == mid)
477
        continue; // Don't mark as visited to avoid early termination.
478
    } else {
479
      // Keep searching through this block's predecessors.
480
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
481
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
482
        worklist.push(mid_parent);
483
      }
484
    }
485
    mid->set_raise_LCA_visited(mark);
486
  }
487
  return LCA;
488
}
489

490
//--------------------------memory_early_block--------------------------------
491
// This is a variation of find_deepest_input, the heart of schedule_early.
492
// Find the "early" block for a load, if we considered only memory and
493
// address inputs, that is, if other data inputs were ignored.
494
//
495
// Because a subset of edges are considered, the resulting block will
496
// be earlier (at a shallower dom_depth) than the true schedule_early
497
// point of the node. We compute this earlier block as a more permissive
498
// site for anti-dependency insertion, but only if subsume_loads is enabled.
499
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
500
  Node* base;
501
  Node* index;
502
  Node* store = load->in(MemNode::Memory);
503
  load->as_Mach()->memory_inputs(base, index);
504

505
  assert(base != NodeSentinel && index != NodeSentinel,
506
         "unexpected base/index inputs");
507

508
  Node* mem_inputs[4];
509
  int mem_inputs_length = 0;
510
  if (base != nullptr)  mem_inputs[mem_inputs_length++] = base;
511
  if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
512
  if (store != nullptr) mem_inputs[mem_inputs_length++] = store;
513

514
  // In the comparison below, add one to account for the control input,
515
  // which may be null, but always takes up a spot in the in array.
516
  if (mem_inputs_length + 1 < (int) load->req()) {
517
    // This "load" has more inputs than just the memory, base and index inputs.
518
    // For purposes of checking anti-dependences, we need to start
519
    // from the early block of only the address portion of the instruction,
520
    // and ignore other blocks that may have factored into the wider
521
    // schedule_early calculation.
522
    if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);
523

524
    Block* deepb           = nullptr;        // Deepest block so far
525
    int    deepb_dom_depth = 0;
526
    for (int i = 0; i < mem_inputs_length; i++) {
527
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
528
      if (deepb_dom_depth < (int) inb->_dom_depth) {
529
        // The new inb must be dominated by the previous deepb.
530
        // The various inputs must be linearly ordered in the dom
531
        // tree, or else there will not be a unique deepest block.
532
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
533
        deepb = inb;                      // Save deepest block
534
        deepb_dom_depth = deepb->_dom_depth;
535
      }
536
    }
537
    early = deepb;
538
  }
539

540
  return early;
541
}
542

543
// This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
544
bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
545
  // We expect an anti-dependence edge from 'load' to 'store', except when
546
  // implicit_null_check() has hoisted 'store' above its early block to
547
  // perform an implicit null check, and 'load' is placed in the null
548
  // block. In this case it is safe to ignore the anti-dependence, as the
549
  // null block is only reached if 'store' tries to write to null object and
550
  // 'load' read from non-null object (there is preceding check for that)
551
  // These objects can't be the same.
552
  Block* store_block = get_block_for_node(store);
553
  Block* load_block = get_block_for_node(load);
554
  Node* end = store_block->end();
555
  if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
556
    Node* if_true = end->find_out_with(Op_IfTrue);
557
    assert(if_true != nullptr, "null check without null projection");
558
    Node* null_block_region = if_true->find_out_with(Op_Region);
559
    assert(null_block_region != nullptr, "null check without null region");
560
    return get_block_for_node(null_block_region) == load_block;
561
  }
562
  return false;
563
}
564

565
//--------------------------insert_anti_dependences---------------------------
566
// A load may need to witness memory that nearby stores can overwrite.
567
// For each nearby store, either insert an "anti-dependence" edge
568
// from the load to the store, or else move LCA upward to force the
569
// load to (eventually) be scheduled in a block above the store.
570
//
571
// Do not add edges to stores on distinct control-flow paths;
572
// only add edges to stores which might interfere.
573
//
574
// Return the (updated) LCA.  There will not be any possibly interfering
575
// store between the load's "early block" and the updated LCA.
576
// Any stores in the updated LCA will have new precedence edges
577
// back to the load.  The caller is expected to schedule the load
578
// in the LCA, in which case the precedence edges will make LCM
579
// preserve anti-dependences.  The caller may also hoist the load
580
// above the LCA, if it is not the early block.
581
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
582
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
583
  assert(LCA != nullptr, "");
584
  DEBUG_ONLY(Block* LCA_orig = LCA);
585

586
  // Compute the alias index.  Loads and stores with different alias indices
587
  // do not need anti-dependence edges.
588
  int load_alias_idx = C->get_alias_index(load->adr_type());
589
#ifdef ASSERT
590
  assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
591
  if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
592
      (PrintOpto || VerifyAliases ||
593
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
594
    // Load nodes should not consume all of memory.
595
    // Reporting a bottom type indicates a bug in adlc.
596
    // If some particular type of node validly consumes all of memory,
597
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
598
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
599
    load->dump(2);
600
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
601
  }
602
#endif
603

604
  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
605
    // It is impossible to spoil this load by putting stores before it,
606
    // because we know that the stores will never update the value
607
    // which 'load' must witness.
608
    return LCA;
609
  }
610

611
  node_idx_t load_index = load->_idx;
612

613
  // Note the earliest legal placement of 'load', as determined by
614
  // by the unique point in the dom tree where all memory effects
615
  // and other inputs are first available.  (Computed by schedule_early.)
616
  // For normal loads, 'early' is the shallowest place (dom graph wise)
617
  // to look for anti-deps between this load and any store.
618
  Block* early = get_block_for_node(load);
619

620
  // If we are subsuming loads, compute an "early" block that only considers
621
  // memory or address inputs. This block may be different than the
622
  // schedule_early block in that it could be at an even shallower depth in the
623
  // dominator tree, and allow for a broader discovery of anti-dependences.
624
  if (C->subsume_loads()) {
625
    early = memory_early_block(load, early, this);
626
  }
627

628
  ResourceArea *area = Thread::current()->resource_area();
629
  Node_List worklist_mem(area);     // prior memory state to store
630
  Node_List worklist_store(area);   // possible-def to explore
631
  Node_List worklist_visited(area); // visited mergemem nodes
632
  Node_List non_early_stores(area); // all relevant stores outside of early
633
  bool must_raise_LCA = false;
634

635
  // 'load' uses some memory state; look for users of the same state.
636
  // Recurse through MergeMem nodes to the stores that use them.
637

638
  // Each of these stores is a possible definition of memory
639
  // that 'load' needs to use.  We need to force 'load'
640
  // to occur before each such store.  When the store is in
641
  // the same block as 'load', we insert an anti-dependence
642
  // edge load->store.
643

644
  // The relevant stores "nearby" the load consist of a tree rooted
645
  // at initial_mem, with internal nodes of type MergeMem.
646
  // Therefore, the branches visited by the worklist are of this form:
647
  //    initial_mem -> (MergeMem ->)* store
648
  // The anti-dependence constraints apply only to the fringe of this tree.
649

650
  Node* initial_mem = load->in(MemNode::Memory);
651
  worklist_store.push(initial_mem);
652
  worklist_visited.push(initial_mem);
653
  worklist_mem.push(nullptr);
654
  while (worklist_store.size() > 0) {
655
    // Examine a nearby store to see if it might interfere with our load.
656
    Node* mem   = worklist_mem.pop();
657
    Node* store = worklist_store.pop();
658
    uint op = store->Opcode();
659

660
    // MergeMems do not directly have anti-deps.
661
    // Treat them as internal nodes in a forward tree of memory states,
662
    // the leaves of which are each a 'possible-def'.
663
    if (store == initial_mem    // root (exclusive) of tree we are searching
664
        || op == Op_MergeMem    // internal node of tree we are searching
665
        ) {
666
      mem = store;   // It's not a possibly interfering store.
667
      if (store == initial_mem)
668
        initial_mem = nullptr;  // only process initial memory once
669

670
      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
671
        store = mem->fast_out(i);
672
        if (store->is_MergeMem()) {
673
          // Be sure we don't get into combinatorial problems.
674
          // (Allow phis to be repeated; they can merge two relevant states.)
675
          uint j = worklist_visited.size();
676
          for (; j > 0; j--) {
677
            if (worklist_visited.at(j-1) == store)  break;
678
          }
679
          if (j > 0)  continue; // already on work list; do not repeat
680
          worklist_visited.push(store);
681
        }
682
        worklist_mem.push(mem);
683
        worklist_store.push(store);
684
      }
685
      continue;
686
    }
687

688
    if (op == Op_MachProj || op == Op_Catch)   continue;
689
    if (store->needs_anti_dependence_check())  continue;  // not really a store
690

691
    // Compute the alias index.  Loads and stores with different alias
692
    // indices do not need anti-dependence edges.  Wide MemBar's are
693
    // anti-dependent on everything (except immutable memories).
694
    const TypePtr* adr_type = store->adr_type();
695
    if (!C->can_alias(adr_type, load_alias_idx))  continue;
696

697
    // Most slow-path runtime calls do NOT modify Java memory, but
698
    // they can block and so write Raw memory.
699
    if (store->is_Mach()) {
700
      MachNode* mstore = store->as_Mach();
701
      if (load_alias_idx != Compile::AliasIdxRaw) {
702
        // Check for call into the runtime using the Java calling
703
        // convention (and from there into a wrapper); it has no
704
        // _method.  Can't do this optimization for Native calls because
705
        // they CAN write to Java memory.
706
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
707
          assert(mstore->is_MachSafePoint(), "");
708
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
709
          assert(ms->is_MachCallJava(), "");
710
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
711
          if (mcj->_method == nullptr) {
712
            // These runtime calls do not write to Java visible memory
713
            // (other than Raw) and so do not require anti-dependence edges.
714
            continue;
715
          }
716
        }
717
        // Same for SafePoints: they read/write Raw but only read otherwise.
718
        // This is basically a workaround for SafePoints only defining control
719
        // instead of control + memory.
720
        if (mstore->ideal_Opcode() == Op_SafePoint)
721
          continue;
722
      } else {
723
        // Some raw memory, such as the load of "top" at an allocation,
724
        // can be control dependent on the previous safepoint. See
725
        // comments in GraphKit::allocate_heap() about control input.
726
        // Inserting an anti-dep between such a safepoint and a use
727
        // creates a cycle, and will cause a subsequent failure in
728
        // local scheduling.  (BugId 4919904)
729
        // (%%% How can a control input be a safepoint and not a projection??)
730
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
731
          continue;
732
      }
733
    }
734

735
    // Identify a block that the current load must be above,
736
    // or else observe that 'store' is all the way up in the
737
    // earliest legal block for 'load'.  In the latter case,
738
    // immediately insert an anti-dependence edge.
739
    Block* store_block = get_block_for_node(store);
740
    assert(store_block != nullptr, "unused killing projections skipped above");
741

742
    if (store->is_Phi()) {
743
      // Loop-phis need to raise load before input. (Other phis are treated
744
      // as store below.)
745
      //
746
      // 'load' uses memory which is one (or more) of the Phi's inputs.
747
      // It must be scheduled not before the Phi, but rather before
748
      // each of the relevant Phi inputs.
749
      //
750
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
751
      // we mark each corresponding predecessor block and do a combined
752
      // hoisting operation later (raise_LCA_above_marks).
753
      //
754
      // Do not assert(store_block != early, "Phi merging memory after access")
755
      // PhiNode may be at start of block 'early' with backedge to 'early'
756
      DEBUG_ONLY(bool found_match = false);
757
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
758
        if (store->in(j) == mem) {   // Found matching input?
759
          DEBUG_ONLY(found_match = true);
760
          Block* pred_block = get_block_for_node(store_block->pred(j));
761
          if (pred_block != early) {
762
            // If any predecessor of the Phi matches the load's "early block",
763
            // we do not need a precedence edge between the Phi and 'load'
764
            // since the load will be forced into a block preceding the Phi.
765
            pred_block->set_raise_LCA_mark(load_index);
766
            assert(!LCA_orig->dominates(pred_block) ||
767
                   early->dominates(pred_block), "early is high enough");
768
            must_raise_LCA = true;
769
          } else {
770
            // anti-dependent upon PHI pinned below 'early', no edge needed
771
            LCA = early;             // but can not schedule below 'early'
772
          }
773
        }
774
      }
775
      assert(found_match, "no worklist bug");
776
    } else if (store_block != early) {
777
      // 'store' is between the current LCA and earliest possible block.
778
      // Label its block, and decide later on how to raise the LCA
779
      // to include the effect on LCA of this store.
780
      // If this store's block gets chosen as the raised LCA, we
781
      // will find him on the non_early_stores list and stick him
782
      // with a precedence edge.
783
      // (But, don't bother if LCA is already raised all the way.)
784
      if (LCA != early && !unrelated_load_in_store_null_block(store, load)) {
785
        store_block->set_raise_LCA_mark(load_index);
786
        must_raise_LCA = true;
787
        non_early_stores.push(store);
788
      }
789
    } else {
790
      // Found a possibly-interfering store in the load's 'early' block.
791
      // This means 'load' cannot sink at all in the dominator tree.
792
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
793
      assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
794
      if (verify) {
795
        assert(store->find_edge(load) != -1 || unrelated_load_in_store_null_block(store, load),
796
               "missing precedence edge");
797
      } else {
798
        store->add_prec(load);
799
      }
800
      LCA = early;
801
      // This turns off the process of gathering non_early_stores.
802
    }
803
  }
804
  // (Worklist is now empty; all nearby stores have been visited.)
805

806
  // Finished if 'load' must be scheduled in its 'early' block.
807
  // If we found any stores there, they have already been given
808
  // precedence edges.
809
  if (LCA == early)  return LCA;
810

811
  // We get here only if there are no possibly-interfering stores
812
  // in the load's 'early' block.  Move LCA up above all predecessors
813
  // which contain stores we have noted.
814
  //
815
  // The raised LCA block can be a home to such interfering stores,
816
  // but its predecessors must not contain any such stores.
817
  //
818
  // The raised LCA will be a lower bound for placing the load,
819
  // preventing the load from sinking past any block containing
820
  // a store that may invalidate the memory state required by 'load'.
821
  if (must_raise_LCA)
822
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
823
  if (LCA == early)  return LCA;
824

825
  // Insert anti-dependence edges from 'load' to each store
826
  // in the non-early LCA block.
827
  // Mine the non_early_stores list for such stores.
828
  if (LCA->raise_LCA_mark() == load_index) {
829
    while (non_early_stores.size() > 0) {
830
      Node* store = non_early_stores.pop();
831
      Block* store_block = get_block_for_node(store);
832
      if (store_block == LCA) {
833
        // add anti_dependence from store to load in its own block
834
        assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
835
        if (verify) {
836
          assert(store->find_edge(load) != -1, "missing precedence edge");
837
        } else {
838
          store->add_prec(load);
839
        }
840
      } else {
841
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
842
        // Any other stores we found must be either inside the new LCA
843
        // or else outside the original LCA.  In the latter case, they
844
        // did not interfere with any use of 'load'.
845
        assert(LCA->dominates(store_block)
846
               || !LCA_orig->dominates(store_block), "no stray stores");
847
      }
848
    }
849
  }
850

851
  // Return the highest block containing stores; any stores
852
  // within that block have been given anti-dependence edges.
853
  return LCA;
854
}
855

856
// This class is used to iterate backwards over the nodes in the graph.
857

858
class Node_Backward_Iterator {
859

860
private:
861
  Node_Backward_Iterator();
862

863
public:
864
  // Constructor for the iterator
865
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);
866

867
  // Postincrement operator to iterate over the nodes
868
  Node *next();
869

870
private:
871
  VectorSet   &_visited;
872
  Node_Stack  &_stack;
873
  PhaseCFG &_cfg;
874
};
875

876
// Constructor for the Node_Backward_Iterator
877
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
878
  : _visited(visited), _stack(stack), _cfg(cfg) {
879
  // The stack should contain exactly the root
880
  stack.clear();
881
  stack.push(root, root->outcnt());
882

883
  // Clear the visited bits
884
  visited.clear();
885
}
886

887
// Iterator for the Node_Backward_Iterator
888
Node *Node_Backward_Iterator::next() {
889

890
  // If the _stack is empty, then just return null: finished.
891
  if ( !_stack.size() )
892
    return nullptr;
893

894
  // I visit unvisited not-anti-dependence users first, then anti-dependent
895
  // children next. I iterate backwards to support removal of nodes.
896
  // The stack holds states consisting of 3 values:
897
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
898
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
899
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
900
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
901
  _stack.pop();
902

903
  // I cycle here when I am entering a deeper level of recursion.
904
  // The key variable 'self' was set prior to jumping here.
905
  while( 1 ) {
906

907
    _visited.set(self->_idx);
908

909
    // Now schedule all uses as late as possible.
910
    const Node* src = self->is_Proj() ? self->in(0) : self;
911
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
912

913
    // Schedule all nodes in a post-order visit
914
    Node *unvisited = nullptr;  // Unvisited anti-dependent Node, if any
915

916
    // Scan for unvisited nodes
917
    while (idx > 0) {
918
      // For all uses, schedule late
919
      Node* n = self->raw_out(--idx); // Use
920

921
      // Skip already visited children
922
      if ( _visited.test(n->_idx) )
923
        continue;
924

925
      // do not traverse backward control edges
926
      Node *use = n->is_Proj() ? n->in(0) : n;
927
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
928

929
      if ( use_rpo < src_rpo )
930
        continue;
931

932
      // Phi nodes always precede uses in a basic block
933
      if ( use_rpo == src_rpo && use->is_Phi() )
934
        continue;
935

936
      unvisited = n;      // Found unvisited
937

938
      // Check for possible-anti-dependent
939
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
940
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
941
        unvisited = n;      // Found unvisited
942
        break;
943
      }
944
    }
945

946
    // Did I find an unvisited not-anti-dependent Node?
947
    if (!unvisited) {
948
      if (!iterate_anti_dep) {
949
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
950
        iterate_anti_dep = true;
951
        idx = self->outcnt();
952
        continue;
953
      }
954
      break;                  // All done with children; post-visit 'self'
955
    }
956

957
    // Visit the unvisited Node.  Contains the obvious push to
958
    // indicate I'm entering a deeper level of recursion.  I push the
959
    // old state onto the _stack and set a new state and loop (recurse).
960
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
961
    self = unvisited;
962
    iterate_anti_dep = false;
963
    idx = self->outcnt();
964
  } // End recursion loop
965

966
  return self;
967
}
968

969
//------------------------------ComputeLatenciesBackwards----------------------
970
// Compute the latency of all the instructions.
971
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
972
#ifndef PRODUCT
973
  if (trace_opto_pipelining())
974
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
975
#endif
976

977
  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
978
  Node *n;
979

980
  // Walk over all the nodes from last to first
981
  while ((n = iter.next())) {
982
    // Set the latency for the definitions of this instruction
983
    partial_latency_of_defs(n);
984
  }
985
} // end ComputeLatenciesBackwards
986

987
//------------------------------partial_latency_of_defs------------------------
988
// Compute the latency impact of this node on all defs.  This computes
989
// a number that increases as we approach the beginning of the routine.
990
void PhaseCFG::partial_latency_of_defs(Node *n) {
991
  // Set the latency for this instruction
992
#ifndef PRODUCT
993
  if (trace_opto_pipelining()) {
994
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
995
    dump();
996
  }
997
#endif
998

999
  if (n->is_Proj()) {
1000
    n = n->in(0);
1001
  }
1002

1003
  if (n->is_Root()) {
1004
    return;
1005
  }
1006

1007
  uint nlen = n->len();
1008
  uint use_latency = get_latency_for_node(n);
1009
  uint use_pre_order = get_block_for_node(n)->_pre_order;
1010

1011
  for (uint j = 0; j < nlen; j++) {
1012
    Node *def = n->in(j);
1013

1014
    if (!def || def == n) {
1015
      continue;
1016
    }
1017

1018
    // Walk backwards thru projections
1019
    if (def->is_Proj()) {
1020
      def = def->in(0);
1021
    }
1022

1023
#ifndef PRODUCT
1024
    if (trace_opto_pipelining()) {
1025
      tty->print("#    in(%2d): ", j);
1026
      def->dump();
1027
    }
1028
#endif
1029

1030
    // If the defining block is not known, assume it is ok
1031
    Block *def_block = get_block_for_node(def);
1032
    uint def_pre_order = def_block ? def_block->_pre_order : 0;
1033

1034
    if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1035
      continue;
1036
    }
1037

1038
    uint delta_latency = n->latency(j);
1039
    uint current_latency = delta_latency + use_latency;
1040

1041
    if (get_latency_for_node(def) < current_latency) {
1042
      set_latency_for_node(def, current_latency);
1043
    }
1044

1045
#ifndef PRODUCT
1046
    if (trace_opto_pipelining()) {
1047
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1048
    }
1049
#endif
1050
  }
1051
}
1052

1053
//------------------------------latency_from_use-------------------------------
1054
// Compute the latency of a specific use
1055
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1056
  // If self-reference, return no latency
1057
  if (use == n || use->is_Root()) {
1058
    return 0;
1059
  }
1060

1061
  uint def_pre_order = get_block_for_node(def)->_pre_order;
1062
  uint latency = 0;
1063

1064
  // If the use is not a projection, then it is simple...
1065
  if (!use->is_Proj()) {
1066
#ifndef PRODUCT
1067
    if (trace_opto_pipelining()) {
1068
      tty->print("#    out(): ");
1069
      use->dump();
1070
    }
1071
#endif
1072

1073
    uint use_pre_order = get_block_for_node(use)->_pre_order;
1074

1075
    if (use_pre_order < def_pre_order)
1076
      return 0;
1077

1078
    if (use_pre_order == def_pre_order && use->is_Phi())
1079
      return 0;
1080

1081
    uint nlen = use->len();
1082
    uint nl = get_latency_for_node(use);
1083

1084
    for ( uint j=0; j<nlen; j++ ) {
1085
      if (use->in(j) == n) {
1086
        // Change this if we want local latencies
1087
        uint ul = use->latency(j);
1088
        uint  l = ul + nl;
1089
        if (latency < l) latency = l;
1090
#ifndef PRODUCT
1091
        if (trace_opto_pipelining()) {
1092
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
1093
                        nl, j, ul, l, latency);
1094
        }
1095
#endif
1096
      }
1097
    }
1098
  } else {
1099
    // This is a projection, just grab the latency of the use(s)
1100
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1101
      uint l = latency_from_use(use, def, use->fast_out(j));
1102
      if (latency < l) latency = l;
1103
    }
1104
  }
1105

1106
  return latency;
1107
}
1108

1109
//------------------------------latency_from_uses------------------------------
1110
// Compute the latency of this instruction relative to all of it's uses.
1111
// This computes a number that increases as we approach the beginning of the
1112
// routine.
1113
void PhaseCFG::latency_from_uses(Node *n) {
1114
  // Set the latency for this instruction
1115
#ifndef PRODUCT
1116
  if (trace_opto_pipelining()) {
1117
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1118
    dump();
1119
  }
1120
#endif
1121
  uint latency=0;
1122
  const Node *def = n->is_Proj() ? n->in(0): n;
1123

1124
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1125
    uint l = latency_from_use(n, def, n->fast_out(i));
1126

1127
    if (latency < l) latency = l;
1128
  }
1129

1130
  set_latency_for_node(n, latency);
1131
}
1132

1133
//------------------------------is_cheaper_block-------------------------
1134
// Check if a block between early and LCA block of uses is cheaper by
1135
// frequency-based policy, latency-based policy and random-based policy
1136
bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1137
                                uint end_latency, double least_freq,
1138
                                int cand_cnt, bool in_latency) {
1139
  if (StressGCM) {
1140
    // Should be randomly accepted in stress mode
1141
    return C->randomized_select(cand_cnt);
1142
  }
1143

1144
  // Better Frequency
1145
  if (LCA->_freq < least_freq) {
1146
    return true;
1147
  }
1148

1149
  // Otherwise, choose with latency
1150
  const double delta = 1 + PROB_UNLIKELY_MAG(4);
1151
  if (!in_latency                     &&  // No block containing latency
1152
      LCA->_freq < least_freq * delta &&  // No worse frequency
1153
      target_latency >= end_latency   &&  // within latency range
1154
      !self->is_iteratively_computed()    // But don't hoist IV increments
1155
            // because they may end up above other uses of their phi forcing
1156
            // their result register to be different from their input.
1157
  ) {
1158
    return true;
1159
  }
1160

1161
  return false;
1162
}
1163

1164
//------------------------------hoist_to_cheaper_block-------------------------
1165
// Pick a block for node self, between early and LCA block of uses, that is a
1166
// cheaper alternative to LCA.
1167
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1168
  Block* least       = LCA;
1169
  double least_freq  = least->_freq;
1170
  uint target        = get_latency_for_node(self);
1171
  uint start_latency = get_latency_for_node(LCA->head());
1172
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1173
  bool in_latency    = (target <= start_latency);
1174
  const Block* root_block = get_block_for_node(_root);
1175

1176
  // Turn off latency scheduling if scheduling is just plain off
1177
  if (!C->do_scheduling())
1178
    in_latency = true;
1179

1180
  // Do not hoist (to cover latency) instructions which target a
1181
  // single register.  Hoisting stretches the live range of the
1182
  // single register and may force spilling.
1183
  MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1184
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1185
    in_latency = true;
1186

1187
#ifndef PRODUCT
1188
  if (trace_opto_pipelining()) {
1189
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1190
    self->dump();
1191
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1192
      LCA->_pre_order,
1193
      LCA->head()->_idx,
1194
      start_latency,
1195
      LCA->get_node(LCA->end_idx())->_idx,
1196
      end_latency,
1197
      least_freq);
1198
  }
1199
#endif
1200

1201
  int cand_cnt = 0;  // number of candidates tried
1202

1203
  // Walk up the dominator tree from LCA (Lowest common ancestor) to
1204
  // the earliest legal location. Capture the least execution frequency,
1205
  // or choose a random block if -XX:+StressGCM, or using latency-based policy
1206
  while (LCA != early) {
1207
    LCA = LCA->_idom;         // Follow up the dominator tree
1208

1209
    if (LCA == nullptr) {
1210
      // Bailout without retry
1211
      assert(false, "graph should be schedulable");
1212
      C->record_method_not_compilable("late schedule failed: LCA is null");
1213
      return least;
1214
    }
1215

1216
    // Don't hoist machine instructions to the root basic block
1217
    if (mach && LCA == root_block)
1218
      break;
1219

1220
    if (self->is_memory_writer() &&
1221
        (LCA->_loop->depth() > early->_loop->depth())) {
1222
      // LCA is an invalid placement for a memory writer: choosing it would
1223
      // cause memory interference, as illustrated in schedule_late().
1224
      continue;
1225
    }
1226
    verify_memory_writer_placement(LCA, self);
1227

1228
    uint start_lat = get_latency_for_node(LCA->head());
1229
    uint end_idx   = LCA->end_idx();
1230
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1231
    double LCA_freq = LCA->_freq;
1232
#ifndef PRODUCT
1233
    if (trace_opto_pipelining()) {
1234
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1235
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1236
    }
1237
#endif
1238
    cand_cnt++;
1239
    if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
1240
      least = LCA;            // Found cheaper block
1241
      least_freq = LCA_freq;
1242
      start_latency = start_lat;
1243
      end_latency = end_lat;
1244
      if (target <= start_lat)
1245
        in_latency = true;
1246
    }
1247
  }
1248

1249
#ifndef PRODUCT
1250
  if (trace_opto_pipelining()) {
1251
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1252
      least->_pre_order, start_latency, least_freq);
1253
  }
1254
#endif
1255

1256
  // See if the latency needs to be updated
1257
  if (target < end_latency) {
1258
#ifndef PRODUCT
1259
    if (trace_opto_pipelining()) {
1260
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1261
    }
1262
#endif
1263
    set_latency_for_node(self, end_latency);
1264
    partial_latency_of_defs(self);
1265
  }
1266

1267
  return least;
1268
}
1269

1270

1271
//------------------------------schedule_late-----------------------------------
1272
// Now schedule all codes as LATE as possible.  This is the LCA in the
1273
// dominator tree of all USES of a value.  Pick the block with the least
1274
// loop nesting depth that is lowest in the dominator tree.
1275
extern const char must_clone[];
1276
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next())) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

#ifdef ASSERT
    // Assert that memory writers (e.g. stores) have a "home" block (the block
    // given by their control input), and that this block corresponds to their
    // earliest possible placement. This guarantees that
    // hoist_to_cheaper_block() will always have at least one valid choice.
    if (self->is_memory_writer()) {
      assert(find_block_for_node(self->in(0)) == early,
             "The home of a memory writer must also be its earliest placement");
    }
#endif

    MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP: {
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
      default:
        break;
      }
      if (C->has_irreducible_loop() && self->is_memory_writer()) {
        // If the CFG is irreducible, place memory writers in their home block.
        // This prevents hoist_to_cheaper_block() from accidentally placing such
        // nodes into deeper loops, as in the following example:
        //
        // Home placement of store in B1 (loop L1):
        //
        // B1 (L1):
        //   m1 <- ..
        //   m2 <- store m1, ..
        // B2 (L2):
        //   jump B2
        // B3 (L1):
        //   .. <- .. m2, ..
        //
        // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
        //
        // B1 (L1):
        //   m1 <- ..
        // B2 (L2):
        //   m2 <- store m1, ..
        //   # Wrong: m1 and m2 interfere at this point.
        //   jump B2
        // B3 (L1):
        //   .. <- .. m2, ..
        //
        // This "hoist inversion" can happen due to different factors such as
        // inaccurate estimation of frequencies for irreducible CFGs, and loops
        // with always-taken exits in reducible CFGs. In the reducible case,
        // hoist inversion is prevented by discarding invalid blocks (those in
        // deeper loops than the home block). In the irreducible case, the
        // invalid blocks cannot be identified due to incomplete loop nesting
        // information, hence a conservative solution is taken.
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# Irreducible loops: schedule in home block B%d:",
                        early->_pre_order);
          self->dump();
        }
#endif
        schedule_node_into_block(self, early);
        continue;
      }
    }

    // Gather LCA of all uses
    Block *LCA = nullptr;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
      guarantee(LCA != nullptr, "There must be a LCA");
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != nullptr && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    if (self->is_memory_writer()) {
      // If the LCA of a memory writer is a descendant of its home loop, hoist
      // it into a valid placement.
      while (LCA->_loop->depth() > early->_loop->depth()) {
        LCA = LCA->_idom;
      }
      assert(LCA != nullptr, "a valid LCA must exist");
      verify_memory_writer_placement(LCA, self);
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must-clone nodes stay next to their use; no hoisting allowed.
    // Also cannot hoist nodes that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti- and output-dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != nullptr && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = nullptr;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into the target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // Since precedence edges are only inserted when we're sure they
      // are needed, make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  VectorSet visited;
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.clear();
  Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    assert(false, "early schedule failed");
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find null checks
  // with suitable memory ops nearby.  Use the memory op to do the null check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = nullptr;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Register-pressure-aware local scheduling is only enabled when OptoRegScheduling
  // is set and at least one block is large enough to be worth scheduling.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler);      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size); // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency-aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.reset();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        assert(false, "local schedule failed");
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = nullptr;
      return;
    }
  }
  _regalloc = nullptr;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent.
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == nullptr, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may call the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == nullptr, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == nullptr) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == nullptr) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == nullptr) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == nullptr) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == nullptr, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == nullptr) {
    _child = cl;
  } else {
    while (ch->_sibling != nullptr) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != nullptr) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != nullptr) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the chance of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
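    // Illustrative example (assumed numbers): with two exits whose
    // per-iteration probabilities are 0.3 and 0.1, exits_sum is 0.4 and the
    // normalized per-entry probabilities become 0.75 and 0.25.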
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
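      // Worked example (assumed numbers): if this block's _freq is 1.0 and
      // successor i has a single CFG predecessor and _freq 0.01, the
      // estimated probability of reaching successor i is 0.01 / 1.0 = 0.01.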
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }


  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob  = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two-way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s <_num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small, else
  // ub is the false path, and make the probability large
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

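  // Worked example (assumed numbers): if ub is on the false path and the
  // branch's taken probability is 0.4, inverting gives 0.6, the clamp lowers
  // it to PROB_MIN, and inverting back stores 1 - PROB_MIN as the new _prob.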
  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != nullptr) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
  if (_child != nullptr) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != nullptr)   _child->dump_tree();
  if (_sibling != nullptr) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         exits:  ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif
