/*
 * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/predicates.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include <fenv.h>
#include <math.h>

/*
 * The general idea of Loop Predication is to hoist a check inside a loop body by inserting a Hoisted Check Predicate with
 * an uncommon trap on the entry path to the loop. The old check inside the loop can be eliminated. If the condition of
 * the Hoisted Check Predicate fails at runtime, we'll execute the uncommon trap to avoid entering the loop, which no
 * longer contains the check. Loop Predication can currently remove array range checks and loop invariant checks (such as
 * null checks).
 *
 * On top of these predicates added by Loop Predication, there are other kinds of predicates. A detailed description of
 * all predicates can be found in predicates.hpp.
 */
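
/*
 * Illustrative example (simplified): for a loop such as
 *
 *   for (int i = 0; i < n; i++) {
 *     a[i] = 0;   // implicit range check: i u< a.length, otherwise deoptimize
 *   }
 *
 * Loop Predication emits a Hoisted Range Check Predicate before the loop that covers the first and the last
 * value of the index expression (see rc_predicate() below). If the predicate holds, the per-iteration check
 * inside the loop can be removed; if it fails at runtime, the uncommon trap is taken before the loop is entered.
 */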

//-------------------------------register_control-------------------------
void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body) {
  assert(n->is_CFG(), "must be control node");
  _igvn.register_new_node_with_optimizer(n);
  if (update_body) {
    loop->_body.push(n);
  }
  set_loop(n, loop);
  // When called from beautify_loops() idom is not constructed yet.
  if (_idom != nullptr) {
    set_idom(n, pred, dom_depth(pred));
  }
}

//------------------------------create_new_if_for_predicate------------------------
// Create a new If above the uct_if_pattern for the predicate to be promoted.
//
//          before                                after
//        ----------                           ----------
//           ctrl                                 ctrl
//            |                                     |
//            |                                     |
//            v                                     v
//           iff                                 new_iff
//          /    \                                /      \
//         /      \                              /        \
//        v        v                            v          v
//  uncommon_proj cont_proj                   if_uct     if_cont
// \      |        |                           |          |
//  \     |        |                           |          |
//   v    v        v                           |          v
//     rgn       loop                          |         iff
//      |                                      |        /     \
//      |                                      |       /       \
//      v                                      |      v         v
// uncommon_trap                               | uncommon_proj cont_proj
//                                           \  \    |           |
//                                            \  \   |           |
//                                             v  v  v           v
//                                               rgn           loop
//                                                |
//                                                |
//                                                v
//                                           uncommon_trap
//
//
// We will create a region to guard the uct call if there is not one there already.
// The continuation projection (if_cont) of the new_iff, which is an IfTrue projection, is returned.
// This code is also used to clone predicates to cloned loops.
IfTrueNode* PhaseIdealLoop::create_new_if_for_predicate(ParsePredicateSuccessProj* parse_predicate_success_proj,
103
                                                        Node* new_entry, const Deoptimization::DeoptReason reason,
104
                                                        const int opcode, const bool rewire_uncommon_proj_phi_inputs
105
                                                        NOT_PRODUCT (COMMA AssertionPredicateType assertion_predicate_type)) {
106
  assert(parse_predicate_success_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
107
  ParsePredicateNode* parse_predicate = parse_predicate_success_proj->in(0)->as_ParsePredicate();
108
  ParsePredicateUncommonProj* uncommon_proj = parse_predicate->uncommon_proj();
109
  Node* uncommon_trap = parse_predicate->uncommon_trap();
110

111
  uint proj_index = 1; // region's edge corresponding to uncommon_proj
112
  if (!uncommon_trap->is_Region()) { // create a region to guard the call
113
    assert(uncommon_trap->is_Call(), "must be call uct");
114
    CallNode* call = uncommon_trap->as_Call();
115
    IdealLoopTree* loop = get_loop(call);
116
    uncommon_trap = new RegionNode(1);
117
    Node* uncommon_proj_orig = uncommon_proj;
118
    uncommon_proj = uncommon_proj->clone()->as_IfFalse();
119
    register_control(uncommon_proj, loop, parse_predicate);
120
    uncommon_trap->add_req(uncommon_proj);
121
    register_control(uncommon_trap, loop, uncommon_proj);
122
    _igvn.replace_input_of(call, 0, uncommon_trap);
123
    // When called from beautify_loops() idom is not constructed yet.
124
    if (_idom != nullptr) {
125
      set_idom(call, uncommon_trap, dom_depth(uncommon_trap));
126
    }
127
    // Move nodes pinned on the projection or whose control is set to
128
    // the projection to the region.
129
    lazy_replace(uncommon_proj_orig, uncommon_trap);
130
  } else {
131
    // Find region's edge corresponding to uncommon_proj
132
    for (; proj_index < uncommon_trap->req(); proj_index++)
133
      if (uncommon_trap->in(proj_index) == uncommon_proj) break;
134
    assert(proj_index < uncommon_trap->req(), "sanity");
135
  }
136

137
  Node* entry = parse_predicate->in(0);
138
  if (new_entry != nullptr) {
139
    // Cloning the predicate to new location.
140
    entry = new_entry;
141
  }
142
  // Create new_iff
143
  IdealLoopTree* lp = get_loop(entry);
144
  IfNode* new_iff = nullptr;
145
  switch (opcode) {
146
    case Op_If:
147
      new_iff = new IfNode(entry, parse_predicate->in(1), parse_predicate->_prob, parse_predicate->_fcnt
148
                           NOT_PRODUCT(COMMA assertion_predicate_type));
149
      break;
150
    case Op_RangeCheck:
151
      new_iff = new RangeCheckNode(entry, parse_predicate->in(1), parse_predicate->_prob, parse_predicate->_fcnt
152
                                   NOT_PRODUCT(COMMA assertion_predicate_type));
153
      break;
154
    case Op_ParsePredicate:
155
      new_iff = new ParsePredicateNode(entry, reason, &_igvn);
156
      break;
157
    default:
158
      fatal("no other If variant here");
159
  }
160
  register_control(new_iff, lp, entry);
161

162
  IfTrueNode* if_cont = new IfTrueNode(new_iff);
163
  IfFalseNode* if_uct = new IfFalseNode(new_iff);
164

165
  register_control(if_cont, lp, new_iff);
166
  register_control(if_uct, get_loop(uncommon_trap), new_iff);
167

168
  _igvn.add_input_to(uncommon_trap, if_uct);
169

170
  // If rgn has phis, add new edges which have the same
  // value as on the original uncommon_proj path.
172
  assert(uncommon_trap->in(uncommon_trap->req() - 1) == if_uct, "new edge should be last");
173
  bool has_phi = false;
174
  for (DUIterator_Fast imax, i = uncommon_trap->fast_outs(imax); i < imax; i++) {
175
    Node* use = uncommon_trap->fast_out(i);
176
    if (use->is_Phi() && use->outcnt() > 0) {
177
      assert(use->in(0) == uncommon_trap, "");
178
      _igvn.rehash_node_delayed(use);
179
      Node* phi_input = use->in(proj_index);
180

181
      if (uncommon_proj->outcnt() > 1 && !phi_input->is_CFG() && !phi_input->is_Phi() && get_ctrl(phi_input) == uncommon_proj) {
182
        // There are some control dependent nodes on the uncommon projection. We cannot simply reuse these data nodes.
183
        // We either need to rewire them from the old uncommon projection to the newly created uncommon proj (if the old
184
        // If is dying) or clone them and update their control (if the old If is not dying).
185
        if (rewire_uncommon_proj_phi_inputs) {
186
          // Replace phi input for the old uncommon projection with TOP as the If is dying anyways. Reuse the old data
187
          // nodes by simply updating control inputs and ctrl.
188
          _igvn.replace_input_of(use, proj_index, C->top());
189
          set_ctrl_of_nodes_with_same_ctrl(phi_input, uncommon_proj, if_uct);
190
        } else {
191
          phi_input = clone_nodes_with_same_ctrl(phi_input, uncommon_proj, if_uct);
192
        }
193
      }
194
      use->add_req(phi_input);
195
      has_phi = true;
196
    }
197
  }
198
  assert(!has_phi || uncommon_trap->req() > 3, "no phis when region is created");
199

200
  if (new_entry == nullptr) {
201
    // Attach if_cont to iff
202
    _igvn.replace_input_of(parse_predicate, 0, if_cont);
203
    if (_idom != nullptr) {
204
      set_idom(parse_predicate, if_cont, dom_depth(parse_predicate));
205
    }
206
  }
207

208
  // When called from beautify_loops() idom is not constructed yet.
209
  if (_idom != nullptr) {
210
    Node* ridom = idom(uncommon_trap);
211
    Node* nrdom = dom_lca_internal(ridom, new_iff);
212
    set_idom(uncommon_trap, nrdom, dom_depth(uncommon_trap));
213
  }
214

215
  return if_cont;
216
}
217

218
// Update ctrl and control inputs of all data nodes starting from 'start_node' to 'new_uncommon_proj' which have
// 'old_uncommon_proj' as current ctrl.
220
void PhaseIdealLoop::set_ctrl_of_nodes_with_same_ctrl(Node* start_node, ProjNode* old_uncommon_proj,
221
                                                      Node* new_uncommon_proj) {
222
  ResourceMark rm;
223
  const Unique_Node_List nodes_with_same_ctrl = find_nodes_with_same_ctrl(start_node, old_uncommon_proj);
224
  for (uint i = 0; i < nodes_with_same_ctrl.size(); i++) {
225
    Node* node = nodes_with_same_ctrl[i];
226
    if (node->in(0) == old_uncommon_proj) {
227
      _igvn.replace_input_of(node, 0, new_uncommon_proj);
228
    }
229
    set_ctrl(node, new_uncommon_proj);
230
  }
231
}
232

233
// Recursively find all input nodes with the same ctrl.
234
Unique_Node_List PhaseIdealLoop::find_nodes_with_same_ctrl(Node* node, const ProjNode* ctrl) {
235
  Unique_Node_List nodes_with_same_ctrl;
236
  nodes_with_same_ctrl.push(node);
237
  for (uint j = 0; j < nodes_with_same_ctrl.size(); j++) {
238
    Node* next = nodes_with_same_ctrl[j];
239
    for (uint k = 1; k < next->req(); k++) {
240
      Node* in = next->in(k);
241
      if (!in->is_Phi() && get_ctrl(in) == ctrl) {
242
        nodes_with_same_ctrl.push(in);
243
      }
244
    }
245
  }
246
  return nodes_with_same_ctrl;
247
}
248

249
// Clone all data nodes with a ctrl on the old uncommon projection, starting from 'start_node' and following its
// inputs. Rewire the cloned nodes to the new uncommon projection. Returns the clone of 'start_node'.
251
Node* PhaseIdealLoop::clone_nodes_with_same_ctrl(Node* start_node, ProjNode* old_uncommon_proj, Node* new_uncommon_proj) {
252
  ResourceMark rm;
253
  DEBUG_ONLY(uint last_idx = C->unique();)
254
  const Unique_Node_List nodes_with_same_ctrl = find_nodes_with_same_ctrl(start_node, old_uncommon_proj);
255
  DataNodeGraph data_node_graph(nodes_with_same_ctrl, this);
256
  const OrigToNewHashtable& orig_to_clone = data_node_graph.clone(new_uncommon_proj);
257
  fix_cloned_data_node_controls(old_uncommon_proj, new_uncommon_proj, orig_to_clone);
258
  Node** cloned_node_ptr = orig_to_clone.get(start_node);
259
  assert(cloned_node_ptr != nullptr && (*cloned_node_ptr)->_idx >= last_idx, "must exist and be a proper clone");
260
  return *cloned_node_ptr;
261
}
262

263
// All data nodes with a control input to the uncommon projection in the chain need to be rewired to the new uncommon
// projection (this is not necessarily only the last data node in the chain; it could also be, for example, a pinned
// DivNode within the chain).
265
void PhaseIdealLoop::fix_cloned_data_node_controls(const ProjNode* old_uncommon_proj, Node* new_uncommon_proj,
266
                                                   const OrigToNewHashtable& orig_to_clone) {
267
  auto orig_clone_action = [&](Node* orig, Node* clone) {
268
    if (orig->in(0) == old_uncommon_proj) {
269
      _igvn.replace_input_of(clone, 0, new_uncommon_proj);
270
      set_ctrl(clone, new_uncommon_proj);
271
    }
272
  };
273
  orig_to_clone.iterate_all(orig_clone_action);
274
}
275

276
IfProjNode* PhaseIdealLoop::clone_parse_predicate_to_unswitched_loop(ParsePredicateSuccessProj* parse_predicate_proj,
277
                                                                     Node* new_entry, Deoptimization::DeoptReason reason,
278
                                                                     const bool slow_loop) {
279

280
  IfProjNode* new_predicate_proj = create_new_if_for_predicate(parse_predicate_proj, new_entry, reason, Op_ParsePredicate,
281
                                                               slow_loop);
282
  assert(new_predicate_proj->is_IfTrue(), "the success projection of a Parse Predicate is a true projection");
283
  ParsePredicateNode* parse_predicate = new_predicate_proj->in(0)->as_ParsePredicate();
284
  return new_predicate_proj;
285
}
286

287
// Clones Assertion Predicates to both unswitched loops starting at 'old_predicate_proj' by following its control inputs.
288
// It also rewires the control edges of data nodes with dependencies in the loop from the old predicates to the new
289
// cloned predicates.
290
void PhaseIdealLoop::clone_assertion_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new,
291
                                                                   Deoptimization::DeoptReason reason,
292
                                                                   IfProjNode* old_predicate_proj,
293
                                                                   ParsePredicateSuccessProj* fast_loop_parse_predicate_proj,
294
                                                                   ParsePredicateSuccessProj* slow_loop_parse_predicate_proj) {
295
  assert(fast_loop_parse_predicate_proj->in(0)->is_ParsePredicate() &&
296
         slow_loop_parse_predicate_proj->in(0)->is_ParsePredicate(), "sanity check");
297
  // Only need to clone range check predicates as those can be changed and duplicated by inserting pre/main/post loops
298
  // and doing loop unrolling. Push the original predicates on a list to later process them in reverse order to keep the
299
  // original predicate order.
300
  Unique_Node_List list;
301
  get_assertion_predicates(old_predicate_proj, list);
302

303
  Node_List to_process;
304
  IfNode* iff = old_predicate_proj->in(0)->as_If();
305
  IfProjNode* uncommon_proj = iff->proj_out(1 - old_predicate_proj->as_Proj()->_con)->as_IfProj();
306
  // Process in reverse order such that 'create_new_if_for_predicate' can be used in
307
  // 'clone_assertion_predicate_for_unswitched_loops' and the original order is maintained.
308
  for (int i = list.size() - 1; i >= 0; i--) {
309
    Node* predicate = list.at(i);
310
    assert(predicate->in(0)->is_If(), "must be If node");
311
    iff = predicate->in(0)->as_If();
312
    assert(predicate->is_Proj() && predicate->as_Proj()->is_IfProj(), "predicate must be a projection of an if node");
313
    IfProjNode* predicate_proj = predicate->as_IfProj();
314

315
    IfProjNode* fast_proj = clone_assertion_predicate_for_unswitched_loops(iff, predicate_proj, reason, fast_loop_parse_predicate_proj);
316
    assert(assertion_predicate_has_loop_opaque_node(fast_proj->in(0)->as_If()), "must find Assertion Predicate for fast loop");
317
    IfProjNode* slow_proj = clone_assertion_predicate_for_unswitched_loops(iff, predicate_proj, reason, slow_loop_parse_predicate_proj);
318
    assert(assertion_predicate_has_loop_opaque_node(slow_proj->in(0)->as_If()), "must find Assertion Predicate for slow loop");
319

320
    // Update control dependent data nodes.
321
    for (DUIterator j = predicate->outs(); predicate->has_out(j); j++) {
322
      Node* fast_node = predicate->out(j);
323
      if (loop->is_member(get_loop(ctrl_or_self(fast_node)))) {
324
        assert(fast_node->in(0) == predicate, "only control edge");
325
        Node* slow_node = old_new[fast_node->_idx];
326
        assert(slow_node->in(0) == predicate, "only control edge");
327
        _igvn.replace_input_of(fast_node, 0, fast_proj);
328
        to_process.push(slow_node);
329
        --j;
330
      }
331
    }
332
    // Have to delay updates to the slow loop so uses of predicate are not modified while we iterate on them.
333
    while (to_process.size() > 0) {
334
      Node* slow_node = to_process.pop();
335
      _igvn.replace_input_of(slow_node, 0, slow_proj);
336
    }
337
  }
338
}
339

340
// Put all Assertion Predicate projections on a list, starting at 'predicate' and going up in the tree. If 'get_opaque'
341
// is set, then the Opaque4 nodes of the Assertion Predicates are put on the list instead of the projections.
342
void PhaseIdealLoop::get_assertion_predicates(Node* predicate, Unique_Node_List& list, bool get_opaque) {
343
  ParsePredicateNode* parse_predicate = predicate->in(0)->as_ParsePredicate();
344
  ProjNode* uncommon_proj = parse_predicate->proj_out(1 - predicate->as_Proj()->_con);
345
  Node* rgn = uncommon_proj->unique_ctrl_out();
346
  assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
347
  predicate = parse_predicate->in(0);
348
  while (predicate != nullptr && predicate->is_Proj() && predicate->in(0)->is_If()) {
349
    IfNode* iff = predicate->in(0)->as_If();
350
    uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
351
    if (uncommon_proj->unique_ctrl_out() != rgn) {
352
      break;
353
    }
354
    Node* bol = iff->in(1);
355
    assert(!bol->is_OpaqueInitializedAssertionPredicate(), "should not find an Initialized Assertion Predicate");
356
    if (bol->is_Opaque4()) {
357
      assert(assertion_predicate_has_loop_opaque_node(iff), "must find OpaqueLoop* nodes");
358
      if (get_opaque) {
359
        // Collect the predicate Opaque4 node.
360
        list.push(bol);
361
      } else {
362
        // Collect the predicate projection.
363
        list.push(predicate);
364
      }
365
    }
366
    predicate = predicate->in(0)->in(0);
367
  }
368
}
369

370
// Clone an Assertion Predicate for an unswitched loop. OpaqueLoopInit and OpaqueLoopStride nodes are cloned and uncommon
371
// traps are kept for the predicate (a Halt node is used later when creating pre/main/post loops and copying this cloned
372
// predicate again).
373
IfProjNode* PhaseIdealLoop::clone_assertion_predicate_for_unswitched_loops(IfNode* template_assertion_predicate,
374
                                                                           IfProjNode* predicate,
375
                                                                           Deoptimization::DeoptReason reason,
376
                                                                           ParsePredicateSuccessProj* parse_predicate_proj) {
377
  TemplateAssertionPredicateExpression template_assertion_predicate_expression(
378
      template_assertion_predicate->in(1)->as_Opaque4());
379
  Opaque4Node* cloned_opaque4_node = template_assertion_predicate_expression.clone(parse_predicate_proj->in(0)->in(0), this);
380
  IfProjNode* if_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason, template_assertion_predicate->Opcode(), false);
381
  _igvn.replace_input_of(if_proj->in(0), 1, cloned_opaque4_node);
382
  _igvn.replace_input_of(parse_predicate_proj->in(0), 0, if_proj);
383
  set_idom(parse_predicate_proj->in(0), if_proj, dom_depth(if_proj));
384
  return if_proj;
385
}
386

387
// Clone the old Parse Predicates and Assertion Predicates before the unswitch If to the unswitched loops after the
388
// unswitch If.
389
void PhaseIdealLoop::clone_parse_and_assertion_predicates_to_unswitched_loop(IdealLoopTree* loop, Node_List& old_new,
390
                                                                             IfProjNode*& iffast_pred, IfProjNode*& ifslow_pred) {
391
  LoopNode* head = loop->_head->as_Loop();
392
  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
393

394
  const Predicates predicates(entry);
395
  clone_loop_predication_predicates_to_unswitched_loop(loop, old_new, predicates.loop_predicate_block(),
396
                                                       Deoptimization::Reason_predicate, iffast_pred, ifslow_pred);
397
  clone_loop_predication_predicates_to_unswitched_loop(loop, old_new, predicates.profiled_loop_predicate_block(),
398
                                                       Deoptimization::Reason_profile_predicate, iffast_pred, ifslow_pred);
399

400
  const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
401
  if (loop_limit_check_predicate_block->has_parse_predicate() && !head->is_CountedLoop()) {
402
    // Don't clone the Loop Limit Check Parse Predicate if we already have a counted loop (a Loop Limit Check Predicate
403
    // is only created when converting a LoopNode to a CountedLoopNode).
404
    clone_parse_predicate_to_unswitched_loops(loop_limit_check_predicate_block, Deoptimization::Reason_loop_limit_check,
405
                                              iffast_pred, ifslow_pred);
406
  }
407
}
408

409
// Clone the Parse Predicate and Template Assertion Predicates of a Loop Predication related Predicate Block.
410
void PhaseIdealLoop::clone_loop_predication_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new,
411
                                                                          const PredicateBlock* predicate_block,
412
                                                                          Deoptimization::DeoptReason reason,
413
                                                                          IfProjNode*& iffast_pred,
414
                                                                          IfProjNode*& ifslow_pred) {
415
  if (predicate_block->has_parse_predicate()) {
416
    // We currently only clone Assertion Predicates if there are Parse Predicates. This is not entirely correct and will
417
    // be changed with the complete fix for Assertion Predicates.
418
    clone_parse_predicate_to_unswitched_loops(predicate_block, reason, iffast_pred, ifslow_pred);
419
    assert(iffast_pred->in(0)->is_ParsePredicate() && ifslow_pred->in(0)->is_ParsePredicate(),
420
           "must be success projections of the cloned Parse Predicates");
421
    clone_assertion_predicates_to_unswitched_loop(loop, old_new, reason, predicate_block->parse_predicate_success_proj(),
422
                                                  iffast_pred->as_IfTrue(), ifslow_pred->as_IfTrue());
423
  }
424
}
425

426
void PhaseIdealLoop::clone_parse_predicate_to_unswitched_loops(const PredicateBlock* predicate_block,
427
                                                               Deoptimization::DeoptReason reason,
428
                                                               IfProjNode*& iffast_pred, IfProjNode*& ifslow_pred) {
429
  assert(predicate_block->has_parse_predicate(), "must have parse predicate");
430
  ParsePredicateSuccessProj* parse_predicate_proj = predicate_block->parse_predicate_success_proj();
431
  iffast_pred = clone_parse_predicate_to_unswitched_loop(parse_predicate_proj, iffast_pred, reason, false);
432
  check_cloned_parse_predicate_for_unswitching(iffast_pred, true);
433

434
  ifslow_pred = clone_parse_predicate_to_unswitched_loop(parse_predicate_proj, ifslow_pred, reason, true);
435
  check_cloned_parse_predicate_for_unswitching(ifslow_pred, false);
436
}
437

438
#ifndef PRODUCT
439
void PhaseIdealLoop::check_cloned_parse_predicate_for_unswitching(const Node* new_entry, const bool is_fast_loop) {
440
  assert(new_entry != nullptr, "IfTrue or IfFalse after clone predicate");
441
  if (TraceLoopPredicate) {
442
    tty->print("Parse Predicate cloned to %s loop: ", is_fast_loop ? "fast" : "slow");
443
    new_entry->in(0)->dump();
444
  }
445
}
446
#endif
447

448
//------------------------------Invariance-----------------------------------
449
// Helper class for loop_predication_impl to compute invariance on the fly and
450
// clone invariants.
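// Typical use (see loop_predication_impl_helper() below): is_invariant() is queried for the nodes of a candidate
// check, and, if the check is hoistable, clone() copies the invariant expression above the loop using the
// predicate's control; map_ctrl() records already-hoisted check projections as invariant.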
451
class Invariance : public StackObj {
452
  VectorSet _visited, _invariant;
453
  Node_Stack _stack;
454
  VectorSet _clone_visited;
455
  Node_List _old_new; // map of old to new (clone)
456
  IdealLoopTree* _lpt;
457
  PhaseIdealLoop* _phase;
458
  Node* _data_dependency_on; // The projection into the loop on which data nodes are dependent or null otherwise
459

460
  // Helper function to set up the invariance for invariance computation
  // If n is a known invariant, set up directly. Otherwise, look up the
  // possibility to push n onto the stack for further processing.
463
  void visit(Node* use, Node* n) {
464
    if (_lpt->is_invariant(n)) { // known invariant
465
      _invariant.set(n->_idx);
466
    } else if (!n->is_CFG()) {
467
      Node *n_ctrl = _phase->ctrl_or_self(n);
468
      Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
469
      if (_phase->is_dominator(n_ctrl, u_ctrl)) {
470
        _stack.push(n, n->in(0) == nullptr ? 1 : 0);
471
      }
472
    }
473
  }
474

475
  // Compute invariance for n and (possibly) all its inputs recursively
  // on the fly
477
  void compute_invariance(Node* n) {
478
    assert(_visited.test(n->_idx), "must be");
479
    visit(n, n);
480
    while (_stack.is_nonempty()) {
481
      Node*  n = _stack.node();
482
      uint idx = _stack.index();
483
      if (idx == n->req()) { // all inputs are processed
484
        _stack.pop();
485
        // n is invariant if its inputs are all invariant
486
        bool all_inputs_invariant = true;
487
        for (uint i = 0; i < n->req(); i++) {
488
          Node* in = n->in(i);
489
          if (in == nullptr) continue;
490
          assert(_visited.test(in->_idx), "must have visited input");
491
          if (!_invariant.test(in->_idx)) { // bad guy
492
            all_inputs_invariant = false;
493
            break;
494
          }
495
        }
496
        if (all_inputs_invariant) {
497
          // If n's control is a predicate that was moved out of the
498
          // loop, it was marked invariant but n is only invariant if
499
          // it depends only on that test. Otherwise, unless that test
500
          // is out of the loop, it's not invariant.
501
          if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == nullptr || !_phase->is_member(_lpt, n->in(0))) {
502
            _invariant.set(n->_idx); // I am an invariant too
503
          }
504
        }
505
      } else { // process next input
506
        _stack.set_index(idx + 1);
507
        Node* m = n->in(idx);
508
        if (m != nullptr && !_visited.test_set(m->_idx)) {
509
          visit(n, m);
510
        }
511
      }
512
    }
513
  }
514

515
  // Helper function to set up _old_new map for clone_nodes.
516
  // If n is a known invariant, set up directly ("clone" of n == n).
517
  // Otherwise, push n onto the stack for real cloning.
518
  void clone_visit(Node* n) {
519
    assert(_invariant.test(n->_idx), "must be invariant");
520
    if (_lpt->is_invariant(n)) { // known invariant
521
      _old_new.map(n->_idx, n);
522
    } else { // to be cloned
523
      assert(!n->is_CFG(), "should not see CFG here");
524
      _stack.push(n, n->in(0) == nullptr ? 1 : 0);
525
    }
526
  }
527

528
  // Clone "n" and (possibly) all its inputs recursively
529
  void clone_nodes(Node* n, Node* ctrl) {
530
    clone_visit(n);
531
    while (_stack.is_nonempty()) {
532
      Node*  n = _stack.node();
533
      uint idx = _stack.index();
534
      if (idx == n->req()) { // all inputs processed, clone n!
535
        _stack.pop();
536
        // clone invariant node
537
        Node* n_cl = n->clone();
538
        _old_new.map(n->_idx, n_cl);
539
        _phase->register_new_node(n_cl, ctrl);
540
        for (uint i = 0; i < n->req(); i++) {
541
          Node* in = n_cl->in(i);
542
          if (in == nullptr) continue;
543
          n_cl->set_req(i, _old_new[in->_idx]);
544
        }
545
      } else { // process next input
546
        _stack.set_index(idx + 1);
547
        Node* m = n->in(idx);
548
        if (m != nullptr && !_clone_visited.test_set(m->_idx)) {
549
          clone_visit(m); // visit the input
550
        }
551
      }
552
    }
553
  }
554

555
 public:
556
  Invariance(Arena* area, IdealLoopTree* lpt) :
557
    _visited(area), _invariant(area),
558
    _stack(area, 10 /* guess */),
559
    _clone_visited(area), _old_new(area),
560
    _lpt(lpt), _phase(lpt->_phase),
561
    _data_dependency_on(nullptr)
562
  {
563
    LoopNode* head = _lpt->_head->as_Loop();
564
    Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
565
    if (entry->outcnt() != 1) {
566
      // If a node is pinned between the predicates and the loop
567
      // entry, we won't be able to move any node in the loop that
568
      // depends on it above it in a predicate. Mark all those nodes
569
      // as non-loop-invariant.
570
      // Loop predication could create new nodes for which the below
571
      // invariant information is missing. Mark the 'entry' node to
572
      // later check again if a node needs to be treated as non-loop-
573
      // invariant as well.
574
      _data_dependency_on = entry;
575
      Unique_Node_List wq;
576
      wq.push(entry);
577
      for (uint next = 0; next < wq.size(); ++next) {
578
        Node *n = wq.at(next);
579
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
580
          Node* u = n->fast_out(i);
581
          if (!u->is_CFG()) {
582
            Node* c = _phase->get_ctrl(u);
583
            if (_lpt->is_member(_phase->get_loop(c)) || _phase->is_dominator(c, head)) {
584
              _visited.set(u->_idx);
585
              wq.push(u);
586
            }
587
          }
588
        }
589
      }
590
    }
591
  }
592

593
  // Did we explicitly mark some nodes non-loop-invariant? If so, return the entry node on which some data nodes
  // depend (these dependencies prevent loop predication). Otherwise, return null.
595
  Node* data_dependency_on() {
596
    return _data_dependency_on;
597
  }
598

599
  // Map old to n for invariance computation and clone
600
  void map_ctrl(Node* old, Node* n) {
601
    assert(old->is_CFG() && n->is_CFG(), "must be");
602
    _old_new.map(old->_idx, n); // "clone" of old is n
603
    _invariant.set(old->_idx);  // old is invariant
604
    _clone_visited.set(old->_idx);
605
  }
606

607
  // Driver function to compute invariance
608
  bool is_invariant(Node* n) {
609
    if (!_visited.test_set(n->_idx))
610
      compute_invariance(n);
611
    return (_invariant.test(n->_idx) != 0);
612
  }
613

614
  // Driver function to clone invariant
615
  Node* clone(Node* n, Node* ctrl) {
616
    assert(ctrl->is_CFG(), "must be");
617
    assert(_invariant.test(n->_idx), "must be an invariant");
618
    if (!_clone_visited.test(n->_idx))
619
      clone_nodes(n, ctrl);
620
    return _old_new[n->_idx];
621
  }
622
};
623

624
//------------------------------is_range_check_if -----------------------------------
625
// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
// Note: this function is specifically designed for loop predication. We require load_range
//       and offset to be loop invariant, as computed on the fly by "invar".
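// For example (illustrative): an access a[2*i + x] inside a counted loop with induction variable i is matched
// here with scale == 2, offset == x and range == the LoadRange of a, provided x and the array length are loop
// invariant.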
628
bool IdealLoopTree::is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop *phase, BasicType bt, Node *iv, Node *&range,
629
                                      Node *&offset, jlong &scale) const {
630
  IfNode* iff = if_success_proj->in(0)->as_If();
631
  if (!is_loop_exit(iff)) {
632
    return false;
633
  }
634
  if (!iff->in(1)->is_Bool()) {
635
    return false;
636
  }
637
  const BoolNode *bol = iff->in(1)->as_Bool();
638
  if (bol->_test._test != BoolTest::lt || if_success_proj->is_IfFalse()) {
639
    // We don't have the required range check pattern:
640
    // if (scale*iv + offset <u limit) {
641
    //
642
    // } else {
643
    //   trap();
644
    // }
645
    //
646
    // Having the trap on the true projection:
647
    // if (scale*iv + offset <u limit) {
648
    //   trap();
649
    // }
650
    //
651
    // is not correct. We would need to flip the test to get the expected "trap on false path" pattern:
652
    // if (scale*iv + offset >=u limit) {
653
    //
654
    // } else {
655
    //   trap();
656
    // }
657
    //
658
    // If we create a Range Check Predicate for this wrong pattern, it could succeed at runtime (i.e. true for the
659
    // value of "scale*iv + offset" in the first loop iteration and true for the value of "scale*iv + offset" in the
660
    // last loop iteration) while the check to be hoisted could fail in other loop iterations.
661
    //
662
    // Example:
663
    // Loop: "for (int i = -1; i < 1000; i++)"
664
    // init = "scale*iv + offset" in the first loop iteration = 1*-1 + 0 = -1
665
    // last = "scale*iv + offset" in the last loop iteration = 1*999 + 0 = 999
666
    // limit = 100
667
    //
668
    // Range Check Predicate is always true:
669
    // init >=u limit && last >=u limit  <=>
670
    // -1 >=u 100 && 999 >=u 100
671
    //
672
    // But for 0 <= x < 100: x >=u 100 is false.
673
    // We would wrongly skip the branch with the trap() and possibly miss to execute some other statements inside that
674
    // trap() branch.
675
    return false;
676
  }
677
  if (!bol->in(1)->is_Cmp()) {
678
    return false;
679
  }
680
  const CmpNode *cmp = bol->in(1)->as_Cmp();
681
  if (cmp->Opcode() != Op_Cmp_unsigned(bt)) {
682
    return false;
683
  }
684
  range = cmp->in(2);
685
  if (range->Opcode() != Op_LoadRange) {
686
    const TypeInteger* tinteger = phase->_igvn.type(range)->isa_integer(bt);
687
    if (tinteger == nullptr || tinteger->empty() || tinteger->lo_as_long() < 0) {
688
      // Allow predication on positive values that aren't LoadRanges.
689
      // This allows optimization of loops where the length of the
690
      // array is a known value and doesn't need to be loaded back
691
      // from the array.
692
      return false;
693
    }
694
  } else {
695
    assert(bt == T_INT, "no LoadRange for longs");
696
  }
697
  scale  = 0;
698
  offset = nullptr;
699
  if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, bt, &scale, &offset)) {
700
    return false;
701
  }
702
  return true;
703
}
704

705
bool IdealLoopTree::is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop *phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode *predicate_proj)) const {
706
  Node* range = nullptr;
707
  Node* offset = nullptr;
708
  jlong scale = 0;
709
  Node* iv = _head->as_BaseCountedLoop()->phi();
710
  Compile* C = Compile::current();
711
  const uint old_unique_idx = C->unique();
712
  if (!is_range_check_if(if_success_proj, phase, T_INT, iv, range, offset, scale)) {
713
    return false;
714
  }
715
  if (!invar.is_invariant(range)) {
716
    return false;
717
  }
718
  if (offset != nullptr) {
719
    if (!invar.is_invariant(offset)) { // offset must be invariant
720
      return false;
721
    }
722
    Node* data_dependency_on = invar.data_dependency_on();
723
    if (data_dependency_on != nullptr && old_unique_idx < C->unique()) {
724
      // 'offset' node was newly created in is_range_check_if(). Check that it does not depend on the entry projection
      // into the loop. If it does, we cannot perform loop predication (see Invariance::Invariance()).
726
      assert(!offset->is_CFG(), "offset must be a data node");
727
      if (_phase->get_ctrl(offset) == data_dependency_on) {
728
        return false;
729
      }
730
    }
731
  }
732
#ifdef ASSERT
733
  if (offset && phase->has_ctrl(offset)) {
734
    Node* offset_ctrl = phase->get_ctrl(offset);
735
    if (phase->get_loop(predicate_proj) == phase->get_loop(offset_ctrl) &&
736
        phase->is_dominator(predicate_proj, offset_ctrl)) {
737
      // If the control of offset is a loop predication check promoted by a previous pass,
      // then it will lead to a cyclic dependency: the previously promoted loop predication
      // is in the same loop as the predication point.
741
      // This situation can occur when pinning nodes too conservatively - can we do better?
742
      assert(false, "cyclic dependency prevents range check elimination, idx: offset %d, offset_ctrl %d, predicate_proj %d",
743
             offset->_idx, offset_ctrl->_idx, predicate_proj->_idx);
744
    }
745
  }
746
#endif
747
  return true;
748
}
749

//------------------------------rc_predicate-----------------------------------
// Create a range check predicate
//
// for (i = init; i < limit; i += stride) {
//    a[scale*i+offset]
// }
//
// Compute max(scale*i + offset) for init <= i < limit and build the predicate
// as "max(scale*i + offset) u< a.length".
//
// There are two cases for max(scale*i + offset):
// (1) stride*scale > 0
//   max(scale*i + offset) = scale*(limit-stride) + offset
// (2) stride*scale < 0
//   max(scale*i + offset) = scale*init + offset
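// Worked example (illustrative, with limit being the exact limit computed by exact_limit()):
//   for (i = init; i < limit; i += 2) with scale = 3, offset = 7: stride*scale = 6 > 0 and the last value
//   of i is limit - 2, so max(3*i + 7) = 3*(limit - 2) + 7 (case 1).
//   With scale = -3 instead, stride*scale = -6 < 0 and the expression decreases over the iterations, so its
//   maximum is taken in the first iteration: max(-3*i + 7) = -3*init + 7 (case 2).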
765
BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, const int scale, Node* offset, Node* init, Node* limit,
766
                                       const jint stride, Node* range, const bool upper, bool& overflow) {
767
  jint con_limit  = (limit != nullptr && limit->is_Con())  ? limit->get_int()  : 0;
768
  jint con_init   = init->is_Con()   ? init->get_int()   : 0;
769
  jint con_offset = offset->is_Con() ? offset->get_int() : 0;
770

771
  stringStream* predString = nullptr;
772
  if (TraceLoopPredicate) {
773
    predString = new (mtCompiler) stringStream();
774
    predString->print("rc_predicate ");
775
  }
776

777
  overflow = false;
778
  Node* max_idx_expr = nullptr;
779
  const TypeInt* idx_type = TypeInt::INT;
780
  // same signs and upper, or different signs and not upper.
781
  if (((stride > 0) == (scale > 0)) == upper) {
782
    guarantee(limit != nullptr, "sanity");
783
    if (TraceLoopPredicate) {
784
      if (limit->is_Con()) {
785
        predString->print("(%d ", con_limit);
786
      } else {
787
        predString->print("(limit ");
788
      }
789
      predString->print("- %d) ", stride);
790
    }
791
    // Check if (limit - stride) may overflow
792
    const TypeInt* limit_type = _igvn.type(limit)->isa_int();
793
    jint limit_lo = limit_type->_lo;
794
    jint limit_hi = limit_type->_hi;
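    // For stride > 0, (limit - stride) can only underflow for the smallest possible limit: if subtracting the
    // stride from limit_lo still yields a smaller value, no limit in [limit_lo, limit_hi] can wrap around.
    // The stride < 0 case is symmetric and is checked against limit_hi.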
795
    if ((stride > 0 && (java_subtract(limit_lo, stride) < limit_lo)) ||
796
        (stride < 0 && (java_subtract(limit_hi, stride) > limit_hi))) {
797
      // No overflow possible
798
      ConINode* con_stride = _igvn.intcon(stride);
799
      set_ctrl(con_stride, C->root());
800
      max_idx_expr = new SubINode(limit, con_stride);
801
      idx_type = TypeInt::make(limit_lo - stride, limit_hi - stride, limit_type->_widen);
802
    } else {
803
      // May overflow
804
      overflow = true;
805
      limit = new ConvI2LNode(limit);
806
      register_new_node(limit, ctrl);
807
      ConLNode* con_stride = _igvn.longcon(stride);
808
      set_ctrl(con_stride, C->root());
809
      max_idx_expr = new SubLNode(limit, con_stride);
810
    }
811
    register_new_node(max_idx_expr, ctrl);
812
  } else {
813
    if (TraceLoopPredicate) {
814
      if (init->is_Con()) {
815
        predString->print("%d ", con_init);
816
      } else {
817
        predString->print("init ");
818
      }
819
    }
820
    idx_type = _igvn.type(init)->isa_int();
821
    max_idx_expr = init;
822
  }
823

824
  if (scale != 1) {
825
    ConNode* con_scale = _igvn.intcon(scale);
826
    set_ctrl(con_scale, C->root());
827
    if (TraceLoopPredicate) {
828
      predString->print("* %d ", scale);
829
    }
830
    // Check if (scale * max_idx_expr) may overflow
831
    const TypeInt* scale_type = TypeInt::make(scale);
832
    MulINode* mul = new MulINode(max_idx_expr, con_scale);
833

834
    if (overflow || MulINode::does_overflow(idx_type, scale_type)) {
835
      // May overflow
836
      idx_type = TypeInt::INT;
837
      mul->destruct(&_igvn);
838
      if (!overflow) {
839
        max_idx_expr = new ConvI2LNode(max_idx_expr);
840
        register_new_node(max_idx_expr, ctrl);
841
      }
842
      overflow = true;
843
      con_scale = _igvn.longcon(scale);
844
      set_ctrl(con_scale, C->root());
845
      max_idx_expr = new MulLNode(max_idx_expr, con_scale);
846
    } else {
847
      // No overflow possible
848
      max_idx_expr = mul;
849
      idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
850
    }
851
    register_new_node(max_idx_expr, ctrl);
852
  }
853

854
  if (offset && (!offset->is_Con() || con_offset != 0)){
855
    if (TraceLoopPredicate) {
856
      if (offset->is_Con()) {
857
        predString->print("+ %d ", con_offset);
858
      } else {
859
        predString->print("+ offset");
860
      }
861
    }
862
    // Check if (max_idx_expr + offset) may overflow
863
    const TypeInt* offset_type = _igvn.type(offset)->isa_int();
864
    jint lo = java_add(idx_type->_lo, offset_type->_lo);
865
    jint hi = java_add(idx_type->_hi, offset_type->_hi);
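    // The following test detects wraparound in the bound additions above: if both lower bounds are negative
    // (the AND has its sign bit set) but their sum 'lo' is non-negative, or both upper bounds are non-negative
    // (the OR has its sign bit clear) but their sum 'hi' is negative, the int addition overflowed.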
866
    if (overflow || (lo > hi) ||
867
        ((idx_type->_lo & offset_type->_lo) < 0 && lo >= 0) ||
868
        ((~(idx_type->_hi | offset_type->_hi)) < 0 && hi < 0)) {
869
      // May overflow
870
      if (!overflow) {
871
        max_idx_expr = new ConvI2LNode(max_idx_expr);
872
        register_new_node(max_idx_expr, ctrl);
873
      }
874
      overflow = true;
875
      offset = new ConvI2LNode(offset);
876
      register_new_node(offset, ctrl);
877
      max_idx_expr = new AddLNode(max_idx_expr, offset);
878
    } else {
879
      // No overflow possible
880
      max_idx_expr = new AddINode(max_idx_expr, offset);
881
    }
882
    register_new_node(max_idx_expr, ctrl);
883
  }
884

885
  CmpNode* cmp = nullptr;
886
  if (overflow) {
887
    // Integer expressions may overflow, do long comparison
888
    range = new ConvI2LNode(range);
889
    register_new_node(range, ctrl);
890
    cmp = new CmpULNode(max_idx_expr, range);
891
  } else {
892
    cmp = new CmpUNode(max_idx_expr, range);
893
  }
894
  register_new_node(cmp, ctrl);
895
  BoolNode* bol = new BoolNode(cmp, BoolTest::lt);
896
  register_new_node(bol, ctrl);
897

898
  if (TraceLoopPredicate) {
899
    predString->print_cr("<u range");
900
    tty->print("%s", predString->base());
901
    delete predString;
902
  }
903
  return bol;
904
}
905

906
// Should loop predication look not only in the path from tail to head
907
// but also in branches of the loop body?
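// Illustrative example: with UseProfiledLoopPredicate, a range check sitting under a branch of the loop body, e.g.
//   for (int i = 0; i < n; i++) { if (flag) { a[i] = 0; } }
// can also be considered for hoisting, provided the profile shows the branch is taken often enough
// (see loop_predication_follow_branches(), which requires path frequency * trip count >= 1).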
908
bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree* loop, float& loop_trip_cnt) {
909
  if (!UseProfiledLoopPredicate) {
910
    return false;
911
  }
912

913
  LoopNode* head = loop->_head->as_Loop();
914
  bool follow_branches = true;
915
  IdealLoopTree* l = loop->_child;
916
  // For leaf loops and loops with a single inner loop
917
  while (l != nullptr && follow_branches) {
918
    IdealLoopTree* child = l;
919
    if (child->_child != nullptr &&
920
        child->_head->is_OuterStripMinedLoop()) {
921
      assert(child->_child->_next == nullptr, "only one inner loop for strip mined loop");
922
      assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined");
923
      child = child->_child;
924
    }
925
    if (child->_child != nullptr || child->_irreducible) {
926
      follow_branches = false;
927
    }
928
    l = l->_next;
929
  }
930
  if (follow_branches) {
931
    loop->compute_profile_trip_cnt(this);
932
    if (head->is_profile_trip_failed()) {
933
      follow_branches = false;
934
    } else {
935
      loop_trip_cnt = head->profile_trip_cnt();
936
      if (head->is_CountedLoop()) {
937
        CountedLoopNode* cl = head->as_CountedLoop();
938
        if (cl->phi() != nullptr) {
939
          const TypeInt* t = _igvn.type(cl->phi())->is_int();
940
          float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS((float)cl->stride_con());
941
          if (worst_case_trip_cnt < loop_trip_cnt) {
942
            loop_trip_cnt = worst_case_trip_cnt;
943
          }
944
        }
945
      }
946
    }
947
  }
948
  return follow_branches;
949
}
950

951
float PathFrequency::to(Node* n) {
952
  // post order walk on the CFG graph from n to _dom
953
  IdealLoopTree* loop = _phase->get_loop(_dom);
954
  Node* c = n;
955
  for (;;) {
956
    assert(_phase->get_loop(c) == loop, "have to be in the same loop");
957
    if (c == _dom || _freqs.at_grow(c->_idx, -1) >= 0) {
958
      float f = c == _dom ? 1 : _freqs.at(c->_idx);
959
      Node* prev = c;
960
      while (_stack.size() > 0 && prev == c) {
961
        Node* n = _stack.node();
962
        if (!n->is_Region()) {
963
          if (_phase->get_loop(n) != _phase->get_loop(n->in(0))) {
964
            // Found an inner loop: compute frequency of reaching this
965
            // exit from the loop head by looking at the number of
966
            // times each loop exit was taken
967
            IdealLoopTree* inner_loop = _phase->get_loop(n->in(0));
968
            LoopNode* inner_head = inner_loop->_head->as_Loop();
969
            assert(_phase->get_loop(n) == loop, "only 1 inner loop");
970
            if (inner_head->is_OuterStripMinedLoop()) {
971
              inner_head->verify_strip_mined(1);
972
              if (n->in(0) == inner_head->in(LoopNode::LoopBackControl)->in(0)) {
973
                n = n->in(0)->in(0)->in(0);
974
              }
975
              inner_loop = inner_loop->_child;
976
              inner_head = inner_loop->_head->as_Loop();
977
              inner_head->verify_strip_mined(1);
978
            }
979
            float loop_exit_cnt = 0.0f;
980
            for (uint i = 0; i < inner_loop->_body.size(); i++) {
981
              Node *n = inner_loop->_body[i];
982
              float c = inner_loop->compute_profile_trip_cnt_helper(n);
983
              loop_exit_cnt += c;
984
            }
985
            float cnt = -1;
986
            if (n->in(0)->is_If()) {
987
              IfNode* iff = n->in(0)->as_If();
988
              float p = n->in(0)->as_If()->_prob;
989
              if (n->Opcode() == Op_IfFalse) {
990
                p = 1 - p;
991
              }
992
              if (p > PROB_MIN) {
993
                cnt = p * iff->_fcnt;
994
              } else {
995
                cnt = 0;
996
              }
997
            } else {
998
              assert(n->in(0)->is_Jump(), "unsupported node kind");
999
              JumpNode* jmp = n->in(0)->as_Jump();
1000
              float p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
1001
              cnt = p * jmp->_fcnt;
1002
            }
1003
            float this_exit_f = cnt > 0 ? cnt / loop_exit_cnt : 0;
1004
            this_exit_f = check_and_truncate_frequency(this_exit_f);
1005
            f = f * this_exit_f;
1006
            f = check_and_truncate_frequency(f);
1007
          } else {
1008
            float p = -1;
1009
            if (n->in(0)->is_If()) {
1010
              p = n->in(0)->as_If()->_prob;
1011
              if (n->Opcode() == Op_IfFalse) {
1012
                p = 1 - p;
1013
              }
1014
            } else {
1015
              assert(n->in(0)->is_Jump(), "unsupported node kind");
1016
              p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
1017
            }
1018
            f = f * p;
1019
            f = check_and_truncate_frequency(f);
1020
          }
1021
          _freqs.at_put_grow(n->_idx, (float)f, -1);
1022
          _stack.pop();
1023
        } else {
1024
          float prev_f = _freqs_stack.pop();
1025
          float new_f = f;
1026
          f = new_f + prev_f;
1027
          f = check_and_truncate_frequency(f);
1028
          uint i = _stack.index();
1029
          if (i < n->req()) {
1030
            c = n->in(i);
1031
            _stack.set_index(i+1);
1032
            _freqs_stack.push(f);
1033
          } else {
1034
            _freqs.at_put_grow(n->_idx, f, -1);
1035
            _stack.pop();
1036
          }
1037
        }
1038
      }
1039
      if (_stack.size() == 0) {
1040
        return check_and_truncate_frequency(f);
1041
      }
1042
    } else if (c->is_Loop()) {
1043
      ShouldNotReachHere();
1044
      c = c->in(LoopNode::EntryControl);
1045
    } else if (c->is_Region()) {
1046
      _freqs_stack.push(0);
1047
      _stack.push(c, 2);
1048
      c = c->in(1);
1049
    } else {
1050
      if (c->is_IfProj()) {
1051
        IfNode* iff = c->in(0)->as_If();
1052
        if (iff->_prob == PROB_UNKNOWN) {
1053
          // assume never taken
1054
          _freqs.at_put_grow(c->_idx, 0, -1);
1055
        } else if (_phase->get_loop(c) != _phase->get_loop(iff)) {
1056
          if (iff->_fcnt == COUNT_UNKNOWN) {
1057
            // assume never taken
1058
            _freqs.at_put_grow(c->_idx, 0, -1);
1059
          } else {
1060
            // skip over loop
1061
            _stack.push(c, 1);
1062
            c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1063
          }
1064
        } else {
1065
          _stack.push(c, 1);
1066
          c = iff;
1067
        }
1068
      } else if (c->is_JumpProj()) {
1069
        JumpNode* jmp = c->in(0)->as_Jump();
1070
        if (_phase->get_loop(c) != _phase->get_loop(jmp)) {
1071
          if (jmp->_fcnt == COUNT_UNKNOWN) {
1072
            // assume never taken
1073
            _freqs.at_put_grow(c->_idx, 0, -1);
1074
          } else {
1075
            // skip over loop
1076
            _stack.push(c, 1);
1077
            c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1078
          }
1079
        } else {
1080
          _stack.push(c, 1);
1081
          c = jmp;
1082
        }
1083
      } else if (c->Opcode() == Op_CatchProj &&
1084
                 c->in(0)->Opcode() == Op_Catch &&
1085
                 c->in(0)->in(0)->is_Proj() &&
1086
                 c->in(0)->in(0)->in(0)->is_Call()) {
1087
        // assume exceptions are never thrown
1088
        uint con = c->as_Proj()->_con;
1089
        if (con == CatchProjNode::fall_through_index) {
1090
          Node* call = c->in(0)->in(0)->in(0)->in(0);
1091
          if (_phase->get_loop(call) != _phase->get_loop(c)) {
1092
            _freqs.at_put_grow(c->_idx, 0, -1);
1093
          } else {
1094
            c = call;
1095
          }
1096
        } else {
1097
          assert(con >= CatchProjNode::catch_all_index, "what else?");
1098
          _freqs.at_put_grow(c->_idx, 0, -1);
1099
        }
1100
      } else if (c->unique_ctrl_out_or_null() == nullptr && !c->is_If() && !c->is_Jump()) {
1101
        ShouldNotReachHere();
1102
      } else {
1103
        c = c->in(0);
1104
      }
1105
    }
1106
  }
1107
  ShouldNotReachHere();
1108
  return -1;
1109
}
1110

1111
void PhaseIdealLoop::loop_predication_follow_branches(Node *n, IdealLoopTree *loop, float loop_trip_cnt,
1112
                                                      PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
1113
                                                      Node_List& if_proj_list) {
1114
  assert(n->is_Region(), "start from a region");
1115
  Node* tail = loop->tail();
1116
  stack.push(n, 1);
1117
  do {
1118
    Node* c = stack.node();
1119
    assert(c->is_Region() || c->is_IfProj(), "only regions or if projections here");
1120
    uint i = stack.index();
1121

1122
    if (i < c->req()) {
1123
      stack.set_index(i+1);
1124
      Node* in = c->in(i);
1125
      while (!is_dominator(in, tail) && !seen.test_set(in->_idx)) {
1126
        IdealLoopTree* in_loop = get_loop(in);
1127
        if (in_loop != loop) {
1128
          in = in_loop->_head->in(LoopNode::EntryControl);
1129
        } else if (in->is_Region()) {
1130
          stack.push(in, 1);
1131
          break;
1132
        } else if (in->is_IfProj() &&
1133
                   in->as_Proj()->is_uncommon_trap_if_pattern() &&
1134
                   (in->in(0)->Opcode() == Op_If ||
1135
                    in->in(0)->Opcode() == Op_RangeCheck)) {
1136
          if (pf.to(in) * loop_trip_cnt >= 1) {
1137
            stack.push(in, 1);
1138
          }
1139
          in = in->in(0);
1140
        } else {
1141
          in = in->in(0);
1142
        }
1143
      }
1144
    } else {
1145
      if (c->is_IfProj()) {
1146
        if_proj_list.push(c);
1147
      }
1148
      stack.pop();
1149
    }
1150

1151
  } while (stack.size() > 0);
1152
}
1153

1154
bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree* loop, IfProjNode* if_success_proj,
                                                  ParsePredicateSuccessProj* parse_predicate_proj, CountedLoopNode* cl,
                                                  ConNode* zero, Invariance& invar, Deoptimization::DeoptReason reason) {
  // The following are changed to non-null when a predicate can be hoisted
  IfNode*   iff  = if_success_proj->in(0)->as_If();
  Node*     test = iff->in(1);
  if (!test->is_Bool()) { // Conv2B, ...
    return false;
  }
  BoolNode* bol = test->as_Bool();
  bool range_check_predicate = false;
  if (invar.is_invariant(bol)) {
    C->print_method(PHASE_BEFORE_LOOP_PREDICATION_IC, 4, iff);
    // Invariant test
    IfProjNode* hoisted_check_predicate_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason,
                                                                           iff->Opcode());
    Node* ctrl = hoisted_check_predicate_proj->in(0)->as_If()->in(0);
    BoolNode* hoisted_check_predicate_bool = invar.clone(bol, ctrl)->as_Bool();

    // Negate test if necessary (Parse Predicates always have IfTrue as success projection and IfFalse as uncommon trap)
    bool negated = false;
    if (if_success_proj->is_IfFalse()) {
      hoisted_check_predicate_bool = new BoolNode(hoisted_check_predicate_bool->in(1),
                                                  hoisted_check_predicate_bool->_test.negate());
      register_new_node(hoisted_check_predicate_bool, ctrl);
      negated = true;
    }
    IfNode* new_predicate_iff = hoisted_check_predicate_proj->in(0)->as_If();
    _igvn.hash_delete(new_predicate_iff);
    new_predicate_iff->set_req(1, hoisted_check_predicate_bool);

    invar.map_ctrl(if_success_proj, hoisted_check_predicate_proj); // Mark hoisted check as invariant

    // Eliminate the old If in the loop body.
    dominated_by(hoisted_check_predicate_proj, iff, negated);

    C->print_method(PHASE_AFTER_LOOP_PREDICATION_IC, 4, hoisted_check_predicate_proj->in(0));

#ifndef PRODUCT
    if (TraceLoopPredicate) {
      tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
      loop->dump_head();
    } else if (TraceLoopOpts) {
      tty->print("Predicate IC ");
      loop->dump_head();
    }
#endif
  } else if (cl != nullptr && loop->is_range_check_if(if_success_proj, this, invar DEBUG_ONLY(COMMA parse_predicate_proj))) {
    C->print_method(PHASE_BEFORE_LOOP_PREDICATION_RC, 4, iff);
    // Range check for counted loops
    assert(if_success_proj->is_IfTrue(), "trap must be on false projection for a range check");
    IfTrueNode* hoisted_check_proj = if_success_proj->as_IfTrue();
    const Node*    cmp    = bol->in(1)->as_Cmp();
    Node*          idx    = cmp->in(1);
    assert(!invar.is_invariant(idx), "index is variant");
    Node* rng = cmp->in(2);
    assert(rng->Opcode() == Op_LoadRange || iff->is_RangeCheck() || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
    assert(invar.is_invariant(rng), "range must be invariant");
    int scale    = 1;
    Node* offset = zero;
    bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
    assert(ok, "must be index expression");

    Node* init    = cl->init_trip();
    // The loop limit is not necessarily exact (the counted loop's exit test is '<' or '>'),
    // so calculate the exact limit here.
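    // Illustrative worked example (values assumed for illustration, not taken from this code): with
    // init = 0, stride = 2 and limit = 7, the iv takes the values 0, 2, 4, 6, so the exact limit is
    //   exact_limit = init + trip_count * stride = 0 + 4 * 2 = 8
    // i.e. the iv value at which the loop actually exits, which is what exact_limit(loop) below provides.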
#ifdef ASSERT
    const bool exact_trip_count = cl->has_exact_trip_count();
    const uint trip_count = cl->trip_count();
    loop->compute_trip_count(this);
    assert(exact_trip_count == cl->has_exact_trip_count() && trip_count == cl->trip_count(),
           "should have computed trip count on Loop Predication entry");
#endif
    Node* limit   = exact_limit(loop);
    int  stride   = cl->stride()->get_int();

    // Build if's for the upper and lower bound tests.  The
    // lower_bound test will dominate the upper bound test and all
    // cloned or created nodes will use the lower bound test as
    // their declared control.

    // Perform cloning to keep Invariance state correct since the
    // late schedule will place invariant things in the loop.
    ParsePredicateNode* parse_predicate = parse_predicate_proj->in(0)->as_ParsePredicate();
    Node* ctrl = parse_predicate->in(0);
    rng = invar.clone(rng, ctrl);
    if (offset && offset != zero) {
      assert(invar.is_invariant(offset), "offset must be loop invariant");
      offset = invar.clone(offset, ctrl);
    }
    // If predicate expressions may overflow in the integer range, longs are used.
    bool overflow = false;
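    // Conceptual sketch of the checks built below (simplified illustration, assuming stride > 0 and scale > 0;
    // the pseudo-code is not part of this method):
    //   in the loop:     if (!(scale*i + offset u< range)) trap();   // original range check
    //   above the loop:  (scale*init       + offset) u< range        // covers the first iv value
    //                    (scale*last_value + offset) u< range        // covers the last iv value reached
    // Since the index expression is monotonic in the iv, these two checks imply the in-loop check for every
    // iteration, which is why the in-loop range check can be removed afterwards.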
    // Test the lower bound
    BoolNode* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false, overflow);

    const int if_opcode = iff->Opcode();
    IfProjNode* lower_bound_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason, overflow ? Op_If : if_opcode);
    IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
    _igvn.hash_delete(lower_bound_iff);
    lower_bound_iff->set_req(1, lower_bound_bol);
    if (TraceLoopPredicate) {
      tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
    }

    // Test the upper bound
    BoolNode* upper_bound_bol = rc_predicate(lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow);

    IfProjNode* upper_bound_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason, overflow ? Op_If : if_opcode);
    assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
    IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
    _igvn.hash_delete(upper_bound_iff);
    upper_bound_iff->set_req(1, upper_bound_bol);
    if (TraceLoopPredicate) {
      tty->print_cr("upper bound check if: %d", upper_bound_iff->_idx);
    }

    // Fall through into the rest of the cleanup code which will move any dependent nodes to the Template Assertion
    // Predicates of the upper bound test. We always need to create Template Assertion Predicates in order to properly
    // remove dead loops when later splitting the predicated loop into (unreachable) sub-loops (e.g. by unrolling,
    // peeling, pre/main/post etc.).
    IfTrueNode* template_assertion_predicate_proj =
        add_template_assertion_predicate(iff, loop, hoisted_check_proj, parse_predicate_proj, upper_bound_proj, scale,
                                         offset, init, limit, stride, rng, overflow, reason);

    // Eliminate the old range check in the loop body.
    // When a range check is eliminated, data dependent nodes (Load and range check CastII nodes) are now dependent on 2
    // Hoisted Check Predicates (one for the start of the loop, one for the end) but we can only keep track of one control
    // dependency: pin the data dependent nodes.
    eliminate_hoisted_range_check(hoisted_check_proj, template_assertion_predicate_proj);
    invar.map_ctrl(hoisted_check_proj, template_assertion_predicate_proj); // Mark hoisted check as invariant

    C->print_method(PHASE_AFTER_LOOP_PREDICATION_RC, 4, template_assertion_predicate_proj->in(0));

#ifndef PRODUCT
    if (TraceLoopOpts && !TraceLoopPredicate) {
      tty->print("Predicate RC ");
      loop->dump_head();
    }
#endif
  } else {
    // Loop variant check (for example, range check in non-counted loop)
    // with uncommon trap.
    return false;
  }

  C->set_major_progress();
  return true;
}

void PhaseIdealLoop::eliminate_hoisted_range_check(IfTrueNode* hoisted_check_proj,
                                                   IfTrueNode* template_assertion_predicate_proj) {
  _igvn.replace_input_of(hoisted_check_proj->in(0), 1, _igvn.intcon(1));
  rewire_safe_outputs_to_dominator(hoisted_check_proj, template_assertion_predicate_proj, true);
}

// Each newly created Hoisted Check Predicate is accompanied by two Template Assertion Predicates. Later, we initialize
// them by making a copy of them when splitting a loop into sub loops. The Assertion Predicates ensure that dead sub
// loops are removed properly.
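// Illustrative sketch of the two templates built below (simplified; see the code for the exact node wiring):
//   "Init value" template: range check evaluated at iv = OpaqueLoopInit(init)
//   "Last value" template: range check evaluated at iv = OpaqueLoopInit(init) + (OpaqueLoopStride(stride) - stride)
// The OpaqueLoop* nodes keep these expressions from being folded while still allowing later loop opts (e.g. unrolling,
// which changes the stride) to update them; the conditions are additionally hidden behind Opaque4 nodes until loop
// opts are over.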
IfTrueNode* PhaseIdealLoop::add_template_assertion_predicate(IfNode* iff, IdealLoopTree* loop, IfProjNode* if_proj,
                                                             ParsePredicateSuccessProj* parse_predicate_proj,
                                                             IfProjNode* upper_bound_proj, const int scale, Node* offset,
                                                             Node* init, Node* limit, const jint stride,
                                                             Node* rng, bool& overflow, Deoptimization::DeoptReason reason) {
  // First predicate for the initial value on first loop iteration
  Node* opaque_init = new OpaqueLoopInitNode(C, init);
  register_new_node(opaque_init, upper_bound_proj);
  bool negate = (if_proj->_con != parse_predicate_proj->_con);
  BoolNode* bol = rc_predicate(upper_bound_proj, scale, offset, opaque_init, limit, stride, rng,
                               (stride > 0) != (scale > 0), overflow);
  Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
  C->add_template_assertion_predicate_opaq(opaque_bol);
  register_new_node(opaque_bol, upper_bound_proj);
  IfTrueNode* new_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode(),
                                                     false NOT_PRODUCT(COMMA AssertionPredicateType::Init_value));
  _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
  assert(opaque_init->outcnt() > 0, "should be used");

  // Second predicate for init + (current stride - initial stride).
  // This is identical to the previous predicate initially, but as
  // unrolling proceeds, the current stride is updated.
  Node* init_stride = loop->_head->as_CountedLoop()->stride();
  Node* opaque_stride = new OpaqueLoopStrideNode(C, init_stride);
  register_new_node(opaque_stride, new_proj);
  Node* max_value = new SubINode(opaque_stride, init_stride);
  register_new_node(max_value, new_proj);
  max_value = new AddINode(opaque_init, max_value);
  register_new_node(max_value, new_proj);
  // init + (current stride - initial stride) is within the loop so narrow its type by leveraging the type of the iv Phi
  const Type* type_iv = loop->_head->as_CountedLoop()->phi()->bottom_type();
  assert(!type_iv->is_int()->is_con(), "constant indicates one loop iteration for which we bailed out earlier");
  max_value = new CastIINode(new_proj, max_value, type_iv);
  register_new_node(max_value, new_proj);

  bol = rc_predicate(new_proj, scale, offset, max_value, limit, stride, rng, (stride > 0) != (scale > 0),
                     overflow);
  opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
  C->add_template_assertion_predicate_opaq(opaque_bol);
  register_new_node(opaque_bol, new_proj);
  new_proj = create_new_if_for_predicate(parse_predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode(),
                                         false NOT_PRODUCT(COMMA AssertionPredicateType::Last_value));
  _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
  assert(max_value->outcnt() > 0, "should be used");
  assert(assertion_predicate_has_loop_opaque_node(new_proj->in(0)->as_If()), "unexpected");

  return new_proj;
}

// Insert Hoisted Check Predicates for null checks and range checks and additional Template Assertion Predicates for
// range checks.
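// Rough outline of the steps below (summary only):
//   1. Bail out for infinite loops, loops with a trip count of 1 and counted loops that are not normal loops.
//   2. Walk up from the loop tail and collect candidate If/RangeCheck projections (and Regions if branches may be
//      followed with profile data).
//   3. Try to hoist each candidate as a Loop Predicate above the Loop Parse Predicate.
//   4. If profile data suggests it, hoist the remaining candidates (including checks found by following branches)
//      as Profiled Loop Predicates.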
bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree* loop) {
  LoopNode* head = loop->_head->as_Loop();

  if (head->unique_ctrl_out()->is_NeverBranch()) {
    // do nothing for infinite loops
    return false;
  }

  CountedLoopNode *cl = nullptr;
  if (head->is_valid_counted_loop(T_INT)) {
    cl = head->as_CountedLoop();
    if (!cl->is_normal_loop()) {
      // Do nothing for iteration-split loops
      return false;
    }
    loop->compute_trip_count(this);
    if (cl->trip_count() == 1) {
      // Not worth hoisting checks out of a loop that only runs for one iteration since the checks would only
      // be executed once anyway.
      return false;
    }
    // Avoid RCE if Counted loop's test is '!='.
    BoolTest::mask bt = cl->loopexit()->test_trip();
    if (bt != BoolTest::lt && bt != BoolTest::gt) {
      cl = nullptr;
    }
  }

  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  const Predicates predicates(entry);
  const PredicateBlock* loop_predicate_block = predicates.loop_predicate_block();
  const PredicateBlock* profiled_loop_predicate_block = predicates.profiled_loop_predicate_block();
  float loop_trip_cnt = -1;
  bool follow_branches = profiled_loop_predicate_block->has_parse_predicate() &&
                         loop_predication_should_follow_branches(loop, loop_trip_cnt);
  assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?");

  if (!loop_predicate_block->has_parse_predicate() && !follow_branches) {
#ifndef PRODUCT
    if (TraceLoopPredicate) {
      tty->print("Missing Parse Predicates:");
      loop->dump_head();
      head->dump(1);
    }
#endif
    return false;
  }
  ConNode* zero = _igvn.intcon(0);
  set_ctrl(zero, C->root());

  ResourceArea* area = Thread::current()->resource_area();
  Invariance invar(area, loop);

  // Create list of if-projs such that a newer proj dominates all older
  // projs in the list, and they all dominate loop->tail()
  Node_List if_proj_list;
  Node_List regions;
  Node* current_proj = loop->tail(); // start from tail


  Node_List controls;
  while (current_proj != head) {
    if (loop == get_loop(current_proj) && // still in the loop?
        current_proj->is_Proj()        && // is a projection?
        (current_proj->in(0)->Opcode() == Op_If ||
         current_proj->in(0)->Opcode() == Op_RangeCheck)) { // is an If projection?
      if_proj_list.push(current_proj);
    }
    if (follow_branches &&
        current_proj->Opcode() == Op_Region &&
        loop == get_loop(current_proj)) {
      regions.push(current_proj);
    }
    current_proj = idom(current_proj);
  }

  bool hoisted = false; // true if at least one proj is promoted

  if (can_create_loop_predicates(profiled_loop_predicate_block)) {
    while (if_proj_list.size() > 0) {
      Node* n = if_proj_list.pop();

      IfProjNode* if_proj = n->as_IfProj();
      IfNode* iff = if_proj->in(0)->as_If();

      CallStaticJavaNode* call = if_proj->is_uncommon_trap_if_pattern();
      if (call == nullptr) {
        if (loop->is_loop_exit(iff)) {
          // Stop processing the remaining projs in the list because their execution
          // depends on the condition of "iff" (iff->in(1)).
          break;
        } else {
          // Both arms are inside the loop. There are two cases:
          // (1) there is one backward branch. In this case, any remaining proj
          //     in the if_proj list post-dominates "iff". So, the condition of "iff"
          //     does not directly determine the execution of the remaining projs, and we
          //     can safely continue.
          // (2) both arms are forward branches, i.e. a diamond shape. In this case, "proj"
          //     does not dominate loop->tail(), so it cannot be in the if_proj list.
          continue;
        }
      }
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(call->uncommon_trap_request());
      if (reason == Deoptimization::Reason_predicate) {
        break;
      }

      if (loop_predicate_block->has_parse_predicate()) {
        ParsePredicateSuccessProj* loop_parse_predicate_proj = loop_predicate_block->parse_predicate_success_proj();
        hoisted = loop_predication_impl_helper(loop, if_proj, loop_parse_predicate_proj, cl, zero, invar,
                                               Deoptimization::Reason_predicate) | hoisted;
      }
    } // end while
  }

  if (follow_branches) {
    assert(profiled_loop_predicate_block->has_parse_predicate(), "sanity check");
    PathFrequency pf(loop->_head, this);

    // Some projections were skipped due to an early loop exit. Try them with profile data.
    while (if_proj_list.size() > 0) {
      Node* if_proj = if_proj_list.pop();
      float f = pf.to(if_proj);
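      // Only hoist a check with a Profiled Loop Predicate if, according to the profile, it is expected to be
      // executed at least once per execution of the loop (estimated path frequency times estimated trip count >= 1).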
      if (if_proj->as_Proj()->is_uncommon_trap_if_pattern() &&
          f * loop_trip_cnt >= 1) {
        ParsePredicateSuccessProj* profiled_loop_parse_predicate_proj =
            profiled_loop_predicate_block->parse_predicate_success_proj();
        hoisted = loop_predication_impl_helper(loop, if_proj->as_IfProj(), profiled_loop_parse_predicate_proj,
                                               cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
      }
    }

    // And look into all branches
    Node_Stack stack(0);
    VectorSet seen;
    Node_List if_proj_list_freq(area);
    while (regions.size() > 0) {
      Node* c = regions.pop();
      loop_predication_follow_branches(c, loop, loop_trip_cnt, pf, stack, seen, if_proj_list_freq);
    }

    for (uint i = 0; i < if_proj_list_freq.size(); i++) {
      IfProjNode* if_proj = if_proj_list_freq.at(i)->as_IfProj();
      ParsePredicateSuccessProj* profiled_loop_parse_predicate_proj =
          profiled_loop_predicate_block->parse_predicate_success_proj();
      hoisted = loop_predication_impl_helper(loop, if_proj, profiled_loop_parse_predicate_proj, cl, zero,
                                             invar, Deoptimization::Reason_profile_predicate) | hoisted;
    }
  }

#ifndef PRODUCT
  // Report that loop predication was actually performed for this loop
  if (TraceLoopPredicate && hoisted) {
    tty->print("Loop Predication Performed:");
    loop->dump_head();
  }
#endif

  head->verify_strip_mined(1);

  return hoisted;
}

// We cannot add Loop Predicates if:
// (1) We have already added Profiled Loop Predicates (Loop Predicates and Profiled Loop Predicates can be dependent
//     through a data node, and thus we should only add new Profiled Loop Predicates which are below Loop Predicates
//     in the graph).
// (2) There are currently no Profiled Loop Predicates, but we have a data node with a control dependency on the Loop
//     Parse Predicate (could happen, for example, if we've removed an earlier created Profiled Loop Predicate with
//     dominated_by()). We should not create a Loop Predicate for a check that is dependent on this data node because
//     the Loop Predicate would end up above the data node with its dependency on the Loop Parse Predicate below. This
//     would become unschedulable. However, we can still hoist the check as a Profiled Loop Predicate which would end up
//     below the Loop Parse Predicate.
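// Simplified sketch of the predicate blocks above the loop entry that this check reasons about (assumed layout,
// block-level only):
//
//   Loop Parse Predicate + already hoisted Loop Predicates          (Loop Predicate Block)
//   Profiled Loop Parse Predicate + Profiled Loop Predicates        (Profiled Loop Predicate Block)
//   ...
//   loop entry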
bool PhaseIdealLoop::can_create_loop_predicates(const PredicateBlock* profiled_loop_predicate_block) const {
  bool has_profiled_loop_predicate_block = profiled_loop_predicate_block != nullptr;
  bool can_create_loop_predicates = true;
  if (has_profiled_loop_predicate_block
      && (profiled_loop_predicate_block->has_runtime_predicates() // (1)
          || profiled_loop_predicate_block->entry()->outcnt() != 1)) { // (2)
    can_create_loop_predicates = false;
  }
  return can_create_loop_predicates;
}

//------------------------------loop_predication--------------------------------
// driver routine for loop predication optimization
bool IdealLoopTree::loop_predication(PhaseIdealLoop* phase) {
  bool hoisted = false;
  // Recursively promote predicates
  if (_child) {
    hoisted = _child->loop_predication(phase);
  }

  // Self
  if (can_apply_loop_predication()) {
    hoisted |= phase->loop_predication_impl(this);
  }

  // Sibling
  if (_next) {
    hoisted |= _next->loop_predication(phase);
  }

  return hoisted;
}

bool IdealLoopTree::can_apply_loop_predication() {
  return !_head->is_Root() &&
         _head->is_Loop() &&
         !_head->is_OuterStripMinedLoop() &&
         !_irreducible &&
         !tail()->is_top();
}