ifnode.cpp · 2219 lines · 83.2 KB
1
/*
2
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "ci/ciTypeFlow.hpp"
27
#include "memory/allocation.inline.hpp"
28
#include "memory/resourceArea.hpp"
29
#include "opto/addnode.hpp"
30
#include "opto/castnode.hpp"
31
#include "opto/cfgnode.hpp"
32
#include "opto/connode.hpp"
33
#include "opto/loopnode.hpp"
34
#include "opto/phaseX.hpp"
35
#include "opto/predicates.hpp"
36
#include "opto/runtime.hpp"
37
#include "opto/rootnode.hpp"
38
#include "opto/subnode.hpp"
39
#include "opto/subtypenode.hpp"
40

41
// Portions of code courtesy of Clifford Click
42

43
// Optimization - Graph Style
44

45

46
#ifndef PRODUCT
47
extern uint explicit_null_checks_elided;
48
#endif
49

50
IfNode::IfNode(Node* control, Node* bol, float p, float fcnt)
51
    : MultiBranchNode(2),
52
      _prob(p),
53
      _fcnt(fcnt)
54
      NOT_PRODUCT(COMMA _assertion_predicate_type(AssertionPredicateType::None)) {
55
  init_node(control, bol);
56
}
57

58
#ifndef PRODUCT
59
IfNode::IfNode(Node* control, Node* bol, float p, float fcnt, AssertionPredicateType assertion_predicate_type)
60
    : MultiBranchNode(2),
61
      _prob(p),
62
      _fcnt(fcnt),
63
      _assertion_predicate_type(assertion_predicate_type) {
64
  init_node(control, bol);
65
}
66
#endif // NOT_PRODUCT
67

68
//=============================================================================
69
//------------------------------Value------------------------------------------
70
// Return a tuple for whichever arm of the IF is reachable
71
const Type* IfNode::Value(PhaseGVN* phase) const {
72
  if( !in(0) ) return Type::TOP;
73
  if( phase->type(in(0)) == Type::TOP )
74
    return Type::TOP;
75
  const Type *t = phase->type(in(1));
76
  if( t == Type::TOP )          // data is undefined
77
    return TypeTuple::IFNEITHER; // unreachable altogether
78
  if( t == TypeInt::ZERO )      // zero, or false
79
    return TypeTuple::IFFALSE;  // only false branch is reachable
80
  if( t == TypeInt::ONE )       // 1, or true
81
    return TypeTuple::IFTRUE;   // only true branch is reachable
82
  assert( t == TypeInt::BOOL, "expected boolean type" );
83

84
  return TypeTuple::IFBOTH;     // No progress
85
}
86

87
const RegMask &IfNode::out_RegMask() const {
88
  return RegMask::Empty;
89
}
90

91
//------------------------------split_if---------------------------------------
92
// Look for places where we merge constants, then test on the merged value.
93
// If the IF test will be constant folded on the path with the constant, we
94
// win by splitting the IF to before the merge point.
95
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
96
  // I could be a lot more general here, but I'm trying to squeeze this
97
  // in before the Christmas '98 break so I'm gonna be kinda restrictive
98
  // on the patterns I accept.  CNC
99

100
  // Look for a compare of a constant and a merged value
101
  Node *i1 = iff->in(1);
102
  if( !i1->is_Bool() ) return nullptr;
103
  BoolNode *b = i1->as_Bool();
104
  Node *cmp = b->in(1);
105
  if( !cmp->is_Cmp() ) return nullptr;
106
  i1 = cmp->in(1);
107
  if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
108
  PhiNode *phi = i1->as_Phi();
109
  Node *con2 = cmp->in(2);
110
  if( !con2->is_Con() ) return nullptr;
111
  // See that the merge point contains some constants
112
  Node *con1=nullptr;
113
  uint i4;
114
  RegionNode* phi_region = phi->region();
115
  for (i4 = 1; i4 < phi->req(); i4++ ) {
116
    con1 = phi->in(i4);
117
    // Do not optimize partially collapsed merges
118
    if (con1 == nullptr || phi_region->in(i4) == nullptr || igvn->type(phi_region->in(i4)) == Type::TOP) {
119
      igvn->_worklist.push(iff);
120
      return nullptr;
121
    }
122
    if( con1->is_Con() ) break; // Found a constant
123
    // Also allow null-vs-not-null checks
124
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
125
    if( tp && tp->_ptr == TypePtr::NotNull )
126
      break;
127
  }
128
  if( i4 >= phi->req() ) return nullptr; // Found no constants
129

130
  igvn->C->set_has_split_ifs(true); // Has chance for split-if
131

132
  // Make sure that the compare can be constant folded away
133
  Node *cmp2 = cmp->clone();
134
  cmp2->set_req(1,con1);
135
  cmp2->set_req(2,con2);
136
  const Type *t = cmp2->Value(igvn);
137
  // This compare is dead, so whack it!
138
  igvn->remove_dead_node(cmp2);
139
  if( !t->singleton() ) return nullptr;
140

141
  // No intervening control, like a simple Call
142
  Node* r = iff->in(0);
143
  if (!r->is_Region() || r->is_Loop() || phi_region != r || r->as_Region()->is_copy()) {
144
    return nullptr;
145
  }
146

147
  // No other users of the cmp/bool
148
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
149
    //tty->print_cr("many users of cmp/bool");
150
    return nullptr;
151
  }
152

153
  // Make sure we can determine where all the uses of merged values go
154
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
155
    Node* u = r->fast_out(j);
156
    if( u == r ) continue;
157
    if( u == iff ) continue;
158
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
159
    if( !u->is_Phi() ) {
160
      /*
161
      if( u->is_Start() ) {
162
        tty->print_cr("Region has inlined start use");
163
      } else {
164
        tty->print_cr("Region has odd use");
165
        u->dump(2);
166
      }*/
167
      return nullptr;
168
    }
169
    if( u != phi ) {
170
      // CNC - do not allow any other merged value
171
      //tty->print_cr("Merging another value");
172
      //u->dump(2);
173
      return nullptr;
174
    }
175
    // Make sure we can account for all Phi uses
176
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
177
      Node* v = u->fast_out(k); // User of the phi
178
      // CNC - Allow only really simple patterns.
179
      // In particular I disallow AddP of the Phi, a fairly common pattern
180
      if (v == cmp) continue;  // The compare is OK
181
      if (v->is_ConstraintCast()) {
182
        // If the cast is derived from data flow edges, it may not have a control edge.
183
        // If so, it should be safe to split. But follow-up code can not deal with
184
        // this (l. 359). So skip.
185
        if (v->in(0) == nullptr) {
186
          return nullptr;
187
        }
188
        if (v->in(0)->in(0) == iff) {
189
          continue;               // CastPP/II of the IfNode is OK
190
        }
191
      }
192
      // Disabled following code because I cannot tell if exactly one
193
      // path dominates without a real dominator check. CNC 9/9/1999
194
      //uint vop = v->Opcode();
195
      //if( vop == Op_Phi ) {        // Phi from another merge point might be OK
196
      //  Node *r = v->in(0);        // Get controlling point
197
      //  if( !r ) return nullptr;   // Degraded to a copy
198
      //  // Find exactly one path in (either True or False doms, but not IFF)
199
      //  int cnt = 0;
200
      //  for( uint i = 1; i < r->req(); i++ )
201
      //    if( r->in(i) && r->in(i)->in(0) == iff )
202
      //      cnt++;
203
      //  if( cnt == 1 ) continue; // Exactly one of True or False guards Phi
204
      //}
205
      if( !v->is_Call() ) {
206
        /*
207
        if( v->Opcode() == Op_AddP ) {
208
          tty->print_cr("Phi has AddP use");
209
        } else if( v->Opcode() == Op_CastPP ) {
210
          tty->print_cr("Phi has CastPP use");
211
        } else if( v->Opcode() == Op_CastII ) {
212
          tty->print_cr("Phi has CastII use");
213
        } else {
214
          tty->print_cr("Phi has use I can't be bothered with");
215
        }
216
        */
217
      }
218
      return nullptr;
219

220
      /* CNC - Cut out all the fancy acceptance tests
221
      // Can we clone this use when doing the transformation?
222
      // If all uses are from Phis at this merge or constants, then YES.
223
      if( !v->in(0) && v != cmp ) {
224
        tty->print_cr("Phi has free-floating use");
225
        v->dump(2);
226
        return nullptr;
227
      }
228
      for( uint l = 1; l < v->req(); l++ ) {
229
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
230
            !v->in(l)->is_Con() ) {
231
          tty->print_cr("Phi has use");
232
          v->dump(2);
233
          return nullptr;
234
        } // End of if Phi-use input is neither Phi nor Constant
235
      } // End of for all inputs to Phi-use
236
      */
237
    } // End of for all uses of Phi
238
  } // End of for all uses of Region
239

240
  // Only do this if the IF node is in a sane state
241
  if (iff->outcnt() != 2)
242
    return nullptr;
243

244
  // Got a hit!  Do the Mondo Hack!
245
  //
246
  //ABC  a1c   def   ghi            B     1     e     h   A C   a c   d f   g i
247
  // R - Phi - Phi - Phi            Rc - Phi - Phi - Phi   Rx - Phi - Phi - Phi
248
  //     cmp - 2                         cmp - 2               cmp - 2
249
  //       bool                            bool_c                bool_x
250
  //       if                               if_c                  if_x
251
  //      T  F                              T  F                  T  F
252
  // ..s..    ..t ..                   ..s..    ..t..        ..s..    ..t..
253
  //
254
  // Split the paths coming into the merge point into 2 separate groups of
255
  // merges.  On the left will be all the paths feeding constants into the
256
  // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
257
  // will fold up into a constant; this will let the Cmp fold up as well as
258
  // all the control flow.  Below the original IF we have 2 control
259
  // dependent regions, 's' and 't'.  Now we will merge the two paths
260
  // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
261
  // likely 2 or more) will promptly constant fold away.
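  // Illustrative example (not part of the original comment): suppose the region R
  // merges four paths and phi = Phi(R, 7, x, 7, y) feeds (CmpI phi 7). The constant
  // paths 1 and 3 are routed to region_c, the remaining paths to region_x with
  // phi_x = Phi(region_x, x, y). The cloned test over region_c then compares 7
  // against 7 and constant folds, so one of its arms dies immediately; only the
  // test over region_x remains a real runtime check.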
262
  PhaseGVN *phase = igvn;
263

264
  // Make a region merging constants and a region merging the rest
265
  uint req_c = 0;
266
  for (uint ii = 1; ii < r->req(); ii++) {
267
    if (phi->in(ii) == con1) {
268
      req_c++;
269
    }
270
    if (Node::may_be_loop_entry(r->in(ii))) {
271
      // Bail out if splitting through a region with a Parse Predicate input (could
272
      // also be a loop header before loop opts creates a LoopNode for it).
273
      return nullptr;
274
    }
275
  }
276

277
  // If all the defs of the phi are the same constant, we already have the desired end state.
278
  // Skip the split that would create empty phi and region nodes.
279
  if ((r->req() - req_c) == 1) {
280
    return nullptr;
281
  }
282

283
  // At this point we know that we can apply the split if optimization. If the region is still on the worklist,
284
  // we should wait until it is processed. The region might be removed which makes this optimization redundant.
285
  // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
286
  if (igvn->_worklist.member(r)) {
287
    igvn->_worklist.push(iff); // retry split if later again
288
    return nullptr;
289
  }
290

291
  Node *region_c = new RegionNode(req_c + 1);
292
  Node *phi_c    = con1;
293
  uint  len      = r->req();
294
  Node *region_x = new RegionNode(len - req_c);
295
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
296
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
297
    if (phi->in(i) == con1) {
298
      region_c->init_req( i_c++, r  ->in(i) );
299
    } else {
300
      region_x->init_req( i_x,   r  ->in(i) );
301
      phi_x   ->init_req( i_x++, phi->in(i) );
302
    }
303
  }
304

305
  // Register the new RegionNodes but do not transform them.  Cannot
306
  // transform until the entire Region/Phi conglomerate has been hacked
307
  // as a single huge transform.
308
  igvn->register_new_node_with_optimizer( region_c );
309
  igvn->register_new_node_with_optimizer( region_x );
310
  // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
311
  // about to get one.  If this only use goes away, then phi_x will look dead.
312
  // However, he will be picking up some more uses down below.
313
  Node *hook = new Node(4);
314
  hook->init_req(0, phi_x);
315
  hook->init_req(1, phi_c);
316
  phi_x = phase->transform( phi_x );
317

318
  // Make the compare
319
  Node *cmp_c = phase->makecon(t);
320
  Node *cmp_x = cmp->clone();
321
  cmp_x->set_req(1,phi_x);
322
  cmp_x->set_req(2,con2);
323
  cmp_x = phase->transform(cmp_x);
324
  // Make the bool
325
  Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
326
  Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
327
  // Make the IfNode
328
  IfNode* iff_c = iff->clone()->as_If();
329
  iff_c->set_req(0, region_c);
330
  iff_c->set_req(1, b_c);
331
  igvn->set_type_bottom(iff_c);
332
  igvn->_worklist.push(iff_c);
333
  hook->init_req(2, iff_c);
334

335
  IfNode* iff_x = iff->clone()->as_If();
336
  iff_x->set_req(0, region_x);
337
  iff_x->set_req(1, b_x);
338
  igvn->set_type_bottom(iff_x);
339
  igvn->_worklist.push(iff_x);
340
  hook->init_req(3, iff_x);
341

342
  // Make the true/false arms
343
  Node *iff_c_t = phase->transform(new IfTrueNode (iff_c));
344
  Node *iff_c_f = phase->transform(new IfFalseNode(iff_c));
345
  Node *iff_x_t = phase->transform(new IfTrueNode (iff_x));
346
  Node *iff_x_f = phase->transform(new IfFalseNode(iff_x));
347

348
  // Merge the TRUE paths
349
  Node *region_s = new RegionNode(3);
350
  igvn->_worklist.push(region_s);
351
  region_s->init_req(1, iff_c_t);
352
  region_s->init_req(2, iff_x_t);
353
  igvn->register_new_node_with_optimizer( region_s );
354

355
  // Merge the FALSE paths
356
  Node *region_f = new RegionNode(3);
357
  igvn->_worklist.push(region_f);
358
  region_f->init_req(1, iff_c_f);
359
  region_f->init_req(2, iff_x_f);
360
  igvn->register_new_node_with_optimizer( region_f );
361

362
  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
363
  cmp->set_req(1,nullptr);  // Whack the inputs to cmp because it will be dead
364
  cmp->set_req(2,nullptr);
365
  // Check for all uses of the Phi and give them a new home.
366
  // The 'cmp' got cloned, but CastPP/IIs need to be moved.
367
  Node *phi_s = nullptr;     // do not construct unless needed
368
  Node *phi_f = nullptr;     // do not construct unless needed
369
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
370
    Node* v = phi->last_out(i2);// User of the phi
371
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
372
    uint vop = v->Opcode();
373
    Node *proj = nullptr;
374
    if( vop == Op_Phi ) {       // Remote merge point
375
      Node *r = v->in(0);
376
      for (uint i3 = 1; i3 < r->req(); i3++)
377
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
378
          proj = r->in(i3);
379
          break;
380
        }
381
    } else if( v->is_ConstraintCast() ) {
382
      proj = v->in(0);          // Controlling projection
383
    } else {
384
      assert( 0, "do not know how to handle this guy" );
385
    }
386
    guarantee(proj != nullptr, "sanity");
387

388
    Node *proj_path_data, *proj_path_ctrl;
389
    if( proj->Opcode() == Op_IfTrue ) {
390
      if( phi_s == nullptr ) {
391
        // Only construct phi_s if needed, otherwise provides
392
        // interfering use.
393
        phi_s = PhiNode::make_blank(region_s,phi);
394
        phi_s->init_req( 1, phi_c );
395
        phi_s->init_req( 2, phi_x );
396
        hook->add_req(phi_s);
397
        phi_s = phase->transform(phi_s);
398
      }
399
      proj_path_data = phi_s;
400
      proj_path_ctrl = region_s;
401
    } else {
402
      if( phi_f == nullptr ) {
403
        // Only construct phi_f if needed, otherwise provides
404
        // interfering use.
405
        phi_f = PhiNode::make_blank(region_f,phi);
406
        phi_f->init_req( 1, phi_c );
407
        phi_f->init_req( 2, phi_x );
408
        hook->add_req(phi_f);
409
        phi_f = phase->transform(phi_f);
410
      }
411
      proj_path_data = phi_f;
412
      proj_path_ctrl = region_f;
413
    }
414

415
    // Fixup 'v' for the split
416
    if( vop == Op_Phi ) {       // Remote merge point
417
      uint i;
418
      for( i = 1; i < v->req(); i++ )
419
        if( v->in(i) == phi )
420
          break;
421
      v->set_req(i, proj_path_data );
422
    } else if( v->is_ConstraintCast() ) {
423
      v->set_req(0, proj_path_ctrl );
424
      v->set_req(1, proj_path_data );
425
    } else
426
      ShouldNotReachHere();
427
  }
428

429
  // Now replace the original iff's True/False with region_s/region_f.
430
  // This makes the original iff go dead.
431
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
432
    Node* p = iff->last_out(i3);
433
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
434
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
435
    // Replace p with u
436
    igvn->add_users_to_worklist(p);
437
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
438
      Node* x = p->last_out(l);
439
      igvn->hash_delete(x);
440
      uint uses_found = 0;
441
      for( uint j = 0; j < x->req(); j++ ) {
442
        if( x->in(j) == p ) {
443
          x->set_req(j, u);
444
          uses_found++;
445
        }
446
      }
447
      l -= uses_found;    // we deleted 1 or more copies of this edge
448
    }
449
    igvn->remove_dead_node(p);
450
  }
451

452
  // Force the original merge dead
453
  igvn->hash_delete(r);
454
  // First, remove region's dead users.
455
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
456
    Node* u = r->last_out(l);
457
    if( u == r ) {
458
      r->set_req(0, nullptr);
459
    } else {
460
      assert(u->outcnt() == 0, "only dead users");
461
      igvn->remove_dead_node(u);
462
    }
463
    l -= 1;
464
  }
465
  igvn->remove_dead_node(r);
466

467
  // Now remove the bogus extra edges used to keep things alive
468
  igvn->remove_dead_node( hook );
469

470
  // Must return either the original node (now dead) or a new node
471
  // (Do not return a top here, since that would break the uniqueness of top.)
472
  return new ConINode(TypeInt::ZERO);
473
}
474

475
IfNode* IfNode::make_with_same_profile(IfNode* if_node_profile, Node* ctrl, BoolNode* bol) {
476
  // Assert here that we only try to create a clone from an If node with the same profiling if that actually makes sense.
477
  // Some If node subtypes should not be cloned in this way. In theory, we should not clone BaseCountedLoopEndNodes.
478
  // But they can end up being used as normal If nodes when peeling a loop - they serve as zero-trip guard.
479
  // Allow them as well.
480
  assert(if_node_profile->Opcode() == Op_If || if_node_profile->is_RangeCheck()
481
         || if_node_profile->is_BaseCountedLoopEnd(), "should not clone other nodes");
482
  if (if_node_profile->is_RangeCheck()) {
483
    // RangeCheck nodes could be further optimized.
484
    return new RangeCheckNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
485
  } else {
486
    // Not a RangeCheckNode? Fall back to IfNode.
487
    return new IfNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
488
  }
489
}
490

491
// if this IfNode follows a range check pattern return the projection
492
// for the failed path
493
ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
494
  if (outcnt() != 2) {
495
    return nullptr;
496
  }
497
  Node* b = in(1);
498
  if (b == nullptr || !b->is_Bool())  return nullptr;
499
  BoolNode* bn = b->as_Bool();
500
  Node* cmp = bn->in(1);
501
  if (cmp == nullptr)  return nullptr;
502
  if (cmp->Opcode() != Op_CmpU)  return nullptr;
503

504
  l = cmp->in(1);
505
  r = cmp->in(2);
506
  flip_test = 1;
507
  if (bn->_test._test == BoolTest::le) {
508
    l = cmp->in(2);
509
    r = cmp->in(1);
510
    flip_test = 2;
511
  } else if (bn->_test._test != BoolTest::lt) {
512
    return nullptr;
513
  }
514
  if (l->is_top())  return nullptr;   // Top input means dead test
515
  if (r->Opcode() != Op_LoadRange && !is_RangeCheck())  return nullptr;
516

517
  // We have recognized one of these forms:
518
  //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
519
  //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
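  // Concrete illustration (assumption, not from the original comment): a Java
  // array access a[i] is guarded by a single unsigned check i <u a.length,
  // i.e. (Bool[<] CmpU(i, LoadRange a)), which covers both i < 0 and
  // i >= a.length at once; flip_test records whether the operands appeared in
  // this canonical order (1) or had to be swapped (2).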
520

521
  ProjNode* iftrap = proj_out_or_null(flip_test == 2 ? true : false);
522
  return iftrap;
523
}
524

525

526
//------------------------------is_range_check---------------------------------
527
// Return 0 if not a range check.  Return 1 if a range check and set index and
528
// offset.  Return 2 if we had to negate the test.  Index is null if the check
529
// is versus a constant.
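// Hypothetical example: for a check guarding a[i + 4], this returns 1 (or 2 if
// the test had to be negated) with index = i, offset = 4 and range = LoadRange(a);
// for a check guarding a[7], index is null and offset = 7.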
530
int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
531
  int flip_test = 0;
532
  Node* l = nullptr;
533
  Node* r = nullptr;
534
  ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
535

536
  if (iftrap == nullptr) {
537
    return 0;
538
  }
539

540
  // Make sure it's a real range check by requiring an uncommon trap
541
  // along the OOB path.  Otherwise, it's possible that the user wrote
542
  // something which optimized to look like a range check but behaves
543
  // in some other way.
544
  if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
545
    return 0;
546
  }
547

548
  // Look for index+offset form
549
  Node* ind = l;
550
  jint  off = 0;
551
  if (l->is_top()) {
552
    return 0;
553
  } else if (l->Opcode() == Op_AddI) {
554
    if ((off = l->in(1)->find_int_con(0)) != 0) {
555
      ind = l->in(2)->uncast();
556
    } else if ((off = l->in(2)->find_int_con(0)) != 0) {
557
      ind = l->in(1)->uncast();
558
    }
559
  } else if ((off = l->find_int_con(-1)) >= 0) {
560
    // constant offset with no variable index
561
    ind = nullptr;
562
  } else {
563
    // variable index with no constant offset (or dead negative index)
564
    off = 0;
565
  }
566

567
  // Return all the values:
568
  index  = ind;
569
  offset = off;
570
  range  = r;
571
  return flip_test;
572
}
573

574
//------------------------------adjust_check-----------------------------------
575
// Adjust (widen) a prior range check
576
static void adjust_check(IfProjNode* proj, Node* range, Node* index,
577
                         int flip, jint off_lo, PhaseIterGVN* igvn) {
578
  PhaseGVN *gvn = igvn;
579
  // Break apart the old check
580
  Node *iff = proj->in(0);
581
  Node *bol = iff->in(1);
582
  if( bol->is_top() ) return;   // In case a partially dead range check appears
583
  // bail (or bomb[ASSERT/DEBUG]) if NOT projection-->IfNode-->BoolNode
584
  DEBUG_ONLY( if (!bol->is_Bool()) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
585
  if (!bol->is_Bool()) return;
586

587
  Node *cmp = bol->in(1);
588
  // Compute a new check
589
  Node *new_add = gvn->intcon(off_lo);
590
  if (index) {
591
    new_add = off_lo ? gvn->transform(new AddINode(index, new_add)) : index;
592
  }
593
  Node *new_cmp = (flip == 1)
594
    ? new CmpUNode(new_add, range)
595
    : new CmpUNode(range, new_add);
596
  new_cmp = gvn->transform(new_cmp);
597
  // See if no need to adjust the existing check
598
  if (new_cmp == cmp) return;
599
  // Else, adjust existing check
600
  Node* new_bol = gvn->transform(new BoolNode(new_cmp, bol->as_Bool()->_test._test));
601
  igvn->rehash_node_delayed(iff);
602
  iff->set_req_X(1, new_bol, igvn);
603
  // As part of range check smearing, this range check is widened. Loads and range check Cast nodes that are control
604
  // dependent on this range check now depend on multiple dominating range checks. These control dependent nodes end up
605
  // at the lowest/nearest dominating check in the graph. To ensure that these Loads/Casts do not float above any of the
606
  // dominating checks (even when the lowest dominating check is later replaced by yet another dominating check), we
607
  // need to pin them at the lowest dominating check.
608
  proj->pin_array_access_nodes(igvn);
609
}
610

611
//------------------------------up_one_dom-------------------------------------
612
// Walk up the dominator tree one step.  Return null at root or true
613
// complex merges.  Skips through small diamonds.
614
Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
615
  Node *dom = curr->in(0);
616
  if( !dom )                    // Found a Region degraded to a copy?
617
    return curr->nonnull_req(); // Skip thru it
618

619
  if( curr != dom )             // Normal walk up one step?
620
    return dom;
621

622
  // Use linear_only if we are still parsing, since we cannot
623
  // trust the regions to be fully filled in.
624
  if (linear_only)
625
    return nullptr;
626

627
  if( dom->is_Root() )
628
    return nullptr;
629

630
  // Else hit a Region.  Check for a loop header
631
  if( dom->is_Loop() )
632
    return dom->in(1);          // Skip up thru loops
633

634
  // Check for small diamonds
635
  Node *din1, *din2, *din3, *din4;
636
  if( dom->req() == 3 &&        // 2-path merge point
637
      (din1 = dom ->in(1)) &&   // Left  path exists
638
      (din2 = dom ->in(2)) &&   // Right path exists
639
      (din3 = din1->in(0)) &&   // Left  path up one
640
      (din4 = din2->in(0)) ) {  // Right path up one
641
    if( din3->is_Call() &&      // Handle a slow-path call on either arm
642
        (din3 = din3->in(0)) )
643
      din3 = din3->in(0);
644
    if( din4->is_Call() &&      // Handle a slow-path call on either arm
645
        (din4 = din4->in(0)) )
646
      din4 = din4->in(0);
647
    if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
648
      return din3;              // Skip around diamonds
649
  }
650

651
  // Give up the search at true merges
652
  return nullptr;                  // Dead loop?  Or hit root?
653
}
654

655

656
//------------------------------filtered_int_type--------------------------------
657
// Return a possibly more restrictive type for val based on condition control flow for an if
658
const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj) {
659
  assert(if_proj &&
660
         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
661
  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
662
    IfNode* iff = if_proj->in(0)->as_If();
663
    if (iff->in(1) && iff->in(1)->is_Bool()) {
664
      BoolNode* bol = iff->in(1)->as_Bool();
665
      if (bol->in(1) && bol->in(1)->is_Cmp()) {
666
        const CmpNode* cmp  = bol->in(1)->as_Cmp();
667
        if (cmp->in(1) == val) {
668
          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
669
          if (cmp2_t != nullptr) {
670
            jint lo = cmp2_t->_lo;
671
            jint hi = cmp2_t->_hi;
672
            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
673
            switch (msk) {
674
            case BoolTest::ne: {
675
              // If val is compared to its lower or upper bound, we can narrow the type
676
              const TypeInt* val_t = gvn->type(val)->isa_int();
677
              if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
678
                if (val_t->_lo == lo) {
679
                  return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
680
                } else if (val_t->_hi == hi) {
681
                  return TypeInt::make(val_t->_lo, val_t->_hi - 1, val_t->_widen);
682
                }
683
              }
684
              // Can't refine type
685
              return nullptr;
686
            }
687
            case BoolTest::eq:
688
              return cmp2_t;
689
            case BoolTest::lt:
690
              lo = TypeInt::INT->_lo;
691
              if (hi != min_jint) {
692
                hi = hi - 1;
693
              }
694
              break;
695
            case BoolTest::le:
696
              lo = TypeInt::INT->_lo;
697
              break;
698
            case BoolTest::gt:
699
              if (lo != max_jint) {
700
                lo = lo + 1;
701
              }
702
              hi = TypeInt::INT->_hi;
703
              break;
704
            case BoolTest::ge:
705
              // lo unchanged
706
              hi = TypeInt::INT->_hi;
707
              break;
708
            default:
709
              break;
710
            }
711
            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
712
            return rtn_t;
713
          }
714
        }
715
      }
716
    }
717
  }
718
  return nullptr;
719
}
720

721
//------------------------------fold_compares----------------------------
722
// See if a pair of CmpIs can be converted into a CmpU.  In some cases
723
// the direction of this if is determined by the preceding if so it
724
// can be eliminated entirely.
725
//
726
// Given an if testing (CmpI n v) check for an immediately control
727
// dependent if that is testing (CmpI n v2) and has one projection
728
// leading to this if and the other projection leading to a region
729
// that merges one of this ifs control projections.
730
//
731
//                   If
732
//                  / |
733
//                 /  |
734
//                /   |
735
//              If    |
736
//              /\    |
737
//             /  \   |
738
//            /    \  |
739
//           /    Region
740
//
741
// Or given an if testing (CmpI n v) check for a dominating if that is
742
// testing (CmpI n v2), both having one projection leading to an
743
// uncommon trap. Allow another independent guard in between to cover
744
// an explicit range check:
745
// if (index < 0 || index >= array.length) {
746
// which may need a null check to guard the LoadRange
747
//
748
//                   If
749
//                  / \
750
//                 /   \
751
//                /     \
752
//              If      unc
753
//              /\
754
//             /  \
755
//            /    \
756
//           /      unc
757
//
758

759
// Is the comparison for this If suitable for folding?
760
bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
761
  return in(1) != nullptr &&
762
    in(1)->is_Bool() &&
763
    in(1)->in(1) != nullptr &&
764
    in(1)->in(1)->Opcode() == Op_CmpI &&
765
    in(1)->in(1)->in(2) != nullptr &&
766
    in(1)->in(1)->in(2) != igvn->C->top() &&
767
    (in(1)->as_Bool()->_test.is_less() ||
768
     in(1)->as_Bool()->_test.is_greater() ||
769
     (fold_ne && in(1)->as_Bool()->_test._test == BoolTest::ne));
770
}
771

772
// Is a dominating control suitable for folding with this if?
773
bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
774
  return ctrl != nullptr &&
775
    ctrl->is_Proj() &&
776
    ctrl->outcnt() == 1 && // No side-effects
777
    ctrl->in(0) != nullptr &&
778
    ctrl->in(0)->Opcode() == Op_If &&
779
    ctrl->in(0)->outcnt() == 2 &&
780
    ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
781
    // Must compare same value
782
    ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
783
    ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
784
    ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
785
}
786

787
// Do this If and the dominating If share a region?
788
bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
789
  ProjNode* otherproj = proj->other_if_proj();
790
  Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
791
  RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
792
  success = nullptr;
793
  fail = nullptr;
794

795
  if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
796
    for (int i = 0; i < 2; i++) {
797
      ProjNode* proj = proj_out(i);
798
      if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) {
799
        success = proj;
800
      } else if (fail == nullptr) {
801
        fail = proj;
802
      } else {
803
        success = fail = nullptr;
804
      }
805
    }
806
  }
807
  return success != nullptr && fail != nullptr;
808
}
809

810
bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
811
  // Different methods and methods containing jsrs are not supported.
812
  ciMethod* method = unc->jvms()->method();
813
  ciMethod* dom_method = dom_unc->jvms()->method();
814
  if (method != dom_method || method->has_jsrs()) {
815
    return false;
816
  }
817
  // Check that both traps are in the same activation of the method (instead
818
  // of two activations being inlined through different call sites) by verifying
819
  // that the call stacks are equal for both JVMStates.
820
  JVMState* dom_caller = dom_unc->jvms()->caller();
821
  JVMState* caller = unc->jvms()->caller();
822
  if ((dom_caller == nullptr) != (caller == nullptr)) {
823
    // The current method must either be inlined into both dom_caller and
824
    // caller or must not be inlined at all (top method). Bail out otherwise.
825
    return false;
826
  } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
827
    return false;
828
  }
829
  // Check that the bci of the dominating uncommon trap dominates the bci
830
  // of the dominated uncommon trap. Otherwise we may not re-execute
831
  // the dominated check after deoptimization from the merged uncommon trap.
832
  ciTypeFlow* flow = dom_method->get_flow_analysis();
833
  int bci = unc->jvms()->bci();
834
  int dom_bci = dom_unc->jvms()->bci();
835
  if (!flow->is_dominated_by(bci, dom_bci)) {
836
    return false;
837
  }
838

839
  return true;
840
}
841

842
// Return projection that leads to an uncommon trap if any
843
ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const {
844
  for (int i = 0; i < 2; i++) {
845
    call = proj_out(i)->is_uncommon_trap_proj();
846
    if (call != nullptr) {
847
      return proj_out(i);
848
    }
849
  }
850
  return nullptr;
851
}
852

853
// Do this If and the dominating If both branch out to an uncommon trap
854
bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn) {
855
  ProjNode* otherproj = proj->other_if_proj();
856
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();
857

858
  if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
859
    // We need to re-execute the folded Ifs after deoptimization from the merged traps
860
    if (!dom_unc->jvms()->should_reexecute()) {
861
      return false;
862
    }
863

864
    CallStaticJavaNode* unc = nullptr;
865
    ProjNode* unc_proj = uncommon_trap_proj(unc);
866
    if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
867
      if (dom_unc == unc) {
868
        // Allow the uncommon trap to be shared through a region
869
        RegionNode* r = unc->in(0)->as_Region();
870
        if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
871
          return false;
872
        }
873
        assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
874
      } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
875
        return false;
876
      }
877

878
      if (!is_dominator_unc(dom_unc, unc)) {
879
        return false;
880
      }
881

882
      // See merge_uncommon_traps: the reason of the uncommon trap
883
      // will be changed and the state of the dominating If will be
884
      // used. Check that we didn't apply this transformation in a
885
      // previous compilation and it didn't cause too many traps
886
      ciMethod* dom_method = dom_unc->jvms()->method();
887
      int dom_bci = dom_unc->jvms()->bci();
888
      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
889
          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) &&
890
          // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it.
891
          igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) {
892
        success = unc_proj;
893
        fail = unc_proj->other_if_proj();
894
        return true;
895
      }
896
    }
897
  }
898
  return false;
899
}
900

901
// Check that the 2 CmpI can be folded into a single CmpU and proceed with the folding
902
bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
903
  Node* this_cmp = in(1)->in(1);
904
  BoolNode* this_bool = in(1)->as_Bool();
905
  IfNode* dom_iff = proj->in(0)->as_If();
906
  BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
907
  Node* lo = dom_iff->in(1)->in(1)->in(2);
908
  Node* hi = this_cmp->in(2);
909
  Node* n = this_cmp->in(1);
910
  ProjNode* otherproj = proj->other_if_proj();
911

912
  const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj);
913
  const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success);
914

915
  BoolTest::mask lo_test = dom_bool->_test._test;
916
  BoolTest::mask hi_test = this_bool->_test._test;
917
  BoolTest::mask cond = hi_test;
918

919
  // convert:
920
  //
921
  //          dom_bool = x {<,<=,>,>=} a
922
  //                           / \
923
  //     proj = {True,False}  /   \ otherproj = {False,True}
924
  //                         /
925
  //        this_bool = x {<,<=} b
926
  //                       / \
927
  //  fail = {True,False} /   \ success = {False,True}
928
  //                     /
929
  //
930
  // (Second test guaranteed canonicalized, first one may not have
931
  // been canonicalized yet)
932
  //
933
  // into:
934
  //
935
  // cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
936
  //                       / \
937
  //                 fail /   \ success
938
  //                     /
939
  //
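  // Worked example (illustration, not part of the original comment): folding a
  // dominating "x >= 3" with "x < 10" gives lo = 3, hi = 10 and cond = <u,
  // i.e. (x - 3) <u 7. For x = 2 the subtraction wraps to 0xFFFFFFFF, which is
  // not <u 7, so the combined test is false exactly when either original test
  // was false.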
940

941
  // Figure out which of the two tests sets the upper bound and which
942
  // sets the lower bound if any.
943
  Node* adjusted_lim = nullptr;
944
  if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
945
      hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
946
    assert((dom_bool->_test.is_less() && !proj->_con) ||
947
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");
948

949
    // this_bool = <
950
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
951
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
952
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
953
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
954
    //     x in ]a, b[ on the fail (= True) projection, b > a:
955
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
956
    // this_bool = <=
957
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
958
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
959
    //     lo = a, hi = b, adjusted_lim = b-a+1, cond = <u
960
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u doesn't work because b = a - 1 is possible, then b-a = -1
961
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
962
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
963
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
964
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then b-a-1 = -1
965

966
    if (hi_test == BoolTest::lt) {
967
      if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
968
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
969
      }
970
    } else if (hi_test == BoolTest::le) {
971
      if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) {
972
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
973
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
974
        cond = BoolTest::lt;
975
      } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
976
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
977
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
978
        cond = BoolTest::lt;
979
      } else {
980
        assert(false, "unhandled lo_test: %d", lo_test);
981
        return false;
982
      }
983
    } else {
984
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test);
985
      return false;
986
    }
987
    // this test was canonicalized
988
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
989
  } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
990
             lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {
991

992
    // this_bool = <
993
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
994
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
995
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
996
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
997
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
998
    //     lo = b, hi = a, adjusted_lim = a-b+1, cond = >=u
999
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u doesn't work because a = b - 1 is possible, then b-a = -1
1000
    // this_bool = <=
1001
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
1002
    //     x in ]b, a[ on the fail (= False) projection, a > b:
1003
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
1004
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
1005
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
1006
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
1007
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then b-a-1 = -1
1008

1009
    swap(lo, hi);
1010
    swap(lo_type, hi_type);
1011
    swap(lo_test, hi_test);
1012

1013
    assert((dom_bool->_test.is_less() && proj->_con) ||
1014
           (dom_bool->_test.is_greater() && !proj->_con), "incorrect test");
1015

1016
    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;
1017

1018
    if (lo_test == BoolTest::lt) {
1019
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
1020
        cond = BoolTest::ge;
1021
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
1022
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
1023
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
1024
        cond = BoolTest::ge;
1025
      } else {
1026
        assert(false, "unhandled hi_test: %d", hi_test);
1027
        return false;
1028
      }
1029
    } else if (lo_test == BoolTest::le) {
1030
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
1031
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
1032
        cond = BoolTest::ge;
1033
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
1034
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
1035
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
1036
        cond = BoolTest::ge;
1037
      } else {
1038
        assert(false, "unhandled hi_test: %d", hi_test);
1039
        return false;
1040
      }
1041
    } else {
1042
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test);
1043
      return false;
1044
    }
1045
    // this test was canonicalized
1046
    assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
1047
  } else {
1048
    const TypeInt* failtype = filtered_int_type(igvn, n, proj);
1049
    if (failtype != nullptr) {
1050
      const TypeInt* type2 = filtered_int_type(igvn, n, fail);
1051
      if (type2 != nullptr) {
1052
        failtype = failtype->join(type2)->is_int();
1053
        if (failtype->empty()) {
1054
          // previous if determines the result of this if so
1055
          // replace Bool with constant
1056
          igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
1057
          return true;
1058
        }
1059
      }
1060
    }
1061
    return false;
1062
  }
1063

1064
  assert(lo != nullptr && hi != nullptr, "sanity");
1065
  Node* hook = new Node(lo); // Add a use to lo to prevent him from dying
1066
  // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
1067
  Node* adjusted_val = igvn->transform(new SubINode(n,  lo));
1068
  if (adjusted_lim == nullptr) {
1069
    adjusted_lim = igvn->transform(new SubINode(hi, lo));
1070
  }
1071
  hook->destruct(igvn);
1072

1073
  if (adjusted_val->is_top() || adjusted_lim->is_top()) {
1074
    return false;
1075
  }
1076

1077
  if (igvn->type(adjusted_lim)->is_int()->_lo < 0 &&
1078
      !igvn->C->post_loop_opts_phase()) {
1079
    // If range check elimination applies to this comparison, it includes code to protect from overflows that may
1080
    // cause the main loop to be skipped entirely. Delay this transformation.
1081
    // Example:
1082
    // for (int i = 0; i < limit; i++) {
1083
    //   if (i < max_jint && i > min_jint) {...
1084
    // }
1085
    // Comparisons folded as:
1086
    // i - min_jint - 1 <u -2
1087
    // when RC applies, main loop limit becomes:
1088
    // min(limit, max(-2 + min_jint + 1, min_jint))
1089
    // = min(limit, min_jint)
1090
    // = min_jint
1091
    if (adjusted_val->outcnt() == 0) {
1092
      igvn->remove_dead_node(adjusted_val);
1093
    }
1094
    if (adjusted_lim->outcnt() == 0) {
1095
      igvn->remove_dead_node(adjusted_lim);
1096
    }
1097
    igvn->C->record_for_post_loop_opts_igvn(this);
1098
    return false;
1099
  }
1100

1101
  Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
1102
  Node* newbool = igvn->transform(new BoolNode(newcmp, cond));
1103

1104
  igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
1105
  igvn->replace_input_of(this, 1, newbool);
1106

1107
  return true;
1108
}
1109

1110
// Merge the branches that trap for this If and the dominating If into
1111
// a single region that branches to the uncommon trap for the
1112
// dominating If
1113
Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
1114
  Node* res = this;
1115
  assert(success->in(0) == this, "bad projection");
1116

1117
  ProjNode* otherproj = proj->other_if_proj();
1118

1119
  CallStaticJavaNode* unc = success->is_uncommon_trap_proj();
1120
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();
1121

1122
  if (unc != dom_unc) {
1123
    Node* r = new RegionNode(3);
1124

1125
    r->set_req(1, otherproj);
1126
    r->set_req(2, success);
1127
    r = igvn->transform(r);
1128
    assert(r->is_Region(), "can't go away");
1129

1130
    // Make both If trap at the state of the first If: once the CmpI
1131
    // nodes are merged, if we trap we don't know which of the CmpI
1132
    // nodes would have caused the trap so we have to restart
1133
    // execution at the first one
1134
    igvn->replace_input_of(dom_unc, 0, r);
1135
    igvn->replace_input_of(unc, 0, igvn->C->top());
1136
  }
1137
  int trap_request = dom_unc->uncommon_trap_request();
1138
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1139
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
1140

1141
  int flip_test = 0;
1142
  Node* l = nullptr;
1143
  Node* r = nullptr;
1144

1145
  if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
1146
    // If this looks like a range check, change the trap to
1147
    // Reason_range_check so the compiler recognizes it as a range
1148
    // check and applies the corresponding optimizations
1149
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);
1150

1151
    improve_address_types(l, r, fail, igvn);
1152

1153
    res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
1154
  } else if (unc != dom_unc) {
1155
    // If we trap we won't know what CmpI would have caused the trap
1156
    // so use a special trap reason to mark this pair of CmpI nodes as
1157
    // bad candidate for folding. On recompilation we won't fold them
1158
    // and we may trap again but this time we'll know what branch
1159
    // traps
1160
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
1161
  }
1162
  igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
1163
  return res;
1164
}
1165

1166
// If we are turning 2 CmpI nodes into a CmpU that follows the pattern
1167
// of a rangecheck on index i, on 64 bit the compares may be followed
1168
// by memory accesses using i as index. In that case, the CmpU tells
1169
// us something about the values taken by i that can help the compiler
1170
// (see Compile::conv_I2X_index())
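// Sketch of the situation this helps with (assumed shape, not from the original
// comment): after "i < 0 || i >= a.length" is folded into one CmpU, a following
// a[i] access computes its address through ConvI2L(i). Because i is known to
// have passed the unsigned check, the ConvI2L can be given the array-size
// bounds, which lets the 64-bit address arithmetic be simplified.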
1171
void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
1172
#ifdef _LP64
1173
  ResourceMark rm;
1174
  Node_Stack stack(2);
1175

1176
  assert(r->Opcode() == Op_LoadRange, "unexpected range check");
1177
  const TypeInt* array_size = igvn->type(r)->is_int();
1178

1179
  stack.push(l, 0);
1180

1181
  while(stack.size() > 0) {
1182
    Node* n = stack.node();
1183
    uint start = stack.index();
1184

1185
    uint i = start;
1186
    for (; i < n->outcnt(); i++) {
1187
      Node* use = n->raw_out(i);
1188
      if (stack.size() == 1) {
1189
        if (use->Opcode() == Op_ConvI2L) {
1190
          const TypeLong* bounds = use->as_Type()->type()->is_long();
1191
          if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
1192
              (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
1193
            stack.set_index(i+1);
1194
            stack.push(use, 0);
1195
            break;
1196
          }
1197
        }
1198
      } else if (use->is_Mem()) {
1199
        Node* ctrl = use->in(0);
1200
        for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
1201
          ctrl = up_one_dom(ctrl);
1202
        }
1203
        if (ctrl == fail) {
1204
          Node* init_n = stack.node_at(1);
1205
          assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
1206
          // Create a new narrow ConvI2L node that is dependent on the range check
1207
          Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);
1208

1209
          // The type of the ConvI2L may be widened and so the new
1210
          // ConvI2L may not be better than an existing ConvI2L
1211
          if (new_n != init_n) {
1212
            for (uint j = 2; j < stack.size(); j++) {
1213
              Node* n = stack.node_at(j);
1214
              Node* clone = n->clone();
1215
              int rep = clone->replace_edge(init_n, new_n, igvn);
1216
              assert(rep > 0, "can't find expected node?");
1217
              clone = igvn->transform(clone);
1218
              init_n = n;
1219
              new_n = clone;
1220
            }
1221
            igvn->hash_delete(use);
1222
            int rep = use->replace_edge(init_n, new_n, igvn);
1223
            assert(rep > 0, "can't find expected node?");
1224
            igvn->transform(use);
1225
            if (init_n->outcnt() == 0) {
1226
              igvn->_worklist.push(init_n);
1227
            }
1228
          }
1229
        }
1230
      } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
1231
                                        igvn->type(use)->isa_ptr())) {
1232
        stack.set_index(i+1);
1233
        stack.push(use, 0);
1234
        break;
1235
      }
1236
    }
1237
    if (i == n->outcnt()) {
1238
      stack.pop();
1239
    }
1240
  }
1241
#endif
1242
}
1243

1244
bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
1245
  if (in(1) != nullptr &&
1246
      in(1)->in(1) != nullptr &&
1247
      in(1)->in(1)->in(2) != nullptr) {
1248
    Node* other = in(1)->in(1)->in(2);
1249
    if (other->Opcode() == Op_LoadRange &&
1250
        ((other->in(0) != nullptr && other->in(0) == proj) ||
1251
         (other->in(0) == nullptr &&
1252
          other->in(2) != nullptr &&
1253
          other->in(2)->is_AddP() &&
1254
          other->in(2)->in(1) != nullptr &&
1255
          other->in(2)->in(1)->Opcode() == Op_CastPP &&
1256
          other->in(2)->in(1)->in(0) == proj))) {
1257
      return true;
1258
    }
1259
  }
1260
  return false;
1261
}
1262

1263
bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
1264
  Node* other = in(1)->in(1)->in(2);
1265
  if (other->in(MemNode::Address) != nullptr &&
1266
      proj->in(0)->in(1) != nullptr &&
1267
      proj->in(0)->in(1)->is_Bool() &&
1268
      proj->in(0)->in(1)->in(1) != nullptr &&
1269
      proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1270
      proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
1271
      proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1272
      igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1273
    return true;
1274
  }
1275
  return false;
1276
}
1277

1278
// Check that the If that is in between the 2 integer comparisons has
1279
// no side effect
1280
bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
1281
  if (proj == nullptr) {
1282
    return false;
1283
  }
1284
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1285
  if (unc != nullptr && proj->outcnt() <= 2) {
1286
    if (proj->outcnt() == 1 ||
1287
        // Allow simple null check from LoadRange
1288
        (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1289
      CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1290
      CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern();
1291
      assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");
1292

1293
      // reroute_side_effect_free_unc changes the state of this
1294
      // uncommon trap to restart execution at the previous
1295
      // CmpI. Check that this change in a previous compilation didn't
1296
      // cause too many traps.
1297
      int trap_request = unc->uncommon_trap_request();
1298
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1299

1300
      if (igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), reason)) {
1301
        return false;
1302
      }
1303

1304
      if (!is_dominator_unc(dom_unc, unc)) {
1305
        return false;
1306
      }
1307

1308
      return true;
1309
    }
1310
  }
1311
  return false;
1312
}
1313

1314
// Make the If between the 2 integer comparisons trap at the state of
1315
// the first If: the last CmpI is the one replaced by a CmpU and the
1316
// first CmpI is eliminated, so the test between the 2 CmpI nodes
1317
// won't be guarded by the first CmpI anymore. It can trap in cases
1318
// where the first CmpI would have prevented it from executing: on a
1319
// trap, we need to restart execution at the state of the first CmpI
void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn) {
  CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern();
  ProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
  Node* call_proj = dom_unc->unique_ctrl_out();
  Node* halt = call_proj->unique_ctrl_out();

  Node* new_unc = dom_unc->clone();
  call_proj = call_proj->clone();
  halt = halt->clone();
  Node* c = otherproj->clone();

  c = igvn->transform(c);
  new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
  new_unc->set_req(0, c);
  new_unc = igvn->transform(new_unc);
  call_proj->set_req(0, new_unc);
  call_proj = igvn->transform(call_proj);
  halt->set_req(0, call_proj);
  halt = igvn->transform(halt);

  igvn->replace_node(otherproj, igvn->C->top());
  igvn->C->root()->add_req(halt);
}

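// fold_compares() looks for an integer comparison immediately dominated by
// another integer comparison on the same value and, when profitable, merges
// them. A hypothetical illustration (not taken from the sources): when both
// branches merely lead to uncommon traps,
//   if (i < 0)       deopt;
//   if (i >= length) deopt;
// can be replaced by a single unsigned test of the form
//   if ((unsigned)i >= (unsigned)length) deopt;
// i.e. one CmpU replacing two CmpI nodes (see cmpi_folds and
// fold_compares_helper used below).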
Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
  if (Opcode() != Op_If) return nullptr;

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn)) {
      // An integer comparison immediately dominated by another integer
      // comparison
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      ProjNode* dom_cmp = ctrl->as_Proj();
      if (has_shared_region(dom_cmp, success, fail) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return this;
      }
      if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
      return nullptr;
    } else if (ctrl->in(0) != nullptr &&
               ctrl->in(0)->in(0) != nullptr) {
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      Node* dom = ctrl->in(0)->in(0);
      ProjNode* dom_cmp = dom->isa_Proj();
      ProjNode* other_cmp = ctrl->isa_Proj();

      // Check if it's an integer comparison dominated by another
      // integer comparison with another test in between
      if (is_ctrl_folds(dom, igvn) &&
          has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          is_side_effect_free_test(other_cmp, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
    }
  }
  return nullptr;
}

//------------------------------remove_useless_bool----------------------------
// Check for people making a useless boolean: things like
// if( (x < y ? true : false) ) { ... }
// Replace with if( x < y ) { ... }
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *bol = i1->as_Bool();

  Node *cmp = bol->in(1);
  if( cmp->Opcode() != Op_CmpI ) return nullptr;

  // Must be comparing against a bool
  const Type *cmp2_t = phase->type( cmp->in(2) );
  if( cmp2_t != TypeInt::ZERO &&
      cmp2_t != TypeInt::ONE )
    return nullptr;

  // Find a prior merge point merging the boolean
  i1 = cmp->in(1);
  if( !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  if( phase->type( phi ) != TypeInt::BOOL )
    return nullptr;

  // Check for diamond pattern
  int true_path = phi->is_diamond_phi();
  if( true_path == 0 ) return nullptr;

  // Make sure that iff and the control of the phi are different. This
  // should really only happen for dead control flow since it requires
  // an illegal cycle.
  if (phi->in(0)->in(1)->in(0) == iff) return nullptr;

  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();

  // Now get the 'sense' of the test correct so we can plug in
  // either iff2->in(1) or its complement.
  int flip = 0;
  if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
  else if( bol->_test._test != BoolTest::eq ) return nullptr;
  if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;

  const Type *phi1_t = phase->type( phi->in(1) );
  const Type *phi2_t = phase->type( phi->in(2) );
  // Check for Phi(0,1) and flip
  if( phi1_t == TypeInt::ZERO ) {
    if( phi2_t != TypeInt::ONE ) return nullptr;
    flip = 1-flip;
  } else {
    // Check for Phi(1,0)
    if( phi1_t != TypeInt::ONE  ) return nullptr;
    if( phi2_t != TypeInt::ZERO ) return nullptr;
  }
  if( true_path == 2 ) {
    flip = 1-flip;
  }

  Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
  assert(new_bol != iff->in(1), "must make progress");
  iff->set_req_X(1, new_bol, phase);
  // Intervening diamond probably goes dead
  phase->C->set_major_progress();
  return iff;
}

static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);

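// One dominating range check recorded while scanning in RangeCheckNode::Ideal:
// the projection that stays in the graph ('ctl') and the constant offset
// ('off') added to the checked index.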
struct RangeCheck {
  IfProjNode* ctl;
  jint off;
};

Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // No Def-Use info?
  if (!can_reshape)  return nullptr;

  // Don't bother trying to transform a dead if
  if (in(0)->is_top())  return nullptr;
  // Don't bother trying to transform an if with a dead test
  if (in(1)->is_top())  return nullptr;
  // Another variation of a dead test
  if (in(1)->is_Con())  return nullptr;
  // Another variation of a dead if
  if (outcnt() < 2)  return nullptr;

  // Canonicalize the test.
  Node* idt_if = idealize_test(phase, this);
  if (idt_if != nullptr)  return idt_if;

  // Try to split the IF
  PhaseIterGVN *igvn = phase->is_IterGVN();
  Node *s = split_if(this, igvn);
  if (s != nullptr)  return s;

  return NodeSentinel;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  // Check for people making a useless boolean: things like
  // if( (x < y ? true : false) ) { ... }
  // Replace with if( x < y ) { ... }
  Node* bol2 = remove_useless_bool(this, phase);
  if (bol2) return bol2;

  if (in(0) == nullptr) return nullptr;     // Dead loop?

  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* result = fold_compares(igvn);
  if (result != nullptr) {
    return result;
  }

  // Scan for an equivalent test
  int dist = 4;               // Cutoff limit for search
  if (is_If() && in(1)->is_Bool()) {
    Node* cmp = in(1)->in(1);
    if (cmp->Opcode() == Op_CmpP &&
        cmp->in(2) != nullptr && // make sure cmp is not already dead
        cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) {
      dist = 64;              // Limit for null-pointer scans
    }
  }

  Node* prev_dom = search_identical(dist, igvn);

  if (prev_dom != nullptr) {
    // Replace dominated IfNode
    return dominated_by(prev_dom, igvn, false);
  }

  return simple_subsuming(igvn);
}

//------------------------------dominated_by-----------------------------------
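// 'this' If is made redundant by the dominating test 'prev_dom'. Reroute users
// of its projections: data uses that only depend on the test are moved to the
// matching projection of 'prev_dom' (or to top for the dead arm), control uses
// are moved to the If's own control input, and the If is then removed. The
// returned ConI only signals progress to the caller.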
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_access_nodes) {
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Removing IfNode: "); this->dump();
  }
#endif

  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);     // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ?     idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if (s->depends_only_on_test() && igvn->no_dependent_zero_check(s)) {
        // For control producers.
        // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
        if (pin_array_access_nodes && data_target != top) {
          // As a result of range check smearing, Loads and range check Cast nodes that are control dependent on this
          // range check (that is about to be removed) now depend on multiple dominating range checks. After the removal
          // of this range check, these control dependent nodes end up at the lowest/nearest dominating check in the
          // graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
          // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the
          // lowest dominating check.
          Node* clone = s->pin_array_access_node();
          if (clone != nullptr) {
            clone = igvn->transform(clone);
            igvn->replace_node(s, clone);
          }
        }
      } else {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for (l = 0; s->in(l) != ifp; l++) { }
        igvn->replace_input_of(s, l, ctrl_target);
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}

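// Walk up the dominator tree (at most 'dist' steps) looking for an If with the
// same opcode and the same condition as 'this'. On success, the projection of
// that dominating If which lies on our path is returned; otherwise nullptr.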
Node* IfNode::search_identical(int dist, PhaseIterGVN* igvn) {
  // Setup to scan up the CFG looking for a dominating test
  Node* dom = in(0);
  Node* prev_dom = this;
  int op = Opcode();
  // Search up the dominator tree for an If with an identical test
  while (dom->Opcode() != op ||  // Not same opcode?
         !same_condition(dom, igvn) ||  // Not same input 1?
         prev_dom->in(0) != dom) {  // One path of test does not dominate?
    if (dist < 0) return nullptr;

    dist--;
    prev_dom = dom;
    dom = up_one_dom(dom);
    if (!dom) return nullptr;
  }

  // Check that we did not follow a loop back to ourselves
  if (this == dom) {
    return nullptr;
  }

#ifndef PRODUCT
  if (dist > 2) { // Add to count of null checks elided
    explicit_null_checks_elided++;
  }
#endif

  return prev_dom;
}

bool IfNode::same_condition(const Node* dom, PhaseIterGVN* igvn) const {
  Node* dom_bool = dom->in(1);
  Node* this_bool = in(1);
  if (dom_bool == this_bool) {
    return true;
  }

  if (dom_bool == nullptr || !dom_bool->is_Bool() ||
      this_bool == nullptr || !this_bool->is_Bool()) {
    return false;
  }
  Node* dom_cmp = dom_bool->in(1);
  Node* this_cmp = this_bool->in(1);

  // If the comparison is a subtype check, then SubTypeCheck nodes may have profile data attached to them and may be
  // different nodes even though they perform the same subtype check
  if (dom_cmp == nullptr || !dom_cmp->is_SubTypeCheck() ||
      this_cmp == nullptr || !this_cmp->is_SubTypeCheck()) {
    return false;
  }

  if (dom_cmp->in(1) != this_cmp->in(1) ||
      dom_cmp->in(2) != this_cmp->in(2) ||
      dom_bool->as_Bool()->_test._test != this_bool->as_Bool()->_test._test) {
    return false;
  }

  return true;
}


static int subsuming_bool_test_encode(Node*);

// Check if dominating test is subsuming 'this' one.
//
//              cmp
//              / \
//     (r1)  bool  \
//            /    bool (r2)
//    (dom) if       \
//            \       )
//    (pre)  if[TF]  /
//               \  /
//                if (this)
//   \r1
//  r2\  eqT  eqF  neT  neF  ltT  ltF  leT  leF  gtT  gtF  geT  geF
//  eq    t    f    f    t    f    -    -    f    f    -    -    f
//  ne    f    t    t    f    t    -    -    t    t    -    -    t
//  lt    f    -    -    f    t    f    -    f    f    -    f    t
//  le    t    -    -    t    t    -    t    f    f    t    -    t
//  gt    f    -    -    f    f    -    f    t    t    f    -    f
//  ge    t    -    -    t    f    t    -    t    t    -    t    f
//
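// A worked example (illustrative): both tests share the same CmpI of x and y.
// If the dominating test "x < y" was taken (column ltT) and this test asks
// "x <= y" (row le), the table yields 't', so this If constant-folds to its
// true branch. A '-' entry means the dominating outcome tells us nothing.
//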
Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
  // Table encoding: N/A (na), True-branch (tb), False-branch (fb).
  static enum { na, tb, fb } s_short_circuit_map[6][12] = {
  /*rel: eq+T eq+F ne+T ne+F lt+T lt+F le+T le+F gt+T gt+F ge+T ge+F*/
  /*eq*/{ tb,  fb,  fb,  tb,  fb,  na,  na,  fb,  fb,  na,  na,  fb },
  /*ne*/{ fb,  tb,  tb,  fb,  tb,  na,  na,  tb,  tb,  na,  na,  tb },
  /*lt*/{ fb,  na,  na,  fb,  tb,  fb,  na,  fb,  fb,  na,  fb,  tb },
  /*le*/{ tb,  na,  na,  tb,  tb,  na,  tb,  fb,  fb,  tb,  na,  tb },
  /*gt*/{ fb,  na,  na,  fb,  fb,  na,  fb,  tb,  tb,  fb,  na,  fb },
  /*ge*/{ tb,  na,  na,  tb,  fb,  tb,  na,  tb,  tb,  na,  tb,  fb }};

  Node* pre = in(0);
  if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
    return nullptr;
  }
  Node* dom = pre->in(0);
  if (!dom->is_If()) {
    return nullptr;
  }
  Node* bol = in(1);
  if (!bol->is_Bool()) {
    return nullptr;
  }
  Node* cmp = in(1)->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  if (!dom->in(1)->is_Bool()) {
    return nullptr;
  }
  if (dom->in(1)->in(1) != cmp) {  // Not same cond?
    return nullptr;
  }

  int drel = subsuming_bool_test_encode(dom->in(1));
  int trel = subsuming_bool_test_encode(bol);
  int bout = pre->is_IfFalse() ? 1 : 0;

  if (drel < 0 || trel < 0) {
    return nullptr;
  }
  int br = s_short_circuit_map[trel][2*drel+bout];
  if (br == na) {
    return nullptr;
  }
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Subsumed IfNode: "); dump();
  }
#endif
  // Replace condition with constant True(1)/False(0).
  bool is_always_true = br == tb;
  set_req(1, igvn->intcon(is_always_true ? 1 : 0));

  // Update any data dependencies to the directly dominating test. This subsumed test is not immediately removed by igvn
  // and therefore subsequent optimizations might miss these data dependencies otherwise. There might be a dead loop
  // ('always_taken_proj' == 'pre') that is cleaned up later. Skip this case to make the iterator work properly.
  Node* always_taken_proj = proj_out(is_always_true);
  if (always_taken_proj != pre) {
    for (DUIterator_Fast imax, i = always_taken_proj->fast_outs(imax); i < imax; i++) {
      Node* u = always_taken_proj->fast_out(i);
      if (!u->is_CFG()) {
        igvn->replace_input_of(u, 0, pre);
        --i;
        --imax;
      }
    }
  }

  if (bol->outcnt() == 0) {
    igvn->remove_dead_node(bol);    // Kill the BoolNode.
  }
  return this;
}

// Map BoolTest to local table encoding. The BoolTest (e)numerals
//   { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1 }
// are mapped to table indices, while the remaining (e)numerals in BoolTest
//   { overflow = 2, no_overflow = 6, never = 8, illegal = 9 }
// are ignored (these are not modeled in the table).
//
static int subsuming_bool_test_encode(Node* node) {
  precond(node->is_Bool());
  BoolTest::mask x = node->as_Bool()->_test._test;
  switch (x) {
    case BoolTest::eq: return 0;
    case BoolTest::ne: return 1;
    case BoolTest::lt: return 2;
    case BoolTest::le: return 3;
    case BoolTest::gt: return 4;
    case BoolTest::ge: return 5;
    case BoolTest::overflow:
    case BoolTest::no_overflow:
    case BoolTest::never:
    case BoolTest::illegal:
    default:
      return -1;
  }
}

//------------------------------Identity---------------------------------------
// If the test is constant & we match, then we are the input Control
Node* IfProjNode::Identity(PhaseGVN* phase) {
  // Can only optimize if cannot go the other way
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER || (always_taken(t) &&
       // During parsing (GVN) we don't remove dead code aggressively.
       // Cut off dead branch and let PhaseRemoveUseless take care of it.
      (!phase->is_IterGVN() ||
       // During IGVN, first wait for the dead branch to be killed.
       // Otherwise, the IfNode's control will have two control uses (the IfNode
       // that doesn't go away because it still has uses and this branch of the
       // If) which breaks other optimizations. Node::has_special_unique_user()
       // will cause this node to be reprocessed once the dead branch is killed.
       in(0)->outcnt() == 1))) {
    // IfNode control
    if (in(0)->is_BaseCountedLoopEnd()) {
      // CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to
      // avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization.
      Node* head = unique_ctrl_out_or_null();
      if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
        Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
        phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
        phase->is_IterGVN()->replace_node(head, new_head);
      }
    }
    return in(0)->in(0);
  }
  // no progress
  return this;
}

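// Recognize the zero trip guard of a counted loop: a Bool fed by a Cmp whose
// first input is an OpaqueZeroTripGuard node.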
bool IfNode::is_zero_trip_guard() const {
  if (in(1)->is_Bool() && in(1)->in(1)->is_Cmp()) {
    return in(1)->in(1)->in(1)->Opcode() == Op_OpaqueZeroTripGuard;
  }
  return false;
}

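// Pin every array access node hanging off this projection that only depends on
// the test, by replacing it with a control-pinned clone, so it cannot float
// above the check later on.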
void IfProjNode::pin_array_access_nodes(PhaseIterGVN* igvn) {
  for (DUIterator i = outs(); has_out(i); i++) {
    Node* u = out(i);
    if (!u->depends_only_on_test()) {
      continue;
    }
    Node* clone = u->pin_array_access_node();
    if (clone != nullptr) {
      clone = igvn->transform(clone);
      assert(clone != u, "shouldn't common");
      igvn->replace_node(u, clone);
      --i;
    }
  }
}

#ifndef PRODUCT
void IfNode::dump_spec(outputStream* st) const {
  switch (_assertion_predicate_type) {
    case AssertionPredicateType::Init_value:
      st->print("#Init Value Assertion Predicate  ");
      break;
    case AssertionPredicateType::Last_value:
      st->print("#Last Value Assertion Predicate  ");
      break;
    case AssertionPredicateType::None:
      // No Assertion Predicate
      break;
    default:
      fatal("Unknown Assertion Predicate type");
  }
  st->print("P=%f, C=%f", _prob, _fcnt);
}
#endif // NOT PRODUCT

//------------------------------idealize_test----------------------------------
// Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
// come up with a canonical sequence.  Bools getting 'eq', 'gt' and 'ge' forms
// converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
// needed.
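// For example (illustrative): a test "x > y" (gt) is rewritten using the
// negated Bool "x <= y" (le); the IfTrue/IfFalse projections are swapped and
// _prob is flipped so the overall behaviour stays the same.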
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != nullptr, "If must be live");

  if (iff->outcnt() != 2)  return nullptr; // Malformed projections.
  Node* old_if_f = iff->proj_out(false);
  Node* old_if_t = iff->proj_out(true);

  // CountedLoopEnds want the back-control test to be TRUE, regardless of
  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
  // happens in count-down loops
  if (iff->is_BaseCountedLoopEnd())  return nullptr;
  if (!iff->in(1)->is_Bool())  return nullptr; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return nullptr;

  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return nullptr;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = iff->clone()->as_If();
  iff->set_req(1, b);
  iff->_prob = 1.0-iff->_prob;

  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections.  Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));

  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}

Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Setup to scan up the CFG looking for a dominating test
  Node* prev_dom = this;

  // Check for range-check vs other kinds of tests
  Node* index1;
  Node* range1;
  jint offset1;
  int flip1 = is_range_check(range1, index1, offset1);
  if (flip1) {
    Node* dom = in(0);
    // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
    // so all checks we inspect post-dominate the top-most check we find.
    // If we are going to fail the current check and we reach the top check
    // then we are guaranteed to fail, so just start interpreting there.
    // We 'expand' the top 3 range checks to include all post-dominating
    // checks.
    //
    // Example:
    // a[i+x] // (1) 1 < x < 6
    // a[i+3] // (2)
    // a[i+4] // (3)
    // a[i+6] // max = max of all constants
    // a[i+2]
    // a[i+1] // min = min of all constants
    //
    // If x < 3:
    //   (1) a[i+x]: Leave unchanged
    //   (2) a[i+3]: Replace with a[i+max] = a[i+6]: i+x < i+3 <= i+6  -> (2) is covered
    //   (3) a[i+4]: Replace with a[i+min] = a[i+1]: i+1 < i+4 <= i+6  -> (3) and all following checks are covered
    //   Remove all other a[i+c] checks
    //
    // If x >= 3:
    //   (1) a[i+x]: Leave unchanged
    //   (2) a[i+3]: Replace with a[i+min] = a[i+1]: i+1 < i+3 <= i+x  -> (2) is covered
    //   (3) a[i+4]: Replace with a[i+max] = a[i+6]: i+1 < i+4 <= i+6  -> (3) and all following checks are covered
    //   Remove all other a[i+c] checks
    //
    // We only need the top 2 range checks if x is the min or max of all constants.
    //
    // This, however, only works if the interval [i+min,i+max] is not larger than max_int (i.e. abs(max - min) < max_int):
    // The theoretical max size of an array is max_int with:
    // - Valid index space: [0,max_int-1]
    // - Invalid index space: [max_int,-1] // max_int, min_int, min_int - 1 ..., -1
    //
    // The size of the consecutive valid index space is smaller than the size of the consecutive invalid index space.
    // If we choose min and max in such a way that:
    // - abs(max - min) < max_int
    // - i+max and i+min are inside the valid index space
    // then all indices [i+min,i+max] must be in the valid index space. Otherwise, the invalid index space must be
    // smaller than the valid index space which is never the case for any array size.
    //
    // Choosing a smaller array size only makes the valid index space smaller and the invalid index space larger and
    // the argument above still holds.
    //
    // Note that the same optimization with the same maximal accepted interval size can also be found in C1.
    const jlong maximum_number_of_min_max_interval_indices = (jlong)max_jint;

    // The top 3 range checks seen
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;

    // Low and high offsets seen so far
    jint off_lo = offset1;
    jint off_hi = offset1;

    bool found_immediate_dominator = false;

    // Scan for the top checks and collect range of offsets
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_RangeCheck &&  // Is it another RangeCheck?
          prev_dom->in(0) == dom) { // One path of test does dominate?
        if (dom == this) return nullptr; // dead loop
        // See if this is a range check
        Node* index2;
        Node* range2;
        jint offset2;
        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
        // See if this is a _matching_ range check, checking against
        // the same array bounds.
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            // Found an immediately dominating test at the same offset.
            // This kind of back-to-back test can be eliminated locally,
            // and there is no need to search further for dominating tests.
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }

          // "x - y" -> must add one to the difference for number of elements in [x,y]
          const jlong diff = (jlong)MIN2(offset2, off_lo) - (jlong)MAX2(offset2, off_hi);
          if (ABS(diff) < maximum_number_of_min_max_interval_indices) {
            // Gather expanded bounds
            off_lo = MIN2(off_lo, offset2);
            off_hi = MAX2(off_hi, offset2);
            // Record top NRC range checks
            prev_checks[nb_checks % NRC].ctl = prev_dom->as_IfProj();
            prev_checks[nb_checks % NRC].off = offset2;
            nb_checks++;
          }
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }

    if (!found_immediate_dominator) {
      // Attempt to widen the dominating range check to cover some later
      // ones.  Since range checks "fail" by uncommon-trapping to the
      // interpreter, widening a check can make us speculatively enter
      // the interpreter.  If we see range-check deopt's, do not widen!
      if (!phase->C->allow_range_check_smearing())  return nullptr;

      if (can_reshape && !phase->C->post_loop_opts_phase()) {
        // We are about to perform range check smearing (i.e. remove this RangeCheck if it is dominated by
        // a series of RangeChecks which have a range that covers this RangeCheck). This can cause array access nodes to
        // be pinned. We want to avoid that and first allow range check elimination a chance to remove the RangeChecks
        // from loops. Hence, we delay range check smearing until after loop opts.
        phase->C->record_for_post_loop_opts_igvn(this);
        return nullptr;
      }

      // Didn't find prior covering check, so cannot remove anything.
      if (nb_checks == 0) {
        return nullptr;
      }
      // Constant indices only need to check the upper bound.
      // Non-constant indices must check both low and high.
      int chk0 = (nb_checks - 1) % NRC;
      if (index1) {
        if (nb_checks == 1) {
          return nullptr;
        } else {
          // If the top range check's constant is the min or max of
          // all constants we widen the next one to cover the whole
          // range of constants.
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            // If the top test's constant is not the min or max of all
            // constants, we need 3 range checks. We must leave the
            // top test unchanged because widening it would allow the
            // accesses it protects to successfully read/write out of
            // bounds.
            if (nb_checks == 2) {
              return nullptr;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            // The top range check a+i covers interval: -a <= i < length-a
            // The second range check b+i covers interval: -b <= i < length-b
            if (rc1.off <= rc0.off) {
              // if b <= a, we change the second range check to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // Together top and second range checks now cover:
              // -min_of_all_constants <= i < length-a
              // which is more restrictive than -b <= i < length-b:
              // -b <= -min_of_all_constants <= i < length-a <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              // if b > a, we change the second range check to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // Together top and second range checks now cover:
              // -a <= i < length-max_of_all_constants
              // which is more restrictive than -b <= i < length-b:
              // -b < -a <= i < length-max_of_all_constants <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        // 'Widen' the offset of the 1st and only covering check
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        // Test is now covered by prior checks, dominate it out
        prev_dom = rc0.ctl;
      }
      // The last RangeCheck is found to be redundant with a sequence of n (n >= 2) preceding RangeChecks.
      // If an array load is control dependent on the eliminated range check, the array load nodes (CastII and Load)
      // become control dependent on the last range check of the sequence, but they are really dependent on the entire
      // sequence of RangeChecks. If RangeCheck#n is later replaced by a dominating identical check, the array load
      // nodes must not float above the n-1 other RangeCheck in the sequence. We pin the array load nodes here to
      // guarantee it doesn't happen.
      //
      // RangeCheck#1                 RangeCheck#1
      //    |      \                     |      \
      //    |      uncommon trap         |      uncommon trap
      //    ..                           ..
      // RangeCheck#n              -> RangeCheck#n
      //    |      \                     |      \
      //    |      uncommon trap        CastII  uncommon trap
      // RangeCheck                     Load
      //    |      \
      //   CastII  uncommon trap
      //   Load

      return dominated_by(prev_dom, igvn, true);
    }
  } else {
    prev_dom = search_identical(4, igvn);

    if (prev_dom == nullptr) {
      return nullptr;
    }
  }

  // Replace dominated IfNode
  return dominated_by(prev_dom, igvn, false);
}

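// A Parse Predicate is an If with the constant condition intcon(1) inserted
// during parsing as a placeholder to which checks for the given deoptimization
// reason can later be attached; its uncommon projection leads to a trap (see
// uncommon_trap() below). It is folded away once useless, at the latest in
// post loop opts IGVN (see Value() below).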
ParsePredicateNode::ParsePredicateNode(Node* control, Deoptimization::DeoptReason deopt_reason, PhaseGVN* gvn)
    : IfNode(control, gvn->intcon(1), PROB_MAX, COUNT_UNKNOWN),
      _deopt_reason(deopt_reason),
      _useless(false) {
  init_class_id(Class_ParsePredicate);
  gvn->C->add_parse_predicate(this);
  gvn->C->record_for_post_loop_opts_igvn(this);
#ifdef ASSERT
  switch (deopt_reason) {
    case Deoptimization::Reason_predicate:
    case Deoptimization::Reason_profile_predicate:
    case Deoptimization::Reason_loop_limit_check:
      break;
    default:
      assert(false, "unsupported deoptimization reason for Parse Predicate");
  }
#endif // ASSERT
}

Node* ParsePredicateNode::uncommon_trap() const {
  ParsePredicateUncommonProj* uncommon_proj = proj_out(0)->as_IfFalse();
  Node* uct_region_or_call = uncommon_proj->unique_ctrl_out();
  assert(uct_region_or_call->is_Region() || uct_region_or_call->is_Call(), "must be a region or call uct");
  return uct_region_or_call;
}

// Fold this node away once it becomes useless or at latest in post loop opts IGVN.
const Type* ParsePredicateNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (_useless || phase->C->post_loop_opts_phase()) {
    return TypeTuple::IFTRUE;
  } else {
    return bottom_type();
  }
}

#ifndef PRODUCT
void ParsePredicateNode::dump_spec(outputStream* st) const {
  st->print(" #");
  switch (_deopt_reason) {
    case Deoptimization::DeoptReason::Reason_predicate:
      st->print("Loop ");
      break;
    case Deoptimization::DeoptReason::Reason_profile_predicate:
      st->print("Profiled_Loop ");
      break;
    case Deoptimization::DeoptReason::Reason_loop_limit_check:
      st->print("Loop_Limit_Check ");
      break;
    default:
      fatal("unknown kind");
  }
  if (_useless) {
    st->print("#useless ");
  }
}
#endif // NOT PRODUCT
