doCall.cpp
1
/*
2
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "ci/ciCallSite.hpp"
27
#include "ci/ciMethodHandle.hpp"
28
#include "ci/ciSymbols.hpp"
29
#include "classfile/vmSymbols.hpp"
30
#include "compiler/compileBroker.hpp"
31
#include "compiler/compileLog.hpp"
32
#include "interpreter/linkResolver.hpp"
33
#include "logging/log.hpp"
34
#include "logging/logLevel.hpp"
35
#include "logging/logMessage.hpp"
36
#include "logging/logStream.hpp"
37
#include "opto/addnode.hpp"
38
#include "opto/callGenerator.hpp"
39
#include "opto/castnode.hpp"
40
#include "opto/cfgnode.hpp"
41
#include "opto/mulnode.hpp"
42
#include "opto/parse.hpp"
43
#include "opto/rootnode.hpp"
44
#include "opto/runtime.hpp"
45
#include "opto/subnode.hpp"
46
#include "prims/methodHandles.hpp"
47
#include "runtime/sharedRuntime.hpp"
48
#include "utilities/macros.hpp"
49
#if INCLUDE_JFR
50
#include "jfr/jfr.hpp"
51
#endif
52

53
static void print_trace_type_profile(outputStream* out, int depth, ciKlass* prof_klass, int site_count, int receiver_count) {
54
  CompileTask::print_inline_indent(depth, out);
55
  out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
56
  prof_klass->name()->print_symbol_on(out);
57
  out->cr();
58
}
59

60
static void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method,
61
                               ciKlass* prof_klass, int site_count, int receiver_count) {
62
  if (TraceTypeProfile || C->print_inlining()) {
63
    outputStream* out = tty;
64
    if (!C->print_inlining()) {
65
      if (!PrintOpto && !PrintCompilation) {
66
        method->print_short_name();
67
        tty->cr();
68
      }
69
      CompileTask::print_inlining_tty(prof_method, depth, bci, InliningResult::SUCCESS);
70
    } else {
71
      out = C->print_inlining_stream();
72
    }
73
    print_trace_type_profile(out, depth, prof_klass, site_count, receiver_count);
74
  }
75

76
  LogTarget(Debug, jit, inlining) lt;
77
  if (lt.is_enabled()) {
78
    LogStream ls(lt);
79
    print_trace_type_profile(&ls, depth, prof_klass, site_count, receiver_count);
80
  }
81
}
82

83
CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
84
                                       JVMState* jvms, bool allow_inline,
85
                                       float prof_factor, ciKlass* speculative_receiver_type,
86
                                       bool allow_intrinsics) {
87
  assert(callee != nullptr, "failed method resolution");
88

89
  ciMethod*       caller      = jvms->method();
90
  int             bci         = jvms->bci();
91
  Bytecodes::Code bytecode    = caller->java_code_at_bci(bci);
92
  ciMethod*       orig_callee = caller->get_method_at_bci(bci);
93

94
  const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
95
                                       (bytecode == Bytecodes::_invokeinterface) ||
96
                                       (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
97
                                       (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
98

99
  // Dtrace currently doesn't work unless all calls are vanilla
100
  if (env()->dtrace_method_probes()) {
101
    allow_inline = false;
102
  }
103

104
  // Note: When we get profiling during stage-1 compiles, we want to pull
105
  // from more specific profile data which pertains to this inlining.
106
  // Right now, ignore the information in jvms->caller(), and do method[bci].
107
  ciCallProfile profile = caller->call_profile_at_bci(bci);
108

109
  // See how many times this site has been invoked.
110
  int site_count = profile.count();
111
  int receiver_count = -1;
112
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
113
    // Receivers in the profile structure are ordered by call counts
114
    // so that the most called (major) receiver is profile.receiver(0).
115
    receiver_count = profile.receiver_count(0);
116
  }
117

118
  CompileLog* log = this->log();
119
  if (log != nullptr) {
120
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
121
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
122
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
123
                    log->identify(callee), site_count, prof_factor);
124
    if (call_does_dispatch)  log->print(" virtual='1'");
125
    if (allow_inline)     log->print(" inline='1'");
126
    if (receiver_count >= 0) {
127
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
128
      if (profile.has_receiver(1)) {
129
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
130
      }
131
    }
132
    if (callee->is_method_handle_intrinsic()) {
133
      log->print(" method_handle_intrinsic='1'");
134
    }
135
    log->end_elem();
136
  }
137

138
  // Special case the handling of certain common, profitable library
139
  // methods.  If these methods are replaced with specialized code,
140
  // then we return it as the inlined version of the call.
141
  CallGenerator* cg_intrinsic = nullptr;
142
  if (allow_inline && allow_intrinsics) {
143
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
144
    if (cg != nullptr) {
145
      if (cg->is_predicated()) {
146
        // Code without intrinsic but, hopefully, inlined.
147
        CallGenerator* inline_cg = this->call_generator(callee,
148
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
149
        if (inline_cg != nullptr) {
150
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
151
        }
152
      }
153

154
      // If intrinsic does the virtual dispatch, we try to use the type profile
155
      // first, and hopefully inline it as the regular virtual call below.
156
      // We will retry the intrinsic if nothing had claimed it afterwards.
157
      if (cg->does_virtual_dispatch()) {
158
        cg_intrinsic = cg;
159
        cg = nullptr;
160
      } else if (IncrementalInline && should_delay_vector_inlining(callee, jvms)) {
161
        return CallGenerator::for_late_inline(callee, cg);
162
      } else {
163
        return cg;
164
      }
165
    }
166
  }
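  // Roughly, a predicated intrinsic pairs the intrinsic code with the plain
  // inlined bytecode as its fallback:
  //   if (runtime_predicate(args)) { intrinsic path } else { inlined bytecode }
  // An intrinsic that still performs virtual dispatch is parked in
  // cg_intrinsic instead, and is only used further down if the type-profile
  // based inlining below does not claim the call site first.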
167

168
  // Do method handle calls.
169
  // NOTE: This must happen before normal inlining logic below since
170
  // MethodHandle.invoke* are native methods which obviously don't
171
  // have bytecodes and so normal inlining fails.
172
  if (callee->is_method_handle_intrinsic()) {
173
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
174
    return cg;
175
  }
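  // Note (illustrative): these intrinsics are the signature-polymorphic
  // linker methods (invokeBasic, linkToStatic, linkToVirtual, ...), which
  // have no bytecodes to inline. A call site linked through method handles,
  // e.g. the invokedynamic produced for a lambda expression, therefore
  // reaches CallGenerator::for_method_handle_call here rather than the
  // ordinary inlining logic below.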
176

177
  // Attempt to inline...
178
  if (allow_inline) {
179
    // The profile data is only partly attributable to this caller,
180
    // scale back the call site information.
181
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
182
    // This is the number of times we expect the call code to be used.
183
    float expected_uses = past_uses;
184

185
    // Try inlining a bytecoded method:
186
    if (!call_does_dispatch) {
187
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
188
      bool should_delay = C->should_delay_inlining();
189
      if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
190
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
191
        // For optimized virtual calls assert at runtime that receiver object
192
        // is a subtype of the inlined method holder. CHA can report a method
193
        // as a unique target under an abstract method, but receiver type
194
        // sometimes has a broader type. Similar scenario is possible with
195
        // default methods when type system loses information about implemented
196
        // interfaces.
197
        if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
198
          CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
199
              Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
200

201
          cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
202
        }
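        // Illustrative example of the guarded case (types are hypothetical):
        //   interface I { void m(); }
        //   abstract class A implements I { }
        //   class B extends A { public void m() { ... } }
        // CHA can report B::m as the unique target of a call whose receiver
        // is only known to be an A, so the inlined body is protected by a
        // subtype check against the inlined method's holder, trapping on the
        // unexpected-receiver path.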
203
        if (cg != nullptr) {
204
          // Delay the inlining of this method to give us the
205
          // opportunity to perform some high level optimizations
206
          // first.
207
          if (should_delay) {
208
            return CallGenerator::for_late_inline(callee, cg);
209
          } else if (should_delay_string_inlining(callee, jvms)) {
210
            return CallGenerator::for_string_late_inline(callee, cg);
211
          } else if (should_delay_boxing_inlining(callee, jvms)) {
212
            return CallGenerator::for_boxing_late_inline(callee, cg);
213
          } else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
214
            return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
215
          } else {
216
            return cg;
217
          }
218
        }
219
      }
220
    }
221

222
    // Try using the type profile.
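    // Sketch of what the predicted-call generators built below expand to for
    // a virtual site x.m(), with K0/K1 = profile.receiver(0)/(1):
    //   monomorphic:  if (x.klass == K0) { inlined K0::m } else { miss path }
    //   bimorphic:    if (x.klass == K0) { inlined K0::m }
    //                 else if (x.klass == K1) { inlined K1::m } else { miss path }
    // The miss path is an uncommon trap only while the site has not already
    // trapped or recompiled too often for the chosen deopt reason; otherwise
    // it falls back to a real virtual call (possibly a late-inline one).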
223
    if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
224
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
225
      bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
226
      ciMethod* receiver_method = nullptr;
227

228
      int morphism = profile.morphism();
229
      if (speculative_receiver_type != nullptr) {
230
        if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
231
          // We have a speculative type, we should be able to resolve
232
          // the call. We do that before looking at the profiling at
233
          // this invoke because it may lead to bimorphic inlining which
234
          // a speculative type should help us avoid.
235
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
236
                                                   speculative_receiver_type);
237
          if (receiver_method == nullptr) {
238
            speculative_receiver_type = nullptr;
239
          } else {
240
            morphism = 1;
241
          }
242
        } else {
243
          // speculation failed before. Use profiling at the call
244
          // (could allow bimorphic inlining for instance).
245
          speculative_receiver_type = nullptr;
246
        }
247
      }
248
      if (receiver_method == nullptr &&
249
          (have_major_receiver || morphism == 1 ||
250
           (morphism == 2 && UseBimorphicInlining))) {
251
        // receiver_method = profile.method();
252
        // Profiles do not suggest methods now.  Look it up in the major receiver.
253
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
254
                                                      profile.receiver(0));
255
      }
256
      if (receiver_method != nullptr) {
257
        // The single majority receiver sufficiently outweighs the minority.
258
        CallGenerator* hit_cg = this->call_generator(receiver_method,
259
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
260
        if (hit_cg != nullptr) {
261
          // Look up second receiver.
262
          CallGenerator* next_hit_cg = nullptr;
263
          ciMethod* next_receiver_method = nullptr;
264
          if (morphism == 2 && UseBimorphicInlining) {
265
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
266
                                                               profile.receiver(1));
267
            if (next_receiver_method != nullptr) {
268
              next_hit_cg = this->call_generator(next_receiver_method,
269
                                  vtable_index, !call_does_dispatch, jvms,
270
                                  allow_inline, prof_factor);
271
              if (next_hit_cg != nullptr && !next_hit_cg->is_inline() &&
272
                  have_major_receiver && UseOnlyInlinedBimorphic) {
273
                  // Skip if we can't inline second receiver's method
274
                  next_hit_cg = nullptr;
275
              }
276
            }
277
          }
278
          CallGenerator* miss_cg;
279
          Deoptimization::DeoptReason reason = (morphism == 2
280
                                               ? Deoptimization::Reason_bimorphic
281
                                               : Deoptimization::reason_class_check(speculative_receiver_type != nullptr));
282
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != nullptr)) &&
283
              !too_many_traps_or_recompiles(caller, bci, reason)
284
             ) {
285
            // Generate uncommon trap for class check failure path
286
            // in case of monomorphic or bimorphic virtual call site.
287
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
288
                        Deoptimization::Action_maybe_recompile);
289
          } else {
290
            // Generate virtual call for class check failure path
291
            // in case of polymorphic virtual call site.
292
            miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
293
                                                : CallGenerator::for_virtual_call(callee, vtable_index));
294
          }
295
          if (miss_cg != nullptr) {
296
            if (next_hit_cg != nullptr) {
297
              assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation");
298
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
299
              // We don't need to record dependency on a receiver here and below.
300
              // Whenever we inline, the dependency is added by Parse::Parse().
301
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
302
            }
303
            if (miss_cg != nullptr) {
304
              ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0);
305
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count);
306
              float hit_prob = speculative_receiver_type != nullptr ? 1.0 : profile.receiver_prob(0);
307
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
308
              if (cg != nullptr)  return cg;
309
            }
310
          }
311
        }
312
      }
313
    }
314

315
    // If there is only one implementor of this interface then we
316
    // may be able to bind this invoke directly to the implementing
317
    // klass but we need both a dependence on the single interface
318
    // and on the method we bind to. Additionally since all we know
319
    // about the receiver type is that it's supposed to implement the
320
    // interface we have to insert a check that it's the class we
321
    // expect.  Interface types are not checked by the verifier so
322
    // they are roughly equivalent to Object.
323
    // The number of implementors for declared_interface is less or
324
    // equal to the number of implementors for target->holder() so
325
    // if number of implementors of target->holder() == 1 then
326
    // number of implementors for decl_interface is 0 or 1. If
327
    // it's 0 then no class implements decl_interface and there's
328
    // no point in inlining.
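    // Illustrative example (hypothetical types):
    //   interface Codec { int size(); }
    //   class Utf8Codec implements Codec { ... }   // currently the only implementor
    // A codec.size() call can then be bound to Utf8Codec::size behind a
    // receiver subtype guard, with dependencies recorded so the nmethod is
    // invalidated if another implementor is ever loaded.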
329
    if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
330
      ciInstanceKlass* declared_interface =
331
          caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
332
      ciInstanceKlass* singleton = declared_interface->unique_implementor();
333

334
      if (singleton != nullptr) {
335
        assert(singleton != declared_interface, "not a unique implementor");
336

337
        ciMethod* cha_monomorphic_target =
338
            callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
339

340
        if (cha_monomorphic_target != nullptr &&
341
            cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
342
          ciKlass* holder = cha_monomorphic_target->holder();
343

344
          // Try to inline the method found by CHA. Inlined method is guarded by the type check.
345
          CallGenerator* hit_cg = call_generator(cha_monomorphic_target,
346
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
347

348
          // Deoptimize on type check fail. The interpreter will throw ICCE for us.
349
          CallGenerator* miss_cg = CallGenerator::for_uncommon_trap(callee,
350
              Deoptimization::Reason_class_check, Deoptimization::Action_none);
351

352
          ciKlass* constraint = (holder->is_subclass_of(singleton) ? holder : singleton); // avoid upcasts
353
          CallGenerator* cg = CallGenerator::for_guarded_call(constraint, miss_cg, hit_cg);
354
          if (hit_cg != nullptr && cg != nullptr) {
355
            dependencies()->assert_unique_implementor(declared_interface, singleton);
356
            dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee);
357
            return cg;
358
          }
359
        }
360
      }
361
    } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
362

363
    // Nothing claimed the intrinsic, we go with straight-forward inlining
364
    // for already discovered intrinsic.
365
    if (allow_intrinsics && cg_intrinsic != nullptr) {
366
      assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
367
      return cg_intrinsic;
368
    }
369
  } // allow_inline
370

371
  // There was no special inlining tactic, or it bailed out.
372
  // Use a more generic tactic, like a simple call.
373
  if (call_does_dispatch) {
374
    const char* msg = "virtual call";
375
    if (C->print_inlining()) {
376
      print_inlining(callee, jvms->depth() - 1, jvms->bci(), InliningResult::FAILURE, msg);
377
    }
378
    C->log_inline_failure(msg);
379
    if (IncrementalInlineVirtual && allow_inline) {
380
      return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
381
    } else {
382
      return CallGenerator::for_virtual_call(callee, vtable_index);
383
    }
384
  } else {
385
    // Class Hierarchy Analysis or Type Profile reveals a unique target, or it is a static or special call.
386
    CallGenerator* cg = CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
387
    // For optimized virtual calls assert at runtime that receiver object
388
    // is a subtype of the method holder.
389
    if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
390
      CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
391
          Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
392
      cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
393
    }
394
    return cg;
395
  }
396
}
397

398
// Return true for methods that shouldn't be inlined early so that
399
// they are easier to analyze and optimize as intrinsics.
400
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
401
  if (has_stringbuilder()) {
402

403
    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
404
         call_method->holder() == C->env()->StringBuffer_klass()) &&
405
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
406
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
407
      // Delay SB calls only when called from non-SB code
408
      return false;
409
    }
410

411
    switch (call_method->intrinsic_id()) {
412
      case vmIntrinsics::_StringBuilder_void:
413
      case vmIntrinsics::_StringBuilder_int:
414
      case vmIntrinsics::_StringBuilder_String:
415
      case vmIntrinsics::_StringBuilder_append_char:
416
      case vmIntrinsics::_StringBuilder_append_int:
417
      case vmIntrinsics::_StringBuilder_append_String:
418
      case vmIntrinsics::_StringBuilder_toString:
419
      case vmIntrinsics::_StringBuffer_void:
420
      case vmIntrinsics::_StringBuffer_int:
421
      case vmIntrinsics::_StringBuffer_String:
422
      case vmIntrinsics::_StringBuffer_append_char:
423
      case vmIntrinsics::_StringBuffer_append_int:
424
      case vmIntrinsics::_StringBuffer_append_String:
425
      case vmIntrinsics::_StringBuffer_toString:
426
      case vmIntrinsics::_Integer_toString:
427
        return true;
428

429
      case vmIntrinsics::_String_String:
430
        {
431
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
432
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
433
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
434
            ciMethod* m = csj->method();
435
            if (m != nullptr &&
436
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
437
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
438
              // Delay String.<init>(new SB())
439
              return true;
440
          }
441
          return false;
442
        }
443

444
      default:
445
        return false;
446
    }
447
  }
448
  return false;
449
}
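// For illustration, the call shape this protects is the builder chain that
// PhaseStringOpts matches on, e.g.
//   String s = new StringBuilder().append("id=").append(x).toString();
// (explicit builder code, or what pre-indy string concatenation compiles to);
// inlining the individual append()/toString() calls early would destroy that
// shape before the string-concat optimization gets to see it.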
450

451
bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
452
  if (eliminate_boxing() && call_method->is_boxing_method()) {
453
    set_has_boxed_value(true);
454
    return aggressive_unboxing();
455
  }
456
  return false;
457
}
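// For illustration: delaying a boxing method such as Integer.valueOf(i) keeps
// the box visible as a call until the optimizer has seen how the boxed value
// is used (e.g. an immediate intValue() or a comparison), which is what makes
// eliminating the allocation practical; the exact heuristics live in the
// boxing late-inline machinery, not here.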
458

459
bool Compile::should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms) {
460
  return EnableVectorSupport && call_method->is_vector_method();
461
}
462

463
bool Compile::should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms) {
464
  return EnableVectorSupport && (call_method->intrinsic_id() == vmIntrinsics::_VectorRebox);
465
}
466

467
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
468
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
469
  // Additional inputs to consider...
470
  // bc      = bc()
471
  // caller  = method()
472
  // iter().get_method_holder_index()
473
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
474
  // Interface classes can be loaded & linked and never get around to
475
  // being initialized.  Uncommon-trap for not-initialized static or
476
  // v-calls.  Let interface calls happen.
477
  ciInstanceKlass* holder_klass = dest_method->holder();
478
  if (!holder_klass->is_being_initialized() &&
479
      !holder_klass->is_initialized() &&
480
      !holder_klass->is_interface()) {
481
    uncommon_trap(Deoptimization::Reason_uninitialized,
482
                  Deoptimization::Action_reinterpret,
483
                  holder_klass);
484
    return true;
485
  }
486

487
  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
488
  return false;
489
}
490

491
#ifdef ASSERT
492
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
493
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
494
  ciMethod* resolved_method = cg->method();
495
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
496
    tty->print_cr("JVMS:");
497
    jvms->dump();
498
    tty->print_cr("Bytecode info:");
499
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
500
    tty->print_cr("Resolved method:");
501
    cg->method()->print(); tty->cr();
502
    return false;
503
  }
504
  return true;
505
}
506
#endif // ASSERT
507

508
//------------------------------do_call----------------------------------------
509
// Handle your basic call.  Inline if we can & want to, else just setup call.
510
void Parse::do_call() {
511
  // It's likely we are going to add debug info soon.
512
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
513
  // our contribution to it is cleaned up right here.
514
  kill_dead_locals();
515

516
  C->print_inlining_assert_ready();
517

518
  // Set frequently used booleans
519
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
520
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
521
  const bool has_receiver = Bytecodes::has_receiver(bc());
522

523
  // Find target being called
524
  bool             will_link;
525
  ciSignature*     declared_signature = nullptr;
526
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
527
  ciInstanceKlass* holder_klass = orig_callee->holder();
528
  ciKlass*         holder       = iter().get_declared_method_holder();
529
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
530
  assert(declared_signature != nullptr, "cannot be null");
531
  JFR_ONLY(Jfr::on_resolution(this, holder, orig_callee);)
532

533
  // Bump max node limit for JSR292 users
534
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
535
    C->set_max_node_limit(3*MaxNodeLimit);
536
  }
537

538
  // uncommon-trap when callee is unloaded, uninitialized or will not link
539
  // bailout when too many arguments for register representation
540
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
541
    if (PrintOpto && (Verbose || WizardMode)) {
542
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
543
      orig_callee->print_name(); tty->cr();
544
    }
545
    return;
546
  }
547
  assert(holder_klass->is_loaded(), "");
548
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
549
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
550
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
551
  assert(holder_klass->is_interface() || holder_klass->super() == nullptr || (bc() != Bytecodes::_invokeinterface), "must match bc");
552
  // Note:  In the absence of miranda methods, an abstract class K can perform
553
  // an invokevirtual directly on an interface method I.m if K implements I.
554

555
  // orig_callee is the resolved callee whose signature includes the
556
  // appendix argument.
557
  const int nargs = orig_callee->arg_size();
558
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());
559

560
  // Push appendix argument (MethodType, CallSite, etc.), if one.
561
  if (iter().has_appendix()) {
562
    ciObject* appendix_arg = iter().get_appendix();
563
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg, /* require_const= */ true);
564
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
565
    push(appendix_arg_node);
566
  }
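  // Illustrative note: the appendix is an extra object produced by JSR 292
  // call-site resolution (for example a MethodType or a CallSite) that the
  // resolved linker method expects as a trailing argument. It has no operand
  // in the bytecode, which is why it is materialized here as a constant and
  // pushed on the expression stack.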
567

568
  // ---------------------
569
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
570
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
571
  // Does the call-site type profile reveal only one receiver?
572
  // Then we may introduce a run-time check and inline on the path where it succeeds.
573
  // The other path may uncommon_trap, check for another receiver, or do a v-call.
574

575
  // Try to get the most accurate receiver type
576
  ciMethod* callee             = orig_callee;
577
  int       vtable_index       = Method::invalid_vtable_index;
578
  bool      call_does_dispatch = false;
579

580
  // Speculative type of the receiver if any
581
  ciKlass* speculative_receiver_type = nullptr;
582
  if (is_virtual_or_interface) {
583
    Node* receiver_node             = stack(sp() - nargs);
584
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
585
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
586
    // For arrays, klass below is Object. When vtable calls are used,
587
    // resolving the call with Object would allow an illegal call to
588
    // finalize() on an array. We use holder instead: illegal calls to
589
    // finalize() won't be compiled as vtable calls (IC call
590
    // resolution will catch the illegal call) and the few legal calls
591
    // on array types won't be either.
592
    callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
593
                                      receiver_type, is_virtual,
594
                                      call_does_dispatch, vtable_index);  // out-parameters
595
    speculative_receiver_type = receiver_type != nullptr ? receiver_type->speculative_type() : nullptr;
596
  }
597

598
  // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
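  // Roughly: an invokespecial issued from an interface method must only run
  // with a receiver that is a subtype of that interface, and a private
  // interface method called via invokeinterface likewise requires a receiver
  // that implements the declaring interface. The verifier does not prove
  // either property, so a dynamic subtype check (trapping on failure) is
  // emitted below when a constraint applies.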
599
  ciKlass* receiver_constraint = nullptr;
600
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
601
    ciInstanceKlass* calling_klass = method()->holder();
602
    ciInstanceKlass* sender_klass = calling_klass;
603
    if (sender_klass->is_interface()) {
604
      receiver_constraint = sender_klass;
605
    }
606
  } else if (iter().cur_bc_raw() == Bytecodes::_invokeinterface && orig_callee->is_private()) {
607
    assert(holder->is_interface(), "How did we get a non-interface method here!");
608
    receiver_constraint = holder;
609
  }
610

611
  if (receiver_constraint != nullptr) {
612
    Node* receiver_node = stack(sp() - nargs);
613
    Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint, Type::trust_interfaces));
614
    Node* bad_type_ctrl = nullptr;
615
    Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
616
    if (bad_type_ctrl != nullptr) {
617
      PreserveJVMState pjvms(this);
618
      set_control(bad_type_ctrl);
619
      uncommon_trap(Deoptimization::Reason_class_check,
620
                    Deoptimization::Action_none);
621
    }
622
    if (stopped()) {
623
      return; // MUST uncommon-trap?
624
    }
625
    set_stack(sp() - nargs, casted_receiver);
626
  }
627

628
  // Note:  It's OK to try to inline a virtual call.
629
  // The call generator will not attempt to inline a polymorphic call
630
  // unless it knows how to optimize the receiver dispatch.
631
  bool try_inline = (C->do_inlining() || InlineAccessors);
632

633
  // ---------------------
634
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
635
  JVMState* jvms = sync_jvms();
636

637
  // ---------------------
638
  // Decide call tactic.
639
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
640
  // It decides whether inlining is desirable or not.
641
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
642

643
  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
644
  orig_callee = callee = nullptr;
645

646
  // ---------------------
647
  // Round double arguments before call
648
  round_double_arguments(cg->method());
649

650
  // Feed profiling data for arguments to the type system so it can
651
  // propagate it as speculative types
652
  record_profiled_arguments_for_speculation(cg->method(), bc());
653

654
#ifndef PRODUCT
655
  // bump global counters for calls
656
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
657

658
  // Record first part of parsing work for this call
659
  parse_histogram()->record_change();
660
#endif // not PRODUCT
661

662
  assert(jvms == this->jvms(), "still operating on the right JVMS");
663
  assert(jvms_in_sync(),       "jvms must carry full info into CG");
664

665
  // save across call, for a subsequent cast_not_null.
666
  Node* receiver = has_receiver ? argument(0) : nullptr;
667

668
  // The extra CheckCastPPs for speculative types mess with PhaseStringOpts
669
  if (receiver != nullptr && !call_does_dispatch && !cg->is_string_late_inline()) {
670
    // Feed profiling data for a single receiver to the type system so
671
    // it can propagate it as a speculative type
672
    receiver = record_profiled_receiver_for_speculation(receiver);
673
  }
674

675
  JVMState* new_jvms = cg->generate(jvms);
676
  if (new_jvms == nullptr) {
677
    // When inlining attempt fails (e.g., too many arguments),
678
    // it may contaminate the current compile state, making it
679
    // impossible to pull back and try again.  Once we call
680
    // cg->generate(), we are committed.  If it fails, the whole
681
    // compilation task is compromised.
682
    if (failing())  return;
683

684
    // This can happen if a library intrinsic is available, but refuses
685
    // the call site, perhaps because it did not match a pattern the
686
    // intrinsic was expecting to optimize. Should always be possible to
687
    // get a normal java call that may inline in that case
688
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
689
    new_jvms = cg->generate(jvms);
690
    if (new_jvms == nullptr) {
691
      guarantee(failing(), "call failed to generate:  calls should work");
692
      return;
693
    }
694
  }
695

696
  if (cg->is_inline()) {
697
    // Accumulate has_loops estimate
698
    C->env()->notice_inlined_method(cg->method());
699
  }
700

701
  // Reset parser state from [new_]jvms, which now carries results of the call.
702
  // Return value (if any) is already pushed on the stack by the cg.
703
  add_exception_states_from(new_jvms);
704
  if (new_jvms->map()->control() == top()) {
705
    stop_and_kill_map();
706
  } else {
707
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
708
    set_jvms(new_jvms);
709
  }
710

711
  assert(check_call_consistency(jvms, cg), "inconsistent info");
712

713
  if (!stopped()) {
714
    // This was some sort of virtual call, which did a null check for us.
715
    // Now we can assert receiver-not-null, on the normal return path.
716
    if (receiver != nullptr && cg->is_virtual()) {
717
      Node* cast = cast_not_null(receiver);
718
      // %%% assert(receiver == cast, "should already have cast the receiver");
719
    }
720

721
    ciType* rtype = cg->method()->return_type();
722
    ciType* ctype = declared_signature->return_type();
723

724
    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
725
      // Be careful here with return types.
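      // Illustrative example: at a site like
      //   String s = (String) mh.invoke();
      // the symbolic (call-site) return type ctype is String while the
      // resolved signature-polymorphic method returns Object (rtype), so the
      // pushed value may need a cast, a pop, or a replacement zero of the
      // right kind; the branches below reconcile those mismatches.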
726
      if (ctype != rtype) {
727
        BasicType rt = rtype->basic_type();
728
        BasicType ct = ctype->basic_type();
729
        if (ct == T_VOID) {
730
          // It's OK for a method  to return a value that is discarded.
731
          // The discarding does not require any special action from the caller.
732
          // The Java code knows this, at VerifyType.isNullConversion.
733
          pop_node(rt);  // whatever it was, pop it
734
        } else if (rt == T_INT || is_subword_type(rt)) {
735
          // Nothing.  These cases are handled in lambda form bytecode.
736
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
737
        } else if (is_reference_type(rt)) {
738
          assert(is_reference_type(ct), "rt=%s, ct=%s", type2name(rt), type2name(ct));
739
          if (ctype->is_loaded()) {
740
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
741
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
742
            if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
743
              Node* retnode = pop();
744
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
745
              push(cast_obj);
746
            }
747
          }
748
        } else {
749
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
750
          // push a zero; it's better than getting an oop/int mismatch
751
          pop_node(rt);
752
          Node* retnode = zerocon(ct);
753
          push_node(ct, retnode);
754
        }
755
        // Now that the value is well-behaved, continue with the call-site type.
756
        rtype = ctype;
757
      }
758
    } else {
759
      // Symbolic resolution enforces the types to be the same.
760
      // NOTE: We must relax the assert for unloaded types because two
761
      // different ciType instances of the same unloaded class type
762
      // can appear to be "loaded" by different loaders (depending on
763
      // the accessing class).
764
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
765
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
766
    }
767

768
    // If the return type of the method is not loaded, assert that the
769
    // value we got is a null.  Otherwise, we need to recompile.
770
    if (!rtype->is_loaded()) {
771
      if (PrintOpto && (Verbose || WizardMode)) {
772
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
773
        cg->method()->print_name(); tty->cr();
774
      }
775
      if (C->log() != nullptr) {
776
        C->log()->elem("assert_null reason='return' klass='%d'",
777
                       C->log()->identify(rtype));
778
      }
779
      // If there is going to be a trap, put it at the next bytecode:
780
      set_bci(iter().next_bci());
781
      null_assert(peek());
782
      set_bci(iter().cur_bci()); // put it back
783
    }
784
    BasicType ct = ctype->basic_type();
785
    if (is_reference_type(ct)) {
786
      record_profiled_return_for_speculation();
787
    }
788
  }
789

790
  // Restart record of parsing work after possible inlining of call
791
#ifndef PRODUCT
792
  parse_histogram()->set_initial_state(bc());
793
#endif
794
}
795

796
//---------------------------catch_call_exceptions-----------------------------
797
// Put a Catch and CatchProj nodes behind a just-created call.
798
// Send their caught exceptions to the proper handler.
799
// This may be used after a call to the rethrow VM stub,
800
// when it is needed to process unloaded exception classes.
801
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
802
  // Exceptions are delivered through this channel:
803
  Node* i_o = this->i_o();
804

805
  // Add a CatchNode.
806
  Arena tmp_mem{mtCompiler};
807
  GrowableArray<int> bcis(&tmp_mem, 8, 0, -1);
808
  GrowableArray<const Type*> extypes(&tmp_mem, 8, 0, nullptr);
809
  GrowableArray<int> saw_unloaded(&tmp_mem, 8, 0, -1);
810

811
  bool default_handler = false;
812
  for (; !handlers.is_done(); handlers.next()) {
813
    ciExceptionHandler* h       = handlers.handler();
814
    int                 h_bci   = h->handler_bci();
815
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
816
    // Do not introduce unloaded exception types into the graph:
817
    if (!h_klass->is_loaded()) {
818
      if (saw_unloaded.contains(h_bci)) {
819
        /* We've already seen an unloaded exception with h_bci,
820
           so don't duplicate. Duplication will cause the CatchNode to be
821
           unnecessarily large. See 4713716. */
822
        continue;
823
      } else {
824
        saw_unloaded.append(h_bci);
825
      }
826
    }
827
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
828
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
829
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
830
    assert(!h_extype->empty(), "sanity");
831
    // Note: It's OK if the BCIs repeat themselves.
832
    bcis.append(h_bci);
833
    extypes.append(h_extype);
834
    if (h_bci == -1) {
835
      default_handler = true;
836
    }
837
  }
838

839
  if (!default_handler) {
840
    bcis.append(-1);
841
    const Type* extype = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
842
    extype = extype->join(TypeInstPtr::NOTNULL);
843
    extypes.append(extype);
844
  }
845

846
  int len = bcis.length();
847
  CatchNode *cn = new CatchNode(control(), i_o, len+1);
848
  Node *catch_ = _gvn.transform(cn);
849

850
  // now branch with the exception state to each of the (potential)
851
  // handlers
852
  for(int i=0; i < len; i++) {
853
    // Setup JVM state to enter the handler.
854
    PreserveJVMState pjvms(this);
855
    // Locals are just copied from before the call.
856
    // Get control from the CatchNode.
857
    int handler_bci = bcis.at(i);
858
    Node* ctrl = _gvn.transform( new CatchProjNode(catch_, i+1,handler_bci));
859
    // This handler cannot happen?
860
    if (ctrl == top())  continue;
861
    set_control(ctrl);
862

863
    // Create exception oop
864
    const TypeInstPtr* extype = extypes.at(i)->is_instptr();
865
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes.at(i), ctrl, i_o));
866

867
    // Handle unloaded exception classes.
868
    if (saw_unloaded.contains(handler_bci)) {
869
      // An unloaded exception type is coming here.  Do an uncommon trap.
870
#ifndef PRODUCT
871
      // We do not expect the same handler bci to take both cold unloaded
872
      // and hot loaded exceptions.  But, watch for it.
873
      if (PrintOpto && (Verbose || WizardMode) && extype->is_loaded()) {
874
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
875
        method()->print_name(); tty->cr();
876
      } else if (PrintOpto && (Verbose || WizardMode)) {
877
        tty->print("Bailing out on unloaded exception type ");
878
        extype->instance_klass()->print_name();
879
        tty->print(" at bci:%d in ", bci());
880
        method()->print_name(); tty->cr();
881
      }
882
#endif
883
      // Emit an uncommon trap instead of processing the block.
884
      set_bci(handler_bci);
885
      push_ex_oop(ex_oop);
886
      uncommon_trap(Deoptimization::Reason_unloaded,
887
                    Deoptimization::Action_reinterpret,
888
                    extype->instance_klass(), "!loaded exception");
889
      set_bci(iter().cur_bci()); // put it back
890
      continue;
891
    }
892

893
    // go to the exception handler
894
    if (handler_bci < 0) {     // merge with corresponding rethrow node
895
      throw_to_exit(make_exception_state(ex_oop));
896
    } else {                      // Else jump to corresponding handler
897
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
898
      merge_exception(handler_bci);
899
    }
900
  }
901

902
  // The first CatchProj is for the normal return.
903
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
904
  set_control(_gvn.transform( new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
905
}
906

907

908
//----------------------------catch_inline_exceptions--------------------------
909
// Handle all exceptions thrown by an inlined method or individual bytecode.
910
// Common case 1: we have no handler, so all exceptions merge right into
911
// the rethrow case.
912
// Case 2: we have some handlers, with loaded exception klasses that have
913
// no subklasses.  We do a Deutsch-Schiffman style type-check on the incoming
914
// exception oop and branch to the handler directly.
915
// Case 3: We have some handlers with subklasses or are not loaded at
916
// compile-time.  We have to call the runtime to resolve the exception.
917
// So we insert a RethrowCall and all the logic that goes with it.
918
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
919
  // Caller is responsible for saving away the map for normal control flow!
920
  assert(stopped(), "call set_map(nullptr) first");
921
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");
922

923
  Node* ex_node = saved_ex_oop(ex_map);
924
  if (ex_node == top()) {
925
    // No action needed.
926
    return;
927
  }
928
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
929
  NOT_PRODUCT(if (ex_type==nullptr) tty->print_cr("*** Exception not InstPtr"));
930
  if (ex_type == nullptr)
931
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
932

933
  // determine potential exception handlers
934
  ciExceptionHandlerStream handlers(method(), bci(),
935
                                    ex_type->instance_klass(),
936
                                    ex_type->klass_is_exact());
937

938
  // Start executing from the given throw state.  (Keep its stack, for now.)
939
  // Get the exception oop as known at compile time.
940
  ex_node = use_exception_state(ex_map);
941

942
  // Get the exception oop klass from its header
943
  Node* ex_klass_node = nullptr;
944
  if (has_exception_handler() && !ex_type->klass_is_exact()) {
945
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
946
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
947

948
    // Compute the exception klass a little more cleverly.
949
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
950
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
951
    // each arm of the Phi.  If I know something clever about the exceptions
952
    // I'm loading the class from, I can replace the LoadKlass with the
953
    // klass constant for the exception oop.
954
    if (ex_node->is_Phi()) {
955
      ex_klass_node = new PhiNode(ex_node->in(0), TypeInstKlassPtr::OBJECT);
956
      for (uint i = 1; i < ex_node->req(); i++) {
957
        Node* ex_in = ex_node->in(i);
958
        if (ex_in == top() || ex_in == nullptr) {
959
          // This path was not taken.
960
          ex_klass_node->init_req(i, top());
961
          continue;
962
        }
963
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
964
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
965
        ex_klass_node->init_req( i, k );
966
      }
967
      ex_klass_node = _gvn.transform(ex_klass_node);
968
    }
969
  }
970

971
  // Scan the exception table for applicable handlers.
972
  // If none, we can call rethrow() and be done!
973
  // If precise (loaded with no subklasses), insert a D.S. style
974
  // pointer compare to the correct handler and loop back.
975
  // If imprecise, switch to the Rethrow VM-call style handling.
976

977
  int remaining = handlers.count_remaining();
978

979
  // iterate through all entries sequentially
980
  for (;!handlers.is_done(); handlers.next()) {
981
    ciExceptionHandler* handler = handlers.handler();
982

983
    if (handler->is_rethrow()) {
984
      // If we fell off the end of the table without finding an imprecise
985
      // exception klass (and without finding a generic handler) then we
986
      // know this exception is not handled in this method.  We just rethrow
987
      // the exception into the caller.
988
      throw_to_exit(make_exception_state(ex_node));
989
      return;
990
    }
991

992
    // exception handler bci range covers throw_bci => investigate further
993
    int handler_bci = handler->handler_bci();
994

995
    if (remaining == 1) {
996
      push_ex_oop(ex_node);        // Push exception oop for handler
997
      if (PrintOpto && WizardMode) {
998
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
999
      }
1000
      // If this is a backwards branch in the bytecodes, add safepoint
1001
      maybe_add_safepoint(handler_bci);
1002
      merge_exception(handler_bci); // jump to handler
1003
      return;                   // No more handling to be done here!
1004
    }
1005

1006
    // Get the handler's klass
1007
    ciInstanceKlass* klass = handler->catch_klass();
1008

1009
    if (!klass->is_loaded()) {  // klass is not loaded?
1010
      // fall through into catch_call_exceptions which will emit a
1011
      // handler with an uncommon trap.
1012
      break;
1013
    }
1014

1015
    if (klass->is_interface())  // should not happen, but...
1016
      break;                    // bail out
1017

1018
    // Check the type of the exception against the catch type
1019
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
1020
    Node* con = _gvn.makecon(tk);
1021
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
1022
    if (!stopped()) {
1023
      PreserveJVMState pjvms(this);
1024
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
1025
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
1026
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
1027
      push_ex_oop(ex_oop);      // Push exception oop for handler
1028
      if (PrintOpto && WizardMode) {
1029
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
1030
        klass->print_name();
1031
        tty->cr();
1032
      }
1033
      // If this is a backwards branch in the bytecodes, add safepoint
1034
      maybe_add_safepoint(handler_bci);
1035
      merge_exception(handler_bci);
1036
    }
1037
    set_control(not_subtype_ctrl);
1038

1039
    // Come here if exception does not match handler.
1040
    // Carry on with more handler checks.
1041
    --remaining;
1042
  }
1043

1044
  assert(!stopped(), "you should return if you finish the chain");
1045

1046
  // Oops, need to call into the VM to resolve the klasses at runtime.
1047
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
1048
  kill_dead_locals();
1049

1050
  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
1051
                    OptoRuntime::rethrow_Type(),
1052
                    OptoRuntime::rethrow_stub(),
1053
                    nullptr, nullptr,
1054
                    ex_node);
1055

1056
  // Rethrow is a pure call, no side effects, only a result.
1057
  // The result cannot be allocated, so we use I_O
1058

1059
  // Catch exceptions from the rethrow
1060
  catch_call_exceptions(handlers);
1061
}
1062

1063

1064
// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)
1065

1066

1067
#ifndef PRODUCT
1068
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
1069
  if( CountCompiledCalls ) {
1070
    if( at_method_entry ) {
1071
      // bump invocation counter if top method (for statistics)
1072
      if (CountCompiledCalls && depth() == 1) {
1073
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
1074
        Node* adr1 = makecon(addr_type);
1075
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
1076
        increment_counter(adr2);
1077
      }
1078
    } else if (is_inline) {
1079
      switch (bc()) {
1080
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
1081
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
1082
      case Bytecodes::_invokestatic:
1083
      case Bytecodes::_invokedynamic:
1084
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
1085
      default: fatal("unexpected call bytecode");
1086
      }
1087
    } else {
1088
      switch (bc()) {
1089
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
1090
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
1091
      case Bytecodes::_invokestatic:
1092
      case Bytecodes::_invokedynamic:
1093
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
1094
      default: fatal("unexpected call bytecode");
1095
      }
1096
    }
1097
  }
1098
}
1099
#endif //PRODUCT
1100

1101

1102
ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
1103
                                         ciKlass* holder, ciMethod* callee,
1104
                                         const TypeOopPtr* receiver_type, bool is_virtual,
1105
                                         bool& call_does_dispatch, int& vtable_index,
1106
                                         bool check_access) {
1107
  // Set default values for out-parameters.
1108
  call_does_dispatch = true;
1109
  vtable_index       = Method::invalid_vtable_index;
1110

1111
  // Choose call strategy.
1112
  ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, holder, callee,
1113
                                                         receiver_type, check_access);
1114

1115
  // Has the call been sufficiently improved such that it is no longer a virtual call?
1116
  if (optimized_virtual_method != nullptr) {
1117
    callee             = optimized_virtual_method;
1118
    call_does_dispatch = false;
1119
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
1120
    // We can make a vtable call at this site
1121
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
1122
  }
1123
  return callee;
1124
}
1125

1126
// Identify possible target method and inlining style
1127
ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
1128
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
1129
                                     bool check_access) {
1130
  // only use for virtual or interface calls
1131

1132
  // If it is obviously final, do not bother to call find_monomorphic_target,
1133
  // because the class hierarchy checks are not needed, and may fail due to
1134
  // incompletely loaded classes.  Since we do our own class loading checks
1135
  // in this module, we may confidently bind to any method.
1136
  if (callee->can_be_statically_bound()) {
1137
    return callee;
1138
  }
1139

1140
  if (receiver_type == nullptr) {
1141
    return nullptr; // no receiver type info
1142
  }
1143

1144
  // Attempt to improve the receiver
1145
  bool actual_receiver_is_exact = false;
1146
  ciInstanceKlass* actual_receiver = klass;
1147
  // Array methods are all inherited from Object, and are monomorphic.
1148
  // finalize() call on array is not allowed.
1149
  if (receiver_type->isa_aryptr() &&
1150
      callee->holder() == env()->Object_klass() &&
1151
      callee->name() != ciSymbols::finalize_method_name()) {
1152
    return callee;
1153
  }
1154

1155
  // All other interesting cases are instance klasses.
1156
  if (!receiver_type->isa_instptr()) {
1157
    return nullptr;
1158
  }
1159

1160
  ciInstanceKlass* receiver_klass = receiver_type->is_instptr()->instance_klass();
1161
  if (receiver_klass->is_loaded() && receiver_klass->is_initialized() && !receiver_klass->is_interface() &&
1162
      (receiver_klass == actual_receiver || receiver_klass->is_subtype_of(actual_receiver))) {
1163
    // receiver_klass is the same or a better type than the original actual_receiver,
1164
    // e.g. static receiver from bytecodes.
1165
    actual_receiver = receiver_klass;
1166
    // Is the actual_receiver exact?
1167
    actual_receiver_is_exact = receiver_type->klass_is_exact();
1168
  }
1169

1170
  ciInstanceKlass*   calling_klass = caller->holder();
1171
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
1172

1173
  if (cha_monomorphic_target != nullptr) {
1174
    // Hardwiring a virtual.
1175
    assert(!callee->can_be_statically_bound(), "should have been handled earlier");
1176
    assert(!cha_monomorphic_target->is_abstract(), "");
1177
    if (!cha_monomorphic_target->can_be_statically_bound(actual_receiver)) {
1178
      // If we inlined because CHA revealed only a single target method,
1179
      // then we are dependent on that target method not getting overridden
1180
      // by dynamic class loading.  Be sure to test the "static" receiver
1181
      // dest_method here, as opposed to the actual receiver, which may
1182
      // falsely lead us to believe that the receiver is final or private.
1183
      dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target, holder, callee);
1184
    }
1185
    return cha_monomorphic_target;
1186
  }
1187

1188
  // If the type is exact, we can still bind the method w/o a vcall.
1189
  // (This case comes after CHA so we can see how much extra work it does.)
1190
  if (actual_receiver_is_exact) {
1191
    // In case of evolution, there is a dependence on every inlined method, since each
1192
    // such method can be changed when its class is redefined.
1193
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
1194
    if (exact_method != nullptr) {
1195
      return exact_method;
1196
    }
1197
  }
1198

1199
  return nullptr;
1200
}