/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

71
class SimpleRuntimeFrame {
72

73
  public:
74

75
  // Most of the runtime stubs have this simple frame layout.
76
  // This class exists to make the layout shared in one place.
77
  // Offsets are for compiler stack slots, which are jints.
78
  enum layout {
79
    // The frame sender code expects that rbp will be in the "natural" place and
80
    // will override any oopMap setting for it. We must therefore force the layout
81
    // so that it agrees with the frame sender code.
82
    // we don't expect any arg reg save area so aarch64 asserts that
83
    // frame::arg_reg_save_area_bytes == 0
84
    rfp_off = 0,
85
    rfp_off2,
86
    return_off, return_off2,
87
    framesize
88
  };
89
};
90

91
// FIXME -- this is used by C1
92
class RegisterSaver {
93
  const bool _save_vectors;
94
 public:
95
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}
96

97
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
98
  void restore_live_registers(MacroAssembler* masm);
99

100
  // Offsets into the register save area
101
  // Used by deoptimization when it is managing result register
102
  // values on its own
103

104
  int reg_offset_in_bytes(Register r);
105
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
106
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
107
  int v0_offset_in_bytes();
108

109
  // Total stack size in bytes for saving sve predicate registers.
110
  int total_sve_predicate_in_bytes();
111

112
  // Capture info about frame layout
113
  // Note this is only correct when not saving full vectors.
114
  enum layout {
115
                fpu_state_off = 0,
116
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
117
                // The frame sender code expects that rfp will be in
118
                // the "natural" place and will override any oopMap
119
                // setting for it. We must therefore force the layout
120
                // so that it agrees with the frame sender code.
121
                r0_off = fpu_state_off + FPUStateSizeInWords,
122
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
123
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
124
                reg_save_size = return_off + Register::max_slots_per_register};
125

126
};
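
// Reading aid (not additional behavior): with the layout above, offsets grow
// upwards from sp, so the save area is ordered FPU/vector state first (lowest
// addresses), then the general-purpose registers starting at r0_off, then the
// saved rfp, and finally the return address slot at the top of the frame.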
127

128
int RegisterSaver::reg_offset_in_bytes(Register r) {
129
  // The integer registers are located above the floating point
130
  // registers in the stack frame pushed by save_live_registers() so the
131
  // offset depends on whether we are saving full vectors, and whether
132
  // those vectors are NEON or SVE.
133

134
  int slots_per_vect = FloatRegister::save_slots_per_register;
135

136
#if COMPILER2_OR_JVMCI
137
  if (_save_vectors) {
138
    slots_per_vect = FloatRegister::slots_per_neon_register;
139

140
#ifdef COMPILER2
141
    if (Matcher::supports_scalable_vector()) {
142
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
143
    }
144
#endif
145
  }
146
#endif
147

148
  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
149
  return r0_offset + r->encoding() * wordSize;
150
}
151

152
int RegisterSaver::v0_offset_in_bytes() {
153
  // The floating point registers are located above the predicate registers if
154
  // they are present in the stack frame pushed by save_live_registers(). So the
155
  // offset depends on the saved total predicate vectors in the stack frame.
156
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
157
}
158

159
int RegisterSaver::total_sve_predicate_in_bytes() {
160
#ifdef COMPILER2
161
  if (_save_vectors && Matcher::supports_scalable_vector()) {
162
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
163
           PRegister::number_of_registers;
164
  }
165
#endif
166
  return 0;
167
}
168

169
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
170
  bool use_sve = false;
171
  int sve_vector_size_in_bytes = 0;
172
  int sve_vector_size_in_slots = 0;
173
  int sve_predicate_size_in_slots = 0;
174
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
175
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;
176

177
#ifdef COMPILER2
178
  use_sve = Matcher::supports_scalable_vector();
179
  if (use_sve) {
180
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
181
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
182
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
183
  }
184
#endif
185

186
#if COMPILER2_OR_JVMCI
187
  if (_save_vectors) {
188
    int extra_save_slots_per_register = 0;
189
    // Save upper half of vector registers
190
    if (use_sve) {
191
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
192
    } else {
193
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
194
    }
195
    int extra_vector_bytes = extra_save_slots_per_register *
196
                             VMRegImpl::stack_slot_size *
197
                             FloatRegister::number_of_registers;
198
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
199
  }
200
#else
201
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
202
#endif
203

204
  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
205
                                     reg_save_size * BytesPerInt, 16);
206
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
207
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
208
  // The caller will allocate additional_frame_words
209
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
210
  // CodeBlob frame size is in words.
211
  int frame_size_in_words = frame_size_in_bytes / wordSize;
212
  *total_frame_words = frame_size_in_words;
213

214
  // Save Integer and Float registers.
215
  __ enter();
216
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
217

218
  // Set an oopmap for the call site.  This oopmap will map all
219
  // oop-registers and debug-info registers as callee-saved.  This
220
  // will allow deoptimization at this safepoint to find all possible
221
  // debug-info recordings, as well as let GC find all oops.
222

223
  OopMapSet *oop_maps = new OopMapSet();
224
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
225

226
  for (int i = 0; i < Register::number_of_registers; i++) {
227
    Register r = as_Register(i);
228
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
229
      // SP offsets are in 4-byte words.
230
      // Register slots are 8 bytes wide, 32 floating-point registers.
231
      int sp_offset = Register::max_slots_per_register * i +
232
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
233
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
234
    }
235
  }
236

237
  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
238
    FloatRegister r = as_FloatRegister(i);
239
    int sp_offset = 0;
240
    if (_save_vectors) {
241
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
242
                            (FloatRegister::slots_per_neon_register * i);
243
    } else {
244
      sp_offset = FloatRegister::save_slots_per_register * i;
245
    }
246
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
247
  }
248

249
  return oop_map;
250
}
251

252
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
253
#ifdef COMPILER2
254
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
255
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
256
#else
257
#if !INCLUDE_JVMCI
258
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
259
#endif
260
  __ pop_CPU_state(_save_vectors);
261
#endif
262
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
263
  __ authenticate_return_address();
264
}
265

266
// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The SVE supported min vector size is 8 bytes and we need to save
// predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}
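
// For example, a full 16-byte NEON Q register (or any SVE vector wider than 8
// bytes) is "wide"; with UseSVE > 0 even the default 8-byte size is treated as
// wide, because the SVE predicate registers must then be saved as well.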
273

274
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.
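
// Illustrative example (not exhaustive): for a Java signature (long, int, Object)
// this convention assigns the long to j_rarg0, the int to j_rarg1 and the Object
// to j_rarg2; only once the eight j_rarg registers are used up do arguments spill
// to 4-byte stack slots, with stk_args aligned up to a 2-slot boundary first.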
293

294
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
295
                                           VMRegPair *regs,
296
                                           int total_args_passed) {
297

298
  // Create the mapping between argument positions and
299
  // registers.
300
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
301
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
302
  };
303
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
304
    j_farg0, j_farg1, j_farg2, j_farg3,
305
    j_farg4, j_farg5, j_farg6, j_farg7
306
  };
307

308

309
  uint int_args = 0;
310
  uint fp_args = 0;
311
  uint stk_args = 0;
312

313
  for (int i = 0; i < total_args_passed; i++) {
314
    switch (sig_bt[i]) {
315
    case T_BOOLEAN:
316
    case T_CHAR:
317
    case T_BYTE:
318
    case T_SHORT:
319
    case T_INT:
320
      if (int_args < Argument::n_int_register_parameters_j) {
321
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
322
      } else {
323
        stk_args = align_up(stk_args, 2);
324
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
325
        stk_args += 1;
326
      }
327
      break;
328
    case T_VOID:
329
      // halves of T_LONG or T_DOUBLE
330
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
331
      regs[i].set_bad();
332
      break;
333
    case T_LONG:
334
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
335
      // fall through
336
    case T_OBJECT:
337
    case T_ARRAY:
338
    case T_ADDRESS:
339
      if (int_args < Argument::n_int_register_parameters_j) {
340
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
341
      } else {
342
        stk_args = align_up(stk_args, 2);
343
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
344
        stk_args += 2;
345
      }
346
      break;
347
    case T_FLOAT:
348
      if (fp_args < Argument::n_float_register_parameters_j) {
349
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
350
      } else {
351
        stk_args = align_up(stk_args, 2);
352
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
353
        stk_args += 1;
354
      }
355
      break;
356
    case T_DOUBLE:
357
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
358
      if (fp_args < Argument::n_float_register_parameters_j) {
359
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
360
      } else {
361
        stk_args = align_up(stk_args, 2);
362
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
363
        stk_args += 2;
364
      }
365
      break;
366
    default:
367
      ShouldNotReachHere();
368
      break;
369
    }
370
  }
371

372
  return stk_args;
373
}
374

375
// Patch the caller's callsite with entry to compiled code if it exists.
376
static void patch_callers_callsite(MacroAssembler *masm) {
377
  Label L;
378
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
379
  __ cbz(rscratch1, L);
380

381
  __ enter();
382
  __ push_CPU_state();
383

384
  // VM needs caller's callsite
385
  // VM needs target method
386
  // This needs to be a long call since we will relocate this adapter to
387
  // the codeBuffer and it may not reach
388

389
#ifndef PRODUCT
390
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
391
#endif
392

393
  __ mov(c_rarg0, rmethod);
394
  __ mov(c_rarg1, lr);
395
  __ authenticate_return_address(c_rarg1);
396
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
397
  __ blr(rscratch1);
398

399
  // Explicit isb required because fixup_callers_callsite may change the code
400
  // stream.
401
  __ safepoint_isb();
402

403
  __ pop_CPU_state();
404
  // restore sp
405
  __ leave();
406
  __ bind(L);
407
}
408

409
static void gen_c2i_adapter(MacroAssembler *masm,
410
                            int total_args_passed,
411
                            int comp_args_on_stack,
412
                            const BasicType *sig_bt,
413
                            const VMRegPair *regs,
414
                            Label& skip_fixup) {
415
  // Before we get into the guts of the C2I adapter, see if we should be here
416
  // at all.  We've come from compiled code and are attempting to jump to the
417
  // interpreter, which means the caller made a static call to get here
418
  // (vcalls always get a compiled target if there is one).  Check for a
419
  // compiled target.  If there is one, we need to patch the caller's call.
420
  patch_callers_callsite(masm);
421

422
  __ bind(skip_fixup);
423

424
  int words_pushed = 0;
425

426
  // Since all args are passed on the stack, total_args_passed *
427
  // Interpreter::stackElementSize is the space we need.
428

429
  int extraspace = total_args_passed * Interpreter::stackElementSize;
430

431
  __ mov(r19_sender_sp, sp);
432

433
  // stack is aligned, keep it that way
434
  extraspace = align_up(extraspace, 2*wordSize);
435

436
  if (extraspace)
437
    __ sub(sp, sp, extraspace);
438

439
  // Now write the args into the outgoing interpreter space
440
  for (int i = 0; i < total_args_passed; i++) {
441
    if (sig_bt[i] == T_VOID) {
442
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
443
      continue;
444
    }
445

446
    // offset to start parameters
447
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
448
    int next_off = st_off - Interpreter::stackElementSize;
449

450
    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double in
    // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.
462

463
    VMReg r_1 = regs[i].first();
464
    VMReg r_2 = regs[i].second();
465
    if (!r_1->is_valid()) {
466
      assert(!r_2->is_valid(), "");
467
      continue;
468
    }
469
    if (r_1->is_stack()) {
470
      // memory to memory use rscratch1
471
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
472
                    + extraspace
473
                    + words_pushed * wordSize);
474
      if (!r_2->is_valid()) {
475
        // sign extend??
476
        __ ldrw(rscratch1, Address(sp, ld_off));
477
        __ str(rscratch1, Address(sp, st_off));
478

479
      } else {
480

481
        __ ldr(rscratch1, Address(sp, ld_off));
482

483
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
485
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
486
          // ld_off == LSW, ld_off+wordSize == MSW
487
          // st_off == MSW, next_off == LSW
488
          __ str(rscratch1, Address(sp, next_off));
489
#ifdef ASSERT
490
          // Overwrite the unused slot with known junk
491
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
492
          __ str(rscratch1, Address(sp, st_off));
493
#endif /* ASSERT */
494
        } else {
495
          __ str(rscratch1, Address(sp, st_off));
496
        }
497
      }
498
    } else if (r_1->is_Register()) {
499
      Register r = r_1->as_Register();
500
      if (!r_2->is_valid()) {
501
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
503
        __ str(r, Address(sp, st_off));
504
      } else {
505
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
507
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
508
          // jlong/double in gpr
509
#ifdef ASSERT
510
          // Overwrite the unused slot with known junk
511
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
512
          __ str(rscratch1, Address(sp, st_off));
513
#endif /* ASSERT */
514
          __ str(r, Address(sp, next_off));
515
        } else {
516
          __ str(r, Address(sp, st_off));
517
        }
518
      }
519
    } else {
520
      assert(r_1->is_FloatRegister(), "");
521
      if (!r_2->is_valid()) {
522
        // only a float, use just part of the slot
523
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
524
      } else {
525
#ifdef ASSERT
526
        // Overwrite the unused slot with known junk
527
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
528
        __ str(rscratch1, Address(sp, st_off));
529
#endif /* ASSERT */
530
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
531
      }
532
    }
533
  }
534

535
  __ mov(esp, sp); // Interp expects args on caller's expression stack
536

537
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
538
  __ br(rscratch1);
539
}
540

541

542
void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
543
                                    int total_args_passed,
544
                                    int comp_args_on_stack,
545
                                    const BasicType *sig_bt,
546
                                    const VMRegPair *regs) {
547

548
  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

574
  if (VerifyAdapterCalls &&
575
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
576
#if 0
577
    // So, let's test for cascading c2i/i2c adapters right now.
578
    //  assert(Interpreter::contains($return_addr) ||
579
    //         StubRoutines::contains($return_addr),
580
    //         "i2c adapter must return to an interpreter frame");
581
    __ block_comment("verify_i2c { ");
582
    Label L_ok;
583
    if (Interpreter::code() != nullptr) {
584
      range_check(masm, rax, r11,
585
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
586
                  L_ok);
587
    }
588
    if (StubRoutines::initial_stubs_code() != nullptr) {
589
      range_check(masm, rax, r11,
590
                  StubRoutines::initial_stubs_code()->code_begin(),
591
                  StubRoutines::initial_stubs_code()->code_end(),
592
                  L_ok);
593
    }
594
    if (StubRoutines::final_stubs_code() != nullptr) {
595
      range_check(masm, rax, r11,
596
                  StubRoutines::final_stubs_code()->code_begin(),
597
                  StubRoutines::final_stubs_code()->code_end(),
598
                  L_ok);
599
    }
600
    const char* msg = "i2c adapter must return to an interpreter frame";
601
    __ block_comment(msg);
602
    __ stop(msg);
603
    __ bind(L_ok);
604
    __ block_comment("} verify_i2ce ");
605
#endif
606
  }
607

608
  // Cut-out for having no stack args.
609
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
610
  if (comp_args_on_stack) {
611
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
612
    __ andr(sp, rscratch1, -16);
613
  }
614

615
  // Will jump to the compiled code just as if compiled code was doing it.
616
  // Pre-load the register-jump target early, to schedule it better.
617
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
618

619
#if INCLUDE_JVMCI
620
  if (EnableJVMCI) {
621
    // check if this call should be routed towards a specific entry point
622
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
623
    Label no_alternative_target;
624
    __ cbz(rscratch2, no_alternative_target);
625
    __ mov(rscratch1, rscratch2);
626
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
627
    __ bind(no_alternative_target);
628
  }
629
#endif // INCLUDE_JVMCI
630

631
  // Now generate the shuffle code.
632
  for (int i = 0; i < total_args_passed; i++) {
633
    if (sig_bt[i] == T_VOID) {
634
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
635
      continue;
636
    }
637

638
    // Pick up 0, 1 or 2 words from SP+offset.
639

640
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
641
            "scrambled load targets?");
642
    // Load in argument order going down.
643
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
644
    // Point to interpreter value (vs. tag)
645
    int next_off = ld_off - Interpreter::stackElementSize;
646
    //
647
    //
648
    //
649
    VMReg r_1 = regs[i].first();
650
    VMReg r_2 = regs[i].second();
651
    if (!r_1->is_valid()) {
652
      assert(!r_2->is_valid(), "");
653
      continue;
654
    }
655
    if (r_1->is_stack()) {
656
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
657
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
658
      if (!r_2->is_valid()) {
659
        // sign extend???
660
        __ ldrsw(rscratch2, Address(esp, ld_off));
661
        __ str(rscratch2, Address(sp, st_off));
662
      } else {
663
        //
664
        // We are using two OptoRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative offsets, so the LSW is at the LOW address
672

673
        // ld_off is MSW so get LSW
674
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
675
                           next_off : ld_off;
676
        __ ldr(rscratch2, Address(esp, offset));
677
        // st_off is LSW (i.e. reg.first())
678
        __ str(rscratch2, Address(sp, st_off));
679
      }
680
    } else if (r_1->is_Register()) {  // Register argument
681
      Register r = r_1->as_Register();
682
      if (r_2->is_valid()) {
683
        //
684
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
689

690
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
691
                           next_off : ld_off;
692

693
        // this can be a misaligned move
694
        __ ldr(r, Address(esp, offset));
695
      } else {
696
        // sign extend and use a full word?
697
        __ ldrw(r, Address(esp, ld_off));
698
      }
699
    } else {
700
      if (!r_2->is_valid()) {
701
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
702
      } else {
703
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
704
      }
705
    }
706
  }
707

708
  __ mov(rscratch2, rscratch1);
709
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
710
  __ mov(rscratch1, rscratch2);
711

712
  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
721

722
  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
723

724
  __ br(rscratch1);
725
}
726

727
// ---------------------------------------------------------------
728
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
729
                                                            int total_args_passed,
730
                                                            int comp_args_on_stack,
731
                                                            const BasicType *sig_bt,
732
                                                            const VMRegPair *regs,
733
                                                            AdapterFingerPrint* fingerprint) {
734
  address i2c_entry = __ pc();
735

736
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
737

738
  address c2i_unverified_entry = __ pc();
739
  Label skip_fixup;
740

741
  Register data = rscratch2;
742
  Register receiver = j_rarg0;
743
  Register tmp = r10;  // A call-clobbered register not used for arg passing
744

745
  // -------------------------------------------------------------------------
746
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
747
  // to the interpreter.  The args start out packed in the compiled layout.  They
748
  // need to be unpacked into the interpreter layout.  This will almost always
749
  // require some stack space.  We grow the current (compiled) stack, then repack
750
  // the args.  We finally end in a jump to the generic interpreter entry point.
751
  // On exit from the interpreter, the interpreter will restore our SP (lest the
752
  // compiled code, which relies solely on SP and not FP, get sick).
753

754
  {
755
    __ block_comment("c2i_unverified_entry {");
756
    // Method might have been compiled since the call site was patched to
757
    // interpreted; if that is the case treat it as a miss so we can get
758
    // the call site corrected.
759
    __ ic_check(1 /* end_alignment */);
760
    __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
761

762
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
763
    __ cbz(rscratch1, skip_fixup);
764
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
765
    __ block_comment("} c2i_unverified_entry");
766
  }
767

768
  address c2i_entry = __ pc();
769

770
  // Class initialization barrier for static methods
771
  address c2i_no_clinit_check_entry = nullptr;
772
  if (VM_Version::supports_fast_class_init_checks()) {
773
    Label L_skip_barrier;
774

775
    { // Bypass the barrier for non-static methods
776
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
777
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
778
      __ br(Assembler::EQ, L_skip_barrier); // non-static
779
    }
780

781
    __ load_method_holder(rscratch2, rmethod);
782
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
783
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
784

785
    __ bind(L_skip_barrier);
786
    c2i_no_clinit_check_entry = __ pc();
787
  }
788

789
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
790
  bs->c2i_entry_barrier(masm);
791

792
  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
793

794
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
795
}
796

797
static int c_calling_convention_priv(const BasicType *sig_bt,
798
                                         VMRegPair *regs,
799
                                         int total_args_passed) {
800

801
// We return the number of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
803

804
    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
805
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
806
    };
807
    static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
808
      c_farg0, c_farg1, c_farg2, c_farg3,
809
      c_farg4, c_farg5, c_farg6, c_farg7
810
    };
811

812
    uint int_args = 0;
813
    uint fp_args = 0;
814
    uint stk_args = 0; // inc by 2 each time
815

816
    for (int i = 0; i < total_args_passed; i++) {
817
      switch (sig_bt[i]) {
818
      case T_BOOLEAN:
819
      case T_CHAR:
820
      case T_BYTE:
821
      case T_SHORT:
822
      case T_INT:
823
        if (int_args < Argument::n_int_register_parameters_c) {
824
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
825
        } else {
826
#ifdef __APPLE__
827
          // Less-than word types are stored one after another.
828
          // The code is unable to handle this so bailout.
829
          return -1;
830
#endif
831
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
832
          stk_args += 2;
833
        }
834
        break;
835
      case T_LONG:
836
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
837
        // fall through
838
      case T_OBJECT:
839
      case T_ARRAY:
840
      case T_ADDRESS:
841
      case T_METADATA:
842
        if (int_args < Argument::n_int_register_parameters_c) {
843
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
844
        } else {
845
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
846
          stk_args += 2;
847
        }
848
        break;
849
      case T_FLOAT:
850
        if (fp_args < Argument::n_float_register_parameters_c) {
851
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
852
        } else {
853
#ifdef __APPLE__
854
          // Less-than word types are stored one after another.
855
          // The code is unable to handle this so bailout.
856
          return -1;
857
#endif
858
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
859
          stk_args += 2;
860
        }
861
        break;
862
      case T_DOUBLE:
863
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
864
        if (fp_args < Argument::n_float_register_parameters_c) {
865
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
866
        } else {
867
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
868
          stk_args += 2;
869
        }
870
        break;
871
      case T_VOID: // Halves of longs and doubles
872
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
873
        regs[i].set_bad();
874
        break;
875
      default:
876
        ShouldNotReachHere();
877
        break;
878
      }
879
    }
880

881
  return stk_args;
882
}
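
// Note (an assumption about the Apple arm64 ABI, for context): unlike standard
// AAPCS64, macOS/AArch64 packs sub-word stack arguments to their natural size
// rather than widening each to an 8-byte slot, which is why the #ifdef __APPLE__
// paths above bail out with -1 for int-like and float stack arguments;
// c_calling_convention() below turns that bailout into a guarantee failure.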
883

884
int SharedRuntime::vector_calling_convention(VMRegPair *regs,
885
                                             uint num_bits,
886
                                             uint total_args_passed) {
887
  Unimplemented();
888
  return 0;
889
}
890

891
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
892
                                         VMRegPair *regs,
893
                                         int total_args_passed)
894
{
895
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
896
  guarantee(result >= 0, "Unsupported arguments configuration");
897
  return result;
898
}
899

900

901
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
902
  // We always ignore the frame_slots arg and just use the space just below frame pointer
903
  // which by this time is free to use
904
  switch (ret_type) {
905
  case T_FLOAT:
906
    __ strs(v0, Address(rfp, -wordSize));
907
    break;
908
  case T_DOUBLE:
909
    __ strd(v0, Address(rfp, -wordSize));
910
    break;
911
  case T_VOID:  break;
912
  default: {
913
    __ str(r0, Address(rfp, -wordSize));
914
    }
915
  }
916
}
917

918
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
919
  // We always ignore the frame_slots arg and just use the space just below frame pointer
920
  // which by this time is free to use
921
  switch (ret_type) {
922
  case T_FLOAT:
923
    __ ldrs(v0, Address(rfp, -wordSize));
924
    break;
925
  case T_DOUBLE:
926
    __ ldrd(v0, Address(rfp, -wordSize));
927
    break;
928
  case T_VOID:  break;
929
  default: {
930
    __ ldr(r0, Address(rfp, -wordSize));
931
    }
932
  }
933
}
934
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
935
  RegSet x;
936
  for ( int i = first_arg ; i < arg_count ; i++ ) {
937
    if (args[i].first()->is_Register()) {
938
      x = x + args[i].first()->as_Register();
939
    } else if (args[i].first()->is_FloatRegister()) {
940
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
941
    }
942
  }
943
  __ push(x, sp);
944
}
945

946
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
947
  RegSet x;
948
  for ( int i = first_arg ; i < arg_count ; i++ ) {
949
    if (args[i].first()->is_Register()) {
950
      x = x + args[i].first()->as_Register();
951
    } else {
952
      ;
953
    }
954
  }
955
  __ pop(x, sp);
956
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
957
    if (args[i].first()->is_Register()) {
958
      ;
959
    } else if (args[i].first()->is_FloatRegister()) {
960
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
961
    }
962
  }
963
}
964

965
static void verify_oop_args(MacroAssembler* masm,
966
                            const methodHandle& method,
967
                            const BasicType* sig_bt,
968
                            const VMRegPair* regs) {
969
  Register temp_reg = r19;  // not part of any compiled calling seq
970
  if (VerifyOops) {
971
    for (int i = 0; i < method->size_of_parameters(); i++) {
972
      if (sig_bt[i] == T_OBJECT ||
973
          sig_bt[i] == T_ARRAY) {
974
        VMReg r = regs[i].first();
975
        assert(r->is_valid(), "bad oop arg");
976
        if (r->is_stack()) {
977
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
978
          __ verify_oop(temp_reg);
979
        } else {
980
          __ verify_oop(r->as_Register());
981
        }
982
      }
983
    }
984
  }
985
}
986

987
// on exit, sp points to the ContinuationEntry
988
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
989
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
990
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
991
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
992

993
  stack_slots += (int)ContinuationEntry::size()/wordSize;
994
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata
995

996
  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
997

998
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
999
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
1000
  __ mov(rscratch1, sp); // we can't use sp as the source in str
1001
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1002

1003
  return map;
1004
}
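
// Reading aid: on return from continuation_enter_setup(), sp points at the newly
// reserved ContinuationEntry, its parent field holds the previous value of
// JavaThread::_cont_entry, JavaThread::_cont_entry now points at this entry, and
// the rfp/lr saved by the caller's enter() sit immediately above it.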
1005

1006
// on entry c_rarg1 points to the continuation
1007
//          sp points to ContinuationEntry
1008
//          c_rarg3 -- isVirtualThread
1009
static void fill_continuation_entry(MacroAssembler* masm) {
1010
#ifdef ASSERT
1011
  __ movw(rscratch1, ContinuationEntry::cookie_value());
1012
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
1013
#endif
1014

1015
  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
1016
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
1017
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
1018
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
1019
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));
1020

1021
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1022
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1023
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
1024
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
1025

1026
  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
1027
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
1028
}
1029

1030
// on entry, sp points to the ContinuationEntry
1031
// on exit, rfp points to the spilled rfp in the entry frame
1032
static void continuation_enter_cleanup(MacroAssembler* masm) {
1033
#ifndef PRODUCT
1034
  Label OK;
1035
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1036
  __ cmp(sp, rscratch1);
1037
  __ br(Assembler::EQ, OK);
1038
  __ stop("incorrect sp1");
1039
  __ bind(OK);
1040
#endif
1041
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1042
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1043

1044
  if (CheckJNICalls) {
1045
    // Check if this is a virtual thread continuation
1046
    Label L_skip_vthread_code;
1047
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
1048
    __ cbzw(rscratch1, L_skip_vthread_code);
1049

1050
    // If the held monitor count is > 0 and this vthread is terminating then
1051
    // it failed to release a JNI monitor. So we issue the same log message
1052
    // that JavaThread::exit does.
1053
    __ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
1054
    __ cbz(rscratch1, L_skip_vthread_code);
1055

1056
    // Save return value potentially containing the exception oop in callee-saved R19.
1057
    __ mov(r19, r0);
1058
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
1059
    // Restore potential return value.
1060
    __ mov(r0, r19);
1061

1062
    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
1063
    // on termination. The held count is implicitly zeroed below when we restore from
1064
    // the parent held count (which has to be zero).
1065
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
1066

1067
    __ bind(L_skip_vthread_code);
1068
  }
1069
#ifdef ASSERT
1070
  else {
1071
    // Check if this is a virtual thread continuation
1072
    Label L_skip_vthread_code;
1073
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
1074
    __ cbzw(rscratch1, L_skip_vthread_code);
1075

1076
    // See comment just above. If not checking JNI calls the JNI count is only
1077
    // needed for assertion checking.
1078
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
1079

1080
    __ bind(L_skip_vthread_code);
1081
  }
1082
#endif
1083

1084
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
1085
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
1086

1087
  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
1088
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
1089
  __ add(rfp, sp, (int)ContinuationEntry::size());
1090
}
1091

1092
// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1093
// On entry: c_rarg1 -- the continuation object
1094
//           c_rarg2 -- isContinue
1095
//           c_rarg3 -- isVirtualThread
1096
static void gen_continuation_enter(MacroAssembler* masm,
1097
                                 const methodHandle& method,
1098
                                 const BasicType* sig_bt,
1099
                                 const VMRegPair* regs,
1100
                                 int& exception_offset,
1101
                                 OopMapSet*oop_maps,
1102
                                 int& frame_complete,
1103
                                 int& stack_slots,
1104
                                 int& interpreted_entry_offset,
1105
                                 int& compiled_entry_offset) {
1106
  //verify_oop_args(masm, method, sig_bt, regs);
1107
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1108

1109
  address start = __ pc();
1110

1111
  Label call_thaw, exit;
1112

1113
  // i2i entry used at interp_only_mode only
1114
  interpreted_entry_offset = __ pc() - start;
1115
  {
1116

1117
#ifdef ASSERT
1118
    Label is_interp_only;
1119
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1120
    __ cbnzw(rscratch1, is_interp_only);
1121
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1122
    __ bind(is_interp_only);
1123
#endif
1124

1125
    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1126
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1127
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1128
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1129
    __ push_cont_fastpath(rthread);
1130

1131
    __ enter();
1132
    stack_slots = 2; // will be adjusted in setup
1133
    OopMap* map = continuation_enter_setup(masm, stack_slots);
1134
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
1135
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
1136

1137
    fill_continuation_entry(masm);
1138

1139
    __ cbnz(c_rarg2, call_thaw);
1140

1141
    const address tr_call = __ trampoline_call(resolve);
1142
    if (tr_call == nullptr) {
1143
      fatal("CodeCache is full at gen_continuation_enter");
1144
    }
1145

1146
    oop_maps->add_gc_map(__ pc() - start, map);
1147
    __ post_call_nop();
1148

1149
    __ b(exit);
1150

1151
    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1152
    if (stub == nullptr) {
1153
      fatal("CodeCache is full at gen_continuation_enter");
1154
    }
1155
  }
1156

1157
  // compiled entry
1158
  __ align(CodeEntryAlignment);
1159
  compiled_entry_offset = __ pc() - start;
1160

1161
  __ enter();
1162
  stack_slots = 2; // will be adjusted in setup
1163
  OopMap* map = continuation_enter_setup(masm, stack_slots);
1164
  frame_complete = __ pc() - start;
1165

1166
  fill_continuation_entry(masm);
1167

1168
  __ cbnz(c_rarg2, call_thaw);
1169

1170
  const address tr_call = __ trampoline_call(resolve);
1171
  if (tr_call == nullptr) {
1172
    fatal("CodeCache is full at gen_continuation_enter");
1173
  }
1174

1175
  oop_maps->add_gc_map(__ pc() - start, map);
1176
  __ post_call_nop();
1177

1178
  __ b(exit);
1179

1180
  __ bind(call_thaw);
1181

1182
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1183
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1184
  ContinuationEntry::_return_pc_offset = __ pc() - start;
1185
  __ post_call_nop();
1186

1187
  __ bind(exit);
1188
  continuation_enter_cleanup(masm);
1189
  __ leave();
1190
  __ ret(lr);
1191

1192
  /// exception handling
1193

1194
  exception_offset = __ pc() - start;
1195
  {
1196
      __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1197

1198
      continuation_enter_cleanup(masm);
1199

1200
      __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1201
      __ authenticate_return_address(c_rarg1);
1202
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1203

1204
      // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1205

1206
      __ mov(r1, r0); // the exception handler
1207
      __ mov(r0, r19); // restore return value containing the exception oop
1208
      __ verify_oop(r0);
1209

1210
      __ leave();
1211
      __ mov(r3, lr);
1212
      __ br(r1); // the exception handler
1213
  }
1214

1215
  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1216
  if (stub == nullptr) {
1217
    fatal("CodeCache is full at gen_continuation_enter");
1218
  }
1219
}
1220

1221
static void gen_continuation_yield(MacroAssembler* masm,
1222
                                   const methodHandle& method,
1223
                                   const BasicType* sig_bt,
1224
                                   const VMRegPair* regs,
1225
                                   OopMapSet* oop_maps,
1226
                                   int& frame_complete,
1227
                                   int& stack_slots,
1228
                                   int& compiled_entry_offset) {
1229
    enum layout {
1230
      rfp_off1,
1231
      rfp_off2,
1232
      lr_off,
1233
      lr_off2,
1234
      framesize // inclusive of return address
1235
    };
1236
    // assert(is_even(framesize/2), "sp not 16-byte aligned");
1237
    stack_slots = framesize /  VMRegImpl::slots_per_word;
1238
    assert(stack_slots == 2, "recheck layout");
1239

1240
    address start = __ pc();
1241

1242
    compiled_entry_offset = __ pc() - start;
1243
    __ enter();
1244

1245
    __ mov(c_rarg1, sp);
1246

1247
    frame_complete = __ pc() - start;
1248
    address the_pc = __ pc();
1249

1250
    __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup
1251

1252
    __ mov(c_rarg0, rthread);
1253
    __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1254
    __ call_VM_leaf(Continuation::freeze_entry(), 2);
1255
    __ reset_last_Java_frame(true);
1256

1257
    Label pinned;
1258

1259
    __ cbnz(r0, pinned);
1260

1261
    // We've succeeded, set sp to the ContinuationEntry
1262
    __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1263
    __ mov(sp, rscratch1);
1264
    continuation_enter_cleanup(masm);
1265

1266
    __ bind(pinned); // pinned -- return to caller
1267

1268
    // handle pending exception thrown by freeze
1269
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1270
    Label ok;
1271
    __ cbz(rscratch1, ok);
1272
    __ leave();
1273
    __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1274
    __ br(rscratch1);
1275
    __ bind(ok);
1276

1277
    __ leave();
1278
    __ ret(lr);
1279

1280
    OopMap* map = new OopMap(framesize, 1);
1281
    oop_maps->add_gc_map(the_pc - start, map);
1282
}
1283

1284
static void gen_special_dispatch(MacroAssembler* masm,
1285
                                 const methodHandle& method,
1286
                                 const BasicType* sig_bt,
1287
                                 const VMRegPair* regs) {
1288
  verify_oop_args(masm, method, sig_bt, regs);
1289
  vmIntrinsics::ID iid = method->intrinsic_id();
1290

1291
  // Now write the args into the outgoing interpreter space
1292
  bool     has_receiver   = false;
1293
  Register receiver_reg   = noreg;
1294
  int      member_arg_pos = -1;
1295
  Register member_reg     = noreg;
1296
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1297
  if (ref_kind != 0) {
1298
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1299
    member_reg = r19;  // known to be free at this point
1300
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1301
  } else if (iid == vmIntrinsics::_invokeBasic) {
1302
    has_receiver = true;
1303
  } else if (iid == vmIntrinsics::_linkToNative) {
1304
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1305
    member_reg = r19;  // known to be free at this point
1306
  } else {
1307
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1308
  }
1309

1310
  if (member_reg != noreg) {
1311
    // Load the member_arg into register, if necessary.
1312
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1313
    VMReg r = regs[member_arg_pos].first();
1314
    if (r->is_stack()) {
1315
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1316
    } else {
1317
      // no data motion is needed
1318
      member_reg = r->as_Register();
1319
    }
1320
  }
1321

1322
  if (has_receiver) {
1323
    // Make sure the receiver is loaded into a register.
1324
    assert(method->size_of_parameters() > 0, "oob");
1325
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1326
    VMReg r = regs[0].first();
1327
    assert(r->is_valid(), "bad receiver arg");
1328
    if (r->is_stack()) {
1329
      // Porting note:  This assumes that compiled calling conventions always
1330
      // pass the receiver oop in a register.  If this is not true on some
1331
      // platform, pick a temp and load the receiver from stack.
1332
      fatal("receiver always in a register");
1333
      receiver_reg = r2;  // known to be free at this point
1334
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1335
    } else {
1336
      // no data motion is needed
1337
      receiver_reg = r->as_Register();
1338
    }
1339
  }
1340

1341
  // Figure out which address we are really jumping to:
1342
  MethodHandles::generate_method_handle_dispatch(masm, iid,
1343
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1344
}
1345

1346
// ---------------------------------------------------------------------------
1347
// Generate a native wrapper for a given method.  The method takes arguments
1348
// in the Java compiled code convention, marshals them to the native
1349
// convention (handlizes oops, etc), transitions to native, makes the call,
1350
// returns to java state (possibly blocking), unhandlizes any result and
1351
// returns.
1352
//
1353
// Critical native functions are a shorthand for the use of
1354
// GetPrimtiveArrayCritical and disallow the use of any other JNI
1355
// functions.  The wrapper is expected to unpack the arguments before
1356
// passing them to the callee. Critical native functions leave the state _in_Java,
1357
// since they block out GC.
1358
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
1359
// block and the check for pending exceptions, since it's impossible for them
1360
// to be thrown.
1361
//
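// In outline, the generated wrapper performs (a simplified sketch, not the
// exact instruction sequence):
//   1. inline cache check, verified entry point, stack bang, frame setup
//   2. shuffle Java args into the C convention, handlizing any oops
//   3. set_last_Java_frame, optional locking, transition to _thread_in_native
//   4. call the native function
//   5. transition back, safepoint/suspend poll, optional unlocking
//   6. unhandlize an oop result, reset the handle block, return or forward
//      any pending exception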
1362
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1363
                                                const methodHandle& method,
1364
                                                int compile_id,
1365
                                                BasicType* in_sig_bt,
1366
                                                VMRegPair* in_regs,
1367
                                                BasicType ret_type) {
1368
  if (method->is_continuation_native_intrinsic()) {
1369
    int exception_offset = -1;
1370
    OopMapSet* oop_maps = new OopMapSet();
1371
    int frame_complete = -1;
1372
    int stack_slots = -1;
1373
    int interpreted_entry_offset = -1;
1374
    int vep_offset = -1;
1375
    if (method->is_continuation_enter_intrinsic()) {
1376
      gen_continuation_enter(masm,
1377
                             method,
1378
                             in_sig_bt,
1379
                             in_regs,
1380
                             exception_offset,
1381
                             oop_maps,
1382
                             frame_complete,
1383
                             stack_slots,
1384
                             interpreted_entry_offset,
1385
                             vep_offset);
1386
    } else if (method->is_continuation_yield_intrinsic()) {
1387
      gen_continuation_yield(masm,
1388
                             method,
1389
                             in_sig_bt,
1390
                             in_regs,
1391
                             oop_maps,
1392
                             frame_complete,
1393
                             stack_slots,
1394
                             vep_offset);
1395
    } else {
1396
      guarantee(false, "Unknown Continuation native intrinsic");
1397
    }
1398

1399
#ifdef ASSERT
1400
    if (method->is_continuation_enter_intrinsic()) {
1401
      assert(interpreted_entry_offset != -1, "Must be set");
1402
      assert(exception_offset != -1,         "Must be set");
1403
    } else {
1404
      assert(interpreted_entry_offset == -1, "Must be unset");
1405
      assert(exception_offset == -1,         "Must be unset");
1406
    }
1407
    assert(frame_complete != -1,    "Must be set");
1408
    assert(stack_slots != -1,       "Must be set");
1409
    assert(vep_offset != -1,        "Must be set");
1410
#endif
1411

1412
    __ flush();
1413
    nmethod* nm = nmethod::new_native_nmethod(method,
1414
                                              compile_id,
1415
                                              masm->code(),
1416
                                              vep_offset,
1417
                                              frame_complete,
1418
                                              stack_slots,
1419
                                              in_ByteSize(-1),
1420
                                              in_ByteSize(-1),
1421
                                              oop_maps,
1422
                                              exception_offset);
1423
    if (nm == nullptr) return nm;
1424
    if (method->is_continuation_enter_intrinsic()) {
1425
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1426
    } else if (method->is_continuation_yield_intrinsic()) {
1427
      _cont_doYield_stub = nm;
1428
    } else {
1429
      guarantee(false, "Unknown Continuation native intrinsic");
1430
    }
1431
    return nm;
1432
  }
1433

1434
  if (method->is_method_handle_intrinsic()) {
1435
    vmIntrinsics::ID iid = method->intrinsic_id();
1436
    intptr_t start = (intptr_t)__ pc();
1437
    int vep_offset = ((intptr_t)__ pc()) - start;
1438

1439
    // First instruction must be a nop as it may need to be patched on deoptimisation
1440
    __ nop();
1441
    gen_special_dispatch(masm,
1442
                         method,
1443
                         in_sig_bt,
1444
                         in_regs);
1445
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1446
    __ flush();
1447
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1448
    return nmethod::new_native_nmethod(method,
1449
                                       compile_id,
1450
                                       masm->code(),
1451
                                       vep_offset,
1452
                                       frame_complete,
1453
                                       stack_slots / VMRegImpl::slots_per_word,
1454
                                       in_ByteSize(-1),
1455
                                       in_ByteSize(-1),
1456
                                       nullptr);
1457
  }
1458
  address native_func = method->native_function();
1459
  assert(native_func != nullptr, "must have function");
1460

1461
  // An OopMap for lock (and class if static)
1462
  OopMapSet *oop_maps = new OopMapSet();
1463
  intptr_t start = (intptr_t)__ pc();
1464

1465
  // We have received a description of where all the java arg are located
1466
  // on entry to the wrapper. We need to convert these args to where
1467
  // the jni function will expect them. To figure out where they go
1468
  // we convert the java signature to a C signature by inserting
1469
  // the hidden arguments as arg[0] and possibly arg[1] (static method)
1470

1471
  const int total_in_args = method->size_of_parameters();
1472
  int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1473

1474
  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1475
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1476
  BasicType* in_elem_bt = nullptr;
1477

1478
  int argc = 0;
1479
  out_sig_bt[argc++] = T_ADDRESS;
1480
  if (method->is_static()) {
1481
    out_sig_bt[argc++] = T_OBJECT;
1482
  }
1483

1484
  for (int i = 0; i < total_in_args ; i++ ) {
1485
    out_sig_bt[argc++] = in_sig_bt[i];
1486
  }
1487

1488
  // Now figure out where the args must be stored and how much stack space
1489
  // they require.
1490
  int out_arg_slots;
1491
  out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1492

1493
  if (out_arg_slots < 0) {
1494
    return nullptr;
1495
  }
1496

1497
  // Compute framesize for the wrapper.  We need to handlize all oops in
1498
  // incoming registers
1499

1500
  // Calculate the total number of stack slots we will need.
1501

1502
  // First count the abi requirement plus all of the outgoing args
1503
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1504

1505
  // Now the space for the inbound oop handle area
1506
  int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1507

1508
  int oop_handle_offset = stack_slots;
1509
  stack_slots += total_save_slots;
1510

1511
  // Now any space we need for handlizing a klass if static method
1512

1513
  int klass_slot_offset = 0;
1514
  int klass_offset = -1;
1515
  int lock_slot_offset = 0;
1516
  bool is_static = false;
1517

1518
  if (method->is_static()) {
1519
    klass_slot_offset = stack_slots;
1520
    stack_slots += VMRegImpl::slots_per_word;
1521
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1522
    is_static = true;
1523
  }
1524

1525
  // Plus a lock if needed
1526

1527
  if (method->is_synchronized()) {
1528
    lock_slot_offset = stack_slots;
1529
    stack_slots += VMRegImpl::slots_per_word;
1530
  }
1531

1532
  // Now a place (+2) to save return values or temp during shuffling
1533
  // + 4 for return address (which we own) and saved rfp
1534
  stack_slots += 6;
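  // (That is, 2 slots for the move/temp area plus 4 slots, i.e. two 64-bit
  // words, for the return address and the saved rfp; each VMRegImpl stack
  // slot is 4 bytes.)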
1535

1536
  // Ok The space we have allocated will look like:
1537
  //
1538
  //
1539
  // FP-> |                     |
1540
  //      |---------------------|
1541
  //      | 2 slots for moves   |
1542
  //      |---------------------|
1543
  //      | lock box (if sync)  |
1544
  //      |---------------------| <- lock_slot_offset
1545
  //      | klass (if static)   |
1546
  //      |---------------------| <- klass_slot_offset
1547
  //      | oopHandle area      |
1548
  //      |---------------------| <- oop_handle_offset (8 java arg registers)
1549
  //      | outbound memory     |
1550
  //      | based arguments     |
1551
  //      |                     |
1552
  //      |---------------------|
1553
  //      |                     |
1554
  // SP-> | out_preserved_slots |
1555
  //
1556
  //
1557

1558

1559
  // Now compute actual number of stack words we need rounding to make
1560
  // stack properly aligned.
1561
  stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1562

1563
  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
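  // Worked example (illustrative only, assuming VMRegImpl::slots_per_word == 2
  // and 16-byte stack alignment, so StackAlignmentInSlots == 4): a non-static
  // synchronized method with no stack-passed outgoing args accumulates
  // 0 + 0 + 16 (oop handle area) + 2 (lock) + 6 = 24 slots, which is already
  // aligned, giving stack_size == 96 bytes.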
1564

1565
  // First thing make an ic check to see if we should even be here
1566

1567
  // We are free to use all registers as temps without saving them and
1568
  // restoring them except rfp. rfp is the only callee save register
1569
  // as far as the interpreter and the compiler(s) are concerned.
1570

1571
  const Register receiver = j_rarg0;
1572

1573
  Label exception_pending;
1574

1575
  assert_different_registers(receiver, rscratch1);
1576
  __ verify_oop(receiver);
1577
  __ ic_check(8 /* end_alignment */);
1578

1579
  // Verified entry point must be aligned
1580
  int vep_offset = ((intptr_t)__ pc()) - start;
1581

1582
  // If we have to make this method not-entrant we'll overwrite its
1583
  // first instruction with a jump.  For this action to be legal we
1584
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
1585
  // SVC, HVC, or SMC.  Make it a NOP.
1586
  __ nop();
1587

1588
  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1589
    Label L_skip_barrier;
1590
    __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1591
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1592
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1593

1594
    __ bind(L_skip_barrier);
1595
  }
1596

1597
  // Generate stack overflow check
1598
  __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1599

1600
  // Generate a new frame for the wrapper.
1601
  __ enter();
1602
  // -2 because return address is already present and so is saved rfp
1603
  __ sub(sp, sp, stack_size - 2*wordSize);
1604

1605
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1606
  bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1607

1608
  // Frame is now completed as far as size and linkage.
1609
  int frame_complete = ((intptr_t)__ pc()) - start;
1610

1611
  // We use r20 as the oop handle for the receiver/klass
1612
  // It is callee save so it survives the call to native
1613

1614
  const Register oop_handle_reg = r20;
1615

1616
  //
1617
  // We immediately shuffle the arguments so that any vm call we have to
1618
  // make from here on out (sync slow path, jvmti, etc.) we will have
1619
  // captured the oops from our caller and have a valid oopMap for
1620
  // them.
1621

1622
  // -----------------
1623
  // The Grand Shuffle
1624

1625
  // The Java calling convention is either equal (linux) or denser (win64) than the
1626
  // c calling convention. However, because of the jni_env argument, the c calling
1627
  // convention always has at least one more (and two for static) arguments than Java.
1628
  // Therefore if we move the args from java -> c backwards then we will never have
1629
  // a register->register conflict and we don't have to build a dependency graph
1630
  // and figure out how to break any cycles.
1631
  //
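  // Sketch of why backwards works: every C argument position sits at least one
  // slot later than its Java counterpart (JNIEnv*, and for static methods the
  // class mirror, are prepended), so walking from the last argument to the
  // first writes each destination before it could still be needed as the
  // source of a not-yet-moved argument.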
1632

1633
  // Record sp-based slot for receiver on stack for non-static methods
1634
  int receiver_offset = -1;
1635

1636
  // This is a trick. We double the stack slots so we can claim
1637
  // the oops in the caller's frame. Since we are sure to have
1638
  // more args than the caller, doubling is enough to make
1639
  // sure we can capture all the incoming oop args from the
1640
  // caller.
1641
  //
1642
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1643

1644
  // Mark location of rfp (someday)
1645
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1646

1647

1648
  int float_args = 0;
1649
  int int_args = 0;
1650

1651
#ifdef ASSERT
1652
  bool reg_destroyed[Register::number_of_registers];
1653
  bool freg_destroyed[FloatRegister::number_of_registers];
1654
  for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1655
    reg_destroyed[r] = false;
1656
  }
1657
  for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1658
    freg_destroyed[f] = false;
1659
  }
1660

1661
#endif /* ASSERT */
1662

1663
  // For JNI natives the incoming and outgoing registers are offset upwards.
1664
  GrowableArray<int> arg_order(2 * total_in_args);
1665
  VMRegPair tmp_vmreg;
1666
  tmp_vmreg.set2(r19->as_VMReg());
1667

1668
  for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1669
    arg_order.push(i);
1670
    arg_order.push(c_arg);
1671
  }
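  // Example of the resulting order (hypothetical instance method with
  // total_in_args == 3 and total_c_args == 4): arg_order holds the pairs
  // (2,3), (1,2), (0,1), i.e. Java arg 2 -> C arg 3 is moved first and
  // Java arg 0 -> C arg 1 last.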
1672

1673
  int temploc = -1;
1674
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
1675
    int i = arg_order.at(ai);
1676
    int c_arg = arg_order.at(ai + 1);
1677
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
1678
    assert(c_arg != -1 && i != -1, "wrong order");
1679
#ifdef ASSERT
1680
    if (in_regs[i].first()->is_Register()) {
1681
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1682
    } else if (in_regs[i].first()->is_FloatRegister()) {
1683
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1684
    }
1685
    if (out_regs[c_arg].first()->is_Register()) {
1686
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1687
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1688
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1689
    }
1690
#endif /* ASSERT */
1691
    switch (in_sig_bt[i]) {
1692
      case T_ARRAY:
1693
      case T_OBJECT:
1694
        __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1695
                       ((i == 0) && (!is_static)),
1696
                       &receiver_offset);
1697
        int_args++;
1698
        break;
1699
      case T_VOID:
1700
        break;
1701

1702
      case T_FLOAT:
1703
        __ float_move(in_regs[i], out_regs[c_arg]);
1704
        float_args++;
1705
        break;
1706

1707
      case T_DOUBLE:
1708
        assert( i + 1 < total_in_args &&
1709
                in_sig_bt[i + 1] == T_VOID &&
1710
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1711
        __ double_move(in_regs[i], out_regs[c_arg]);
1712
        float_args++;
1713
        break;
1714

1715
      case T_LONG :
1716
        __ long_move(in_regs[i], out_regs[c_arg]);
1717
        int_args++;
1718
        break;
1719

1720
      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1721

1722
      default:
1723
        __ move32_64(in_regs[i], out_regs[c_arg]);
1724
        int_args++;
1725
    }
1726
  }
1727

1728
  // point c_arg at the first arg that is already loaded in case we
1729
  // need to spill before we call out
1730
  int c_arg = total_c_args - total_in_args;
1731

1732
  // Pre-load a static method's oop into c_rarg1.
1733
  if (method->is_static()) {
1734

1735
    //  load oop into a register
1736
    __ movoop(c_rarg1,
1737
              JNIHandles::make_local(method->method_holder()->java_mirror()));
1738

1739
    // Now handlize the static class mirror; it's known not-null.
1740
    __ str(c_rarg1, Address(sp, klass_offset));
1741
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1742

1743
    // Now get the handle
1744
    __ lea(c_rarg1, Address(sp, klass_offset));
1745
    // and protect the arg if we must spill
1746
    c_arg--;
1747
  }
1748

1749
  // Change state to native (we save the return address in the thread, since it might not
1750
  // be pushed on the stack when we do a stack traversal).
1751
  // We use the same pc/oopMap repeatedly when we call out
1752

1753
  Label native_return;
1754
  __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1755

1756
  Label dtrace_method_entry, dtrace_method_entry_done;
1757
  if (DTraceMethodProbes) {
1758
    __ b(dtrace_method_entry);
1759
    __ bind(dtrace_method_entry_done);
1760
  }
1761

1762
  // RedefineClasses() tracing support for obsolete method entry
1763
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
1764
    // protect the args we've loaded
1765
    save_args(masm, total_c_args, c_arg, out_regs);
1766
    __ mov_metadata(c_rarg1, method());
1767
    __ call_VM_leaf(
1768
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1769
      rthread, c_rarg1);
1770
    restore_args(masm, total_c_args, c_arg, out_regs);
1771
  }
1772

1773
  // Lock a synchronized method
1774

1775
  // Register definitions used by locking and unlocking
1776

1777
  const Register swap_reg = r0;
1778
  const Register obj_reg  = r19;  // Will contain the oop
1779
  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1780
  const Register old_hdr  = r13;  // value of old header at unlock time
1781
  const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
1782
  const Register tmp = lr;
1783

1784
  Label slow_path_lock;
1785
  Label lock_done;
1786

1787
  if (method->is_synchronized()) {
1788
    Label count;
1789
    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1790

1791
    // Get the handle (the 2nd argument)
1792
    __ mov(oop_handle_reg, c_rarg1);
1793

1794
    // Get address of the box
1795

1796
    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1797

1798
    // Load the oop from the handle
1799
    __ ldr(obj_reg, Address(oop_handle_reg, 0));
1800

1801
    if (LockingMode == LM_MONITOR) {
1802
      __ b(slow_path_lock);
1803
    } else if (LockingMode == LM_LEGACY) {
1804
      // Load (object->mark() | 1) into swap_reg %r0
1805
      __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1806
      __ orr(swap_reg, rscratch1, 1);
1807

1808
      // Save (object->mark() | 1) into BasicLock's displaced header
1809
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
1810

1811
      // src -> dest iff dest == r0 else r0 <- dest
1812
      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1813

1814
      // Hmm should this move to the slow path code area???
1815

1816
      // Test if the oopMark is an obvious stack pointer, i.e.,
1817
      //  1) (mark & 3) == 0, and
1818
      //  2) sp <= mark < sp + os::pagesize()
1819
      // These 3 tests can be done by evaluating the following
1820
      // expression: ((mark - sp) & (3 - os::vm_page_size())),
1821
      // assuming both stack pointer and pagesize have their
1822
      // least significant 2 bits clear.
1823
      // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
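      // Worked example (assuming a 4 KiB page): 3 - 4096 == 0x...f003, so the
      // masked result is zero exactly when (mark - sp) is 4-byte aligned and
      // lies in [0, 4096), i.e. the mark looks like a pointer into our own
      // stack page, which is the recursive-lock case.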
1824

1825
      __ sub(swap_reg, sp, swap_reg);
1826
      __ neg(swap_reg, swap_reg);
1827
      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1828

1829
      // Save the test result, for recursive case, the result is zero
1830
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
1831
      __ br(Assembler::NE, slow_path_lock);
1832
    } else {
1833
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1834
      __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1835
    }
1836
    __ bind(count);
1837
    __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1838

1839
    // Slow path will re-enter here
1840
    __ bind(lock_done);
1841
  }
1842

1843

1844
  // Finally just about ready to make the JNI call
1845

1846
  // get JNIEnv* which is first argument to native
1847
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1848

1849
  // Now set thread in native
1850
  __ mov(rscratch1, _thread_in_native);
1851
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1852
  __ stlrw(rscratch1, rscratch2);
1853

1854
  __ rt_call(native_func);
1855

1856
  __ bind(native_return);
1857

1858
  intptr_t return_pc = (intptr_t) __ pc();
1859
  oop_maps->add_gc_map(return_pc - start, map);
1860

1861
  // Verify or restore cpu control state after JNI call
1862
  __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1863

1864
  // Unpack native results.
1865
  switch (ret_type) {
1866
  case T_BOOLEAN: __ c2bool(r0);                     break;
1867
  case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1868
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1869
  case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1870
  case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1871
  case T_DOUBLE :
1872
  case T_FLOAT  :
1873
    // Result is in v0 we'll save as needed
1874
    break;
1875
  case T_ARRAY:                 // Really a handle
1876
  case T_OBJECT:                // Really a handle
1877
      break; // can't de-handlize until after safepoint check
1878
  case T_VOID: break;
1879
  case T_LONG: break;
1880
  default       : ShouldNotReachHere();
1881
  }
1882

1883
  Label safepoint_in_progress, safepoint_in_progress_done;
1884
  Label after_transition;
1885

1886
  // Switch thread to "native transition" state before reading the synchronization state.
1887
  // This additional state is necessary because reading and testing the synchronization
1888
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
1889
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1890
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
1891
  //     Thread A is resumed to finish this native method, but doesn't block here since it
1892
  //     didn't see any synchronization in progress, and escapes.
1893
  __ mov(rscratch1, _thread_in_native_trans);
1894

1895
  __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1896

1897
  // Force this write out before the read below
1898
  if (!UseSystemMemoryBarrier) {
1899
    __ dmb(Assembler::ISH);
1900
  }
1901

1902
  __ verify_sve_vector_length();
1903

1904
  // Check for safepoint operation in progress and/or pending suspend requests.
1905
  {
1906
    // We need an acquire here to ensure that any subsequent load of the
1907
    // global SafepointSynchronize::_state flag is ordered after this load
1908
    // of the thread-local polling word.  We don't want this poll to
1909
    // return false (i.e. not safepointing) and a later poll of the global
1910
    // SafepointSynchronize::_state spuriously to return true.
1911
    //
1912
    // This is to avoid a race when we're in a native->Java transition
1913
    // racing the code which wakes up from a safepoint.
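    // Ordering sketch (informal): the store of _thread_in_native_trans above
    // is ordered before this poll (by the dmb above, or by
    // UseSystemMemoryBarrier), and the acquire on the poll orders the
    // polling-word load before any later load of SafepointSynchronize::_state.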
1914

1915
    __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1916
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1917
    __ cbnzw(rscratch1, safepoint_in_progress);
1918
    __ bind(safepoint_in_progress_done);
1919
  }
1920

1921
  // change thread state
1922
  __ mov(rscratch1, _thread_in_Java);
1923
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1924
  __ stlrw(rscratch1, rscratch2);
1925
  __ bind(after_transition);
1926

1927
  Label reguard;
1928
  Label reguard_done;
1929
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1930
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1931
  __ br(Assembler::EQ, reguard);
1932
  __ bind(reguard_done);
1933

1934
  // native result if any is live
1935

1936
  // Unlock
1937
  Label unlock_done;
1938
  Label slow_path_unlock;
1939
  if (method->is_synchronized()) {
1940

1941
    // Get locked oop from the handle we passed to jni
1942
    __ ldr(obj_reg, Address(oop_handle_reg, 0));
1943

1944
    Label done, not_recursive;
1945

1946
    if (LockingMode == LM_LEGACY) {
1947
      // Simple recursive lock?
1948
      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1949
      __ cbnz(rscratch1, not_recursive);
1950
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1951
      __ b(done);
1952
    }
1953

1954
    __ bind(not_recursive);
1955

1956
    // Must save r0 if it is live now because cmpxchg must use it
1957
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1958
      save_native_result(masm, ret_type, stack_slots);
1959
    }
1960

1961
    if (LockingMode == LM_MONITOR) {
1962
      __ b(slow_path_unlock);
1963
    } else if (LockingMode == LM_LEGACY) {
1964
      // get address of the stack lock
1965
      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1966
      //  get old displaced header
1967
      __ ldr(old_hdr, Address(r0, 0));
1968

1969
      // Atomic swap old header if oop still contains the stack lock
1970
      Label count;
1971
      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1972
      __ bind(count);
1973
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1974
    } else {
1975
      assert(LockingMode == LM_LIGHTWEIGHT, "");
1976
      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1977
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1978
    }
1979

1980
    // slow path re-enters here
1981
    __ bind(unlock_done);
1982
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1983
      restore_native_result(masm, ret_type, stack_slots);
1984
    }
1985

1986
    __ bind(done);
1987
  }
1988

1989
  Label dtrace_method_exit, dtrace_method_exit_done;
1990
  if (DTraceMethodProbes) {
1991
    __ b(dtrace_method_exit);
1992
    __ bind(dtrace_method_exit_done);
1993
  }
1994

1995
  __ reset_last_Java_frame(false);
1996

1997
  // Unbox oop result, e.g. JNIHandles::resolve result.
1998
  if (is_reference_type(ret_type)) {
1999
    __ resolve_jobject(r0, r1, r2);
2000
  }
2001

2002
  if (CheckJNICalls) {
2003
    // clear_pending_jni_exception_check
2004
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2005
  }
2006

2007
  // reset handle block
2008
  __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2009
  __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2010

2011
  __ leave();
2012

2013
  // Any exception pending?
2014
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2015
  __ cbnz(rscratch1, exception_pending);
2016

2017
  // We're done
2018
  __ ret(lr);
2019

2020
  // Unexpected paths are out of line and go here
2021

2022
  // forward the exception
2023
  __ bind(exception_pending);
2024

2025
  // and forward the exception
2026
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2027

2028
  // Slow path locking & unlocking
2029
  if (method->is_synchronized()) {
2030

2031
    __ block_comment("Slow path lock {");
2032
    __ bind(slow_path_lock);
2033

2034
    // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2035
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
2036

2037
    // protect the args we've loaded
2038
    save_args(masm, total_c_args, c_arg, out_regs);
2039

2040
    __ mov(c_rarg0, obj_reg);
2041
    __ mov(c_rarg1, lock_reg);
2042
    __ mov(c_rarg2, rthread);
2043

2044
    // Not a leaf but we have last_Java_frame setup as we want
2045
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2046
    restore_args(masm, total_c_args, c_arg, out_regs);
2047

2048
#ifdef ASSERT
2049
    { Label L;
2050
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2051
      __ cbz(rscratch1, L);
2052
      __ stop("no pending exception allowed on exit from monitorenter");
2053
      __ bind(L);
2054
    }
2055
#endif
2056
    __ b(lock_done);
2057

2058
    __ block_comment("} Slow path lock");
2059

2060
    __ block_comment("Slow path unlock {");
2061
    __ bind(slow_path_unlock);
2062

2063
    // If we haven't already saved the native result we must save it now, as the FP result registers
2064
    // are still exposed.
2065

2066
    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2067
      save_native_result(masm, ret_type, stack_slots);
2068
    }
2069

2070
    __ mov(c_rarg2, rthread);
2071
    __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2072
    __ mov(c_rarg0, obj_reg);
2073

2074
    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2075
    // NOTE that obj_reg == r19 currently
2076
    __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2077
    __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2078

2079
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2080

2081
#ifdef ASSERT
2082
    {
2083
      Label L;
2084
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2085
      __ cbz(rscratch1, L);
2086
      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2087
      __ bind(L);
2088
    }
2089
#endif /* ASSERT */
2090

2091
    __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2092

2093
    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2094
      restore_native_result(masm, ret_type, stack_slots);
2095
    }
2096
    __ b(unlock_done);
2097

2098
    __ block_comment("} Slow path unlock");
2099

2100
  } // synchronized
2101

2102
  // SLOW PATH Reguard the stack if needed
2103

2104
  __ bind(reguard);
2105
  save_native_result(masm, ret_type, stack_slots);
2106
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2107
  restore_native_result(masm, ret_type, stack_slots);
2108
  // and continue
2109
  __ b(reguard_done);
2110

2111
  // SLOW PATH safepoint
2112
  {
2113
    __ block_comment("safepoint {");
2114
    __ bind(safepoint_in_progress);
2115

2116
    // Don't use call_VM as it will see a possible pending exception and forward it
2117
    // and never return here preventing us from clearing _last_native_pc down below.
2118
    //
2119
    save_native_result(masm, ret_type, stack_slots);
2120
    __ mov(c_rarg0, rthread);
2121
#ifndef PRODUCT
2122
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2123
#endif
2124
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2125
    __ blr(rscratch1);
2126

2127
    // Restore any method result value
2128
    restore_native_result(masm, ret_type, stack_slots);
2129

2130
    __ b(safepoint_in_progress_done);
2131
    __ block_comment("} safepoint");
2132
  }
2133

2134
  // SLOW PATH dtrace support
2135
  if (DTraceMethodProbes) {
2136
    {
2137
      __ block_comment("dtrace entry {");
2138
      __ bind(dtrace_method_entry);
2139

2140
      // We have all of the arguments set up at this point. We must not touch any of the
2141
      // argument registers at this point (what if we save/restore them when there are no oops?).
2142

2143
      save_args(masm, total_c_args, c_arg, out_regs);
2144
      __ mov_metadata(c_rarg1, method());
2145
      __ call_VM_leaf(
2146
        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2147
        rthread, c_rarg1);
2148
      restore_args(masm, total_c_args, c_arg, out_regs);
2149
      __ b(dtrace_method_entry_done);
2150
      __ block_comment("} dtrace entry");
2151
    }
2152

2153
    {
2154
      __ block_comment("dtrace exit {");
2155
      __ bind(dtrace_method_exit);
2156
      save_native_result(masm, ret_type, stack_slots);
2157
      __ mov_metadata(c_rarg1, method());
2158
      __ call_VM_leaf(
2159
        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2160
        rthread, c_rarg1);
2161
      restore_native_result(masm, ret_type, stack_slots);
2162
      __ b(dtrace_method_exit_done);
2163
      __ block_comment("} dtrace exit");
2164
    }
2165
  }
2166

2167
  __ flush();
2168

2169
  nmethod *nm = nmethod::new_native_nmethod(method,
2170
                                            compile_id,
2171
                                            masm->code(),
2172
                                            vep_offset,
2173
                                            frame_complete,
2174
                                            stack_slots / VMRegImpl::slots_per_word,
2175
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2176
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2177
                                            oop_maps);
2178

2179
  return nm;
2180
}
2181

2182
// this function returns the adjust size (in number of words) to a c2i adapter
2183
// activation for use during deoptimization
2184
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2185
  assert(callee_locals >= callee_parameters,
2186
          "test and remove; got more parms than locals");
2187
  if (callee_locals < callee_parameters)
2188
    return 0;                   // No adjustment for negative locals
2189
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2190
  // diff is counted in stack words
2191
  return align_up(diff, 2);
2192
}
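// Worked example (assuming Interpreter::stackElementWords == 1 on this 64-bit
// port): callee_parameters == 2 and callee_locals == 5 give diff == 3, which
// align_up(diff, 2) rounds to an adjustment of 4 words.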
2193

2194

2195
//------------------------------generate_deopt_blob----------------------------
2196
void SharedRuntime::generate_deopt_blob() {
2197
  // Allocate space for the code
2198
  ResourceMark rm;
2199
  // Setup code generation tools
2200
  int pad = 0;
2201
#if INCLUDE_JVMCI
2202
  if (EnableJVMCI) {
2203
    pad += 512; // Increase the buffer size when compiling for JVMCI
2204
  }
2205
#endif
2206
  CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2207
  MacroAssembler* masm = new MacroAssembler(&buffer);
2208
  int frame_size_in_words;
2209
  OopMap* map = nullptr;
2210
  OopMapSet *oop_maps = new OopMapSet();
2211
  RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2212

2213
  // -------------
2214
  // This code enters when returning to a de-optimized nmethod.  A return
2215
  // address has been pushed on the stack, and return values are in
2216
  // registers.
2217
  // If we are doing a normal deopt then we were called from the patched
2218
  // nmethod from the point we returned to the nmethod. So the return
2219
  // address on the stack is wrong by NativeCall::instruction_size
2220
  // We will adjust the value so it looks like we have the original return
2221
  // address on the stack (like when we eagerly deoptimized).
2222
  // In the case of an exception pending when deoptimizing, we enter
2223
  // with a return address on the stack that points after the call we patched
2224
  // into the exception handler. We have the following register state from,
2225
  // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2226
  //    r0: exception oop
2227
  //    r19: exception handler
2228
  //    r3: throwing pc
2229
  // So in this case we simply jam r3 into the useless return address and
2230
  // the stack looks just like we want.
2231
  //
2232
  // At this point we need to de-opt.  We save the argument return
2233
  // registers.  We call the first C routine, fetch_unroll_info().  This
2234
  // routine captures the return values and returns a structure which
2235
  // describes the current frame size and the sizes of all replacement frames.
2236
  // The current frame is compiled code and may contain many inlined
2237
  // functions, each with their own JVM state.  We pop the current frame, then
2238
  // push all the new frames.  Then we call the C routine unpack_frames() to
2239
  // populate these frames.  Finally unpack_frames() returns us the new target
2240
  // address.  Notice that callee-save registers are BLOWN here; they have
2241
  // already been captured in the vframeArray at the time the return PC was
2242
  // patched.
2243
  address start = __ pc();
2244
  Label cont;
2245

2246
  // Prolog for non exception case!
2247

2248
  // Save everything in sight.
2249
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2250

2251
  // Normal deoptimization.  Save exec mode for unpack_frames.
2252
  __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2253
  __ b(cont);
2254

2255
  int reexecute_offset = __ pc() - start;
2256
#if INCLUDE_JVMCI && !defined(COMPILER1)
2257
  if (EnableJVMCI && UseJVMCICompiler) {
2258
    // JVMCI does not use this kind of deoptimization
2259
    __ should_not_reach_here();
2260
  }
2261
#endif
2262

2263
  // Reexecute case
2264
  // return address is the pc that describes what bci to re-execute at
2265

2266
  // No need to update map as each call to save_live_registers will produce identical oopmap
2267
  (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2268

2269
  __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2270
  __ b(cont);
2271

2272
#if INCLUDE_JVMCI
2273
  Label after_fetch_unroll_info_call;
2274
  int implicit_exception_uncommon_trap_offset = 0;
2275
  int uncommon_trap_offset = 0;
2276

2277
  if (EnableJVMCI) {
2278
    implicit_exception_uncommon_trap_offset = __ pc() - start;
2279

2280
    __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2281
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2282

2283
    uncommon_trap_offset = __ pc() - start;
2284

2285
    // Save everything in sight.
2286
    reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2287
    // fetch_unroll_info needs to call last_java_frame()
2288
    Label retaddr;
2289
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2290

2291
    __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2292
    __ movw(rscratch1, -1);
2293
    __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2294

2295
    __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2296
    __ mov(c_rarg0, rthread);
2297
    __ movw(c_rarg2, rcpool); // exec mode
2298
    __ lea(rscratch1,
2299
           RuntimeAddress(CAST_FROM_FN_PTR(address,
2300
                                           Deoptimization::uncommon_trap)));
2301
    __ blr(rscratch1);
2302
    __ bind(retaddr);
2303
    oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2304

2305
    __ reset_last_Java_frame(false);
2306

2307
    __ b(after_fetch_unroll_info_call);
2308
  } // EnableJVMCI
2309
#endif // INCLUDE_JVMCI
2310

2311
  int exception_offset = __ pc() - start;
2312

2313
  // Prolog for exception case
2314

2315
  // all registers are dead at this entry point, except for r0, and
2316
  // r3 which contain the exception oop and exception pc
2317
  // respectively.  Set them in TLS and fall thru to the
2318
  // unpack_with_exception_in_tls entry point.
2319

2320
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2321
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2322

2323
  int exception_in_tls_offset = __ pc() - start;
2324

2325
  // new implementation because exception oop is now passed in JavaThread
2326

2327
  // Prolog for exception case
2328
  // All registers must be preserved because they might be used by LinearScan
2329
  // Exception oop and throwing PC are passed in JavaThread
2330
  // tos: stack at point of call to method that threw the exception (i.e. only
2331
  // args are on the stack, no return address)
2332

2333
  // The return address pushed by save_live_registers will be patched
2334
  // later with the throwing pc. The correct value is not available
2335
  // now because loading it from memory would destroy registers.
2336

2337
  // NB: The SP at this point must be the SP of the method that is
2338
  // being deoptimized.  Deoptimization assumes that the frame created
2339
  // here by save_live_registers is immediately below the method's SP.
2340
  // This is a somewhat fragile mechanism.
2341

2342
  // Save everything in sight.
2343
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2344

2345
  // Now it is safe to overwrite any register
2346

2347
  // Deopt during an exception.  Save exec mode for unpack_frames.
2348
  __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2349

2350
  // load throwing pc from JavaThread and patch it as the return address
2351
  // of the current frame. Then clear the field in JavaThread
2352
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2353
  __ protect_return_address(r3);
2354
  __ str(r3, Address(rfp, wordSize));
2355
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2356

2357
#ifdef ASSERT
2358
  // verify that there is really an exception oop in JavaThread
2359
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2360
  __ verify_oop(r0);
2361

2362
  // verify that there is no pending exception
2363
  Label no_pending_exception;
2364
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2365
  __ cbz(rscratch1, no_pending_exception);
2366
  __ stop("must not have pending exception here");
2367
  __ bind(no_pending_exception);
2368
#endif
2369

2370
  __ bind(cont);
2371

2372
  // Call C code.  Need thread and this frame, but NOT official VM entry
2373
  // crud.  We cannot block on this call, no GC can happen.
2374
  //
2375
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2376

2377
  // fetch_unroll_info needs to call last_java_frame().
2378

2379
  Label retaddr;
2380
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2381
#ifdef ASSERT
2382
  { Label L;
2383
    __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2384
    __ cbz(rscratch1, L);
2385
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2386
    __ bind(L);
2387
  }
2388
#endif // ASSERT
2389
  __ mov(c_rarg0, rthread);
2390
  __ mov(c_rarg1, rcpool);
2391
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2392
  __ blr(rscratch1);
2393
  __ bind(retaddr);
2394

2395
  // Need to have an oopmap that tells fetch_unroll_info where to
2396
  // find any register it might need.
2397
  oop_maps->add_gc_map(__ pc() - start, map);
2398

2399
  __ reset_last_Java_frame(false);
2400

2401
#if INCLUDE_JVMCI
2402
  if (EnableJVMCI) {
2403
    __ bind(after_fetch_unroll_info_call);
2404
  }
2405
#endif
2406

2407
  // Load UnrollBlock* into r5
2408
  __ mov(r5, r0);
2409

2410
  __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2411
  Label noException;
2412
  __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2413
  __ br(Assembler::NE, noException);
2414
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2415
  // QQQ this is useless it was null above
2416
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2417
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2418
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2419

2420
  __ verify_oop(r0);
2421

2422
  // Overwrite the result registers with the exception results.
2423
  __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2424
  // I think this is useless
2425
  // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2426

2427
  __ bind(noException);
2428

2429
  // Only register save data is on the stack.
2430
  // Now restore the result registers.  Everything else is either dead
2431
  // or captured in the vframeArray.
2432

2433
  // Restore fp result register
2434
  __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2435
  // Restore integer result register
2436
  __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2437

2438
  // Pop all of the register save area off the stack
2439
  __ add(sp, sp, frame_size_in_words * wordSize);
2440

2441
  // All of the register save area has been popped off the stack. Only the
2442
  // return address remains.
2443

2444
  // Pop all the frames we must move/replace.
2445
  //
2446
  // Frame picture (youngest to oldest)
2447
  // 1: self-frame (no frame link)
2448
  // 2: deopting frame  (no frame link)
2449
  // 3: caller of deopting frame (could be compiled/interpreted).
2450
  //
2451
  // Note: by leaving the return address of self-frame on the stack
2452
  // and using the size of frame 2 to adjust the stack
2453
  // when we are done the return to frame 3 will still be on the stack.
2454

2455
  // Pop deoptimized frame
2456
  __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2457
  __ sub(r2, r2, 2 * wordSize);
2458
  __ add(sp, sp, r2);
2459
  __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2460

2461
#ifdef ASSERT
2462
  // Compilers generate code that bangs the stack by as much as the
2463
  // interpreter would need. So this stack banging should never
2464
  // trigger a fault. Verify that it does not on non product builds.
2465
  __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2466
  __ bang_stack_size(r19, r2);
2467
#endif
2468
  // Load address of array of frame pcs into r2
2469
  __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2470

2471
  // Trash the old pc
2472
  // __ addptr(sp, wordSize);  FIXME ????
2473

2474
  // Load address of array of frame sizes into r4
2475
  __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2476

2477
  // Load counter into r3
2478
  __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2479

2480
  // Now adjust the caller's stack to make up for the extra locals
2481
  // but record the original sp so that we can save it in the skeletal interpreter
2482
  // frame and the stack walking of interpreter_sender will get the unextended sp
2483
  // value and not the "real" sp value.
2484

2485
  const Register sender_sp = r6;
2486

2487
  __ mov(sender_sp, sp);
2488
  __ ldrw(r19, Address(r5,
2489
                       Deoptimization::UnrollBlock::
2490
                       caller_adjustment_offset()));
2491
  __ sub(sp, sp, r19);
2492

2493
  // Push interpreter frames in a loop
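  // Roughly equivalent pseudocode for the loop below (a sketch, not the exact
  // generated sequence):
  //   for (k = 0; k < number_of_frames; k++) {
  //     size = frame_sizes[k];  lr = frame_pcs[k];
  //     enter();                      // push old rfp and lr, then rfp = sp
  //     sp -= size - 2 * wordSize;    // rest of the skeletal frame
  //     last_sp slot = 0; sender_sp slot = previous sp;
  //   }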
2494
  __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2495
  __ mov(rscratch2, rscratch1);
2496
  Label loop;
2497
  __ bind(loop);
2498
  __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2499
  __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2500
  __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2501
  __ enter();                           // Save old & set new fp
2502
  __ sub(sp, sp, r19);                  // Prolog
2503
  // This value is corrected by layout_activation_impl
2504
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2505
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2506
  __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2507
  __ sub(r3, r3, 1);                   // Decrement counter
2508
  __ cbnz(r3, loop);
2509

2510
  // Re-push self-frame
2511
  __ ldr(lr, Address(r2));
2512
  __ enter();
2513

2514
  // Allocate a full sized register save area.  We subtract 2 because
2515
  // enter() just pushed 2 words
2516
  __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2517

2518
  // Restore frame locals after moving the frame
2519
  __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2520
  __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2521

2522
  // Call C code.  Need thread but NOT official VM entry
2523
  // crud.  We cannot block on this call, no GC can happen.  Call should
2524
  // restore return values to their stack-slots with the new SP.
2525
  //
2526
  // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2527

2528
  // Use rfp because the frames look interpreted now
2529
  // Don't need the precise return PC here, just precise enough to point into this code blob.
2530
  address the_pc = __ pc();
2531
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2532

2533
  __ mov(c_rarg0, rthread);
2534
  __ movw(c_rarg1, rcpool); // second arg: exec_mode
2535
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2536
  __ blr(rscratch1);
2537

2538
  // Set an oopmap for the call site
2539
  // Use the same PC we used for the last java frame
2540
  oop_maps->add_gc_map(the_pc - start,
2541
                       new OopMap( frame_size_in_words, 0 ));
2542

2543
  // Clear fp AND pc
2544
  __ reset_last_Java_frame(true);
2545

2546
  // Collect return values
2547
  __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2548
  __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2549
  // I think this is useless (throwing pc?)
2550
  // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2551

2552
  // Pop self-frame.
2553
  __ leave();                           // Epilog
2554

2555
  // Jump to interpreter
2556
  __ ret(lr);
2557

2558
  // Make sure all code is generated
2559
  masm->flush();
2560

2561
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2562
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2563
#if INCLUDE_JVMCI
2564
  if (EnableJVMCI) {
2565
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2566
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2567
  }
2568
#endif
2569
}
2570

2571
// Number of stack slots between incoming argument block and the start of
2572
// a new frame.  The PROLOG must add this many slots to the stack.  The
2573
// EPILOG must remove this many slots. aarch64 needs two slots for
2574
// return address and fp.
2575
// TODO think this is correct but check
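// (Each VMRegImpl stack slot is a 4-byte slot, so the two 64-bit words, return
// address plus saved rfp, account for the 4 slots returned here.)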
2576
uint SharedRuntime::in_preserve_stack_slots() {
2577
  return 4;
2578
}
2579

2580
uint SharedRuntime::out_preserve_stack_slots() {
2581
  return 0;
2582
}
2583

2584
#ifdef COMPILER2
2585
//------------------------------generate_uncommon_trap_blob--------------------
2586
void SharedRuntime::generate_uncommon_trap_blob() {
2587
  // Allocate space for the code
2588
  ResourceMark rm;
2589
  // Setup code generation tools
2590
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2591
  MacroAssembler* masm = new MacroAssembler(&buffer);
2592

2593
  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2594

2595
  address start = __ pc();
2596

2597
  // Push self-frame.  We get here with a return address in LR
2598
  // and sp should be 16 byte aligned
2599
  // push rfp and retaddr by hand
2600
  __ protect_return_address();
2601
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2602
  // we don't expect an arg reg save area
2603
#ifndef PRODUCT
2604
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2605
#endif
2606
  // compiler left unloaded_class_index in j_rarg0; move it to where the
2607
  // runtime expects it.
2608
  if (c_rarg1 != j_rarg0) {
2609
    __ movw(c_rarg1, j_rarg0);
2610
  }
2611

2612
  // we need to set the last SP to the stack pointer of the stub frame
2613
  // and the pc to the address where this runtime call will return
2614
  // (although actually any pc in this code blob will do).
2615
  Label retaddr;
2616
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2617

2618
  // Call C code.  Need thread but NOT official VM entry
2619
  // crud.  We cannot block on this call, no GC can happen.  Call should
2620
  // capture callee-saved registers as well as return values.
2621
  // The thread is passed in c_rarg0 below.
2622
  //
2623
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2624
  //
2625
  // n.b. 2 gp args, 0 fp args, integral return type
2626

2627
  __ mov(c_rarg0, rthread);
2628
  __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2629
  __ lea(rscratch1,
2630
         RuntimeAddress(CAST_FROM_FN_PTR(address,
2631
                                         Deoptimization::uncommon_trap)));
2632
  __ blr(rscratch1);
2633
  __ bind(retaddr);
2634

2635
  // Set an oopmap for the call site
2636
  OopMapSet* oop_maps = new OopMapSet();
2637
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2638

2639
  // location of rfp is known implicitly by the frame sender code
2640

2641
  oop_maps->add_gc_map(__ pc() - start, map);
2642

2643
  __ reset_last_Java_frame(false);
2644

2645
  // move UnrollBlock* into r4
2646
  __ mov(r4, r0);
2647

2648
#ifdef ASSERT
2649
  { Label L;
2650
    __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset()));
2651
    __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2652
    __ br(Assembler::EQ, L);
2653
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2654
    __ bind(L);
2655
  }
2656
#endif
2657

2658
  // Pop all the frames we must move/replace.
2659
  //
2660
  // Frame picture (youngest to oldest)
2661
  // 1: self-frame (no frame link)
2662
  // 2: deopting frame  (no frame link)
2663
  // 3: caller of deopting frame (could be compiled/interpreted).
2664

2665
  // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2666
  __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2667

2668
  // Pop deoptimized frame (int)
2669
  __ ldrw(r2, Address(r4,
2670
                      Deoptimization::UnrollBlock::
2671
                      size_of_deoptimized_frame_offset()));
2672
  __ sub(r2, r2, 2 * wordSize);
2673
  __ add(sp, sp, r2);
2674
  __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2675

2676
#ifdef ASSERT
2677
  // Compilers generate code that bangs the stack by as much as the
2678
  // interpreter would need. So this stack banging should never
2679
  // trigger a fault. Verify that it does not on non product builds.
2680
  __ ldrw(r1, Address(r4,
2681
                      Deoptimization::UnrollBlock::
2682
                      total_frame_sizes_offset()));
2683
  __ bang_stack_size(r1, r2);
2684
#endif

  // Load address of array of frame pcs into r2 (address*)
  __ ldr(r2, Address(r4,
                     Deoptimization::UnrollBlock::frame_pcs_offset()));

  // Load address of array of frame sizes into r5 (intptr_t*)
  __ ldr(r5, Address(r4,
                     Deoptimization::UnrollBlock::
                     frame_sizes_offset()));

  // Counter
  __ ldrw(r3, Address(r4,
                      Deoptimization::UnrollBlock::
                      number_of_frames_offset())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, sp);
  __ ldrw(r1, Address(r4,
                      Deoptimization::UnrollBlock::
                      caller_adjustment_offset())); // (int)
  __ sub(sp, sp, r1);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ ldr(r1, Address(r5, 0));       // Load frame size
  __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
  __ ldr(lr, Address(r2, 0));       // Save return address
  __ enter();                       // and old rfp & set new rfp
  __ sub(sp, sp, r1);               // Prolog
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(sender_sp, sp);          // Pass sender_sp to next frame
  __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
  __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
  __ subsw(r3, r3, 1);            // Decrement counter
  __ br(Assembler::GT, loop);
  __ ldr(lr, Address(r2, 0));     // save final return address
  // Re-push self-frame
  __ enter();                     // & old rfp & set new rfp
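  // The interpreter frames pushed by the loop above are only skeletons: they
  // have the right sizes and sender sps, but their contents are filled in by
  // Deoptimization::unpack_frames(), called below.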

  // Use rfp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // Thread is in rthread already.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  // sp should already be aligned
  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blr(rscratch1);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Pop self-frame.
  __ leave();                 // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and sets up an oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = nullptr;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);

  // When the signal occurred, the LR was either signed and stored on the stack (in which
  // case it will be restored from the stack before being used) or unsigned and not stored
  // on the stack. Stripping ensures we get the right value.
  __ strip_return_address();

  // Save Integer and Float registers.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    // Additionally, r20 is a callee-saved register so we can look at
    // it later to determine if someone changed the return address for
    // us!
    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
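    // (rfp + wordSize is the return-address slot of the frame built by
    //  save_live_registers, so this frame will "return" to the saved
    //  exception pc.)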
  }

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false);

  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  reg_save.restore_live_registers(masm);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  Label no_adjust, bail;
  if (!cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ ldr(rscratch1, Address(rfp, wordSize));
    __ cmp(r20, rscratch1);
    __ br(Assembler::NE, no_adjust);
    __ authenticate_return_address(r20);

#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
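    // Together these checks require the poll to be "ldrw wzr, [xN]": bits
    // 31..22 encode a 32-bit LDR (unsigned immediate) and Rt (bits 4..0)
    // must be 0b11111, i.e. wzr.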
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  reg_save.restore_live_registers(masm);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);

  int frame_size_in_words;
  RegisterSaver reg_save(false /* save_vectors */);

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = nullptr;

  int start = __ offset();

  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blr(rscratch1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // r0 contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));

  // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
  __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
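  // restore_live_registers() below reloads that slot into rscratch1, so the
  // destination survives the register restore and can be branched to after it.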
  reg_save.restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  reg_save.restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

#ifdef COMPILER2
// This is here instead of runtime_aarch64_64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// creates the exception blob at the end
// Compiled methods jump to this blob via their exception handler
// (see emit_exception_handler in aarch64.ad)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a branch.
//
// Arguments:
//   r0: exception oop
//   r3: exception pc
//
// Results:
//   r0: exception oop
//   r3: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R2_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
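  // (framesize is in 32-bit slots, so a multiple of 4 slots keeps sp 16-byte
  //  aligned as required on AArch64.)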

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO check various assumptions made here
  //
  // make sure we do so before running this

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ protect_return_address();
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee save registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blr(rscratch1);
  // handle_exception_C is a special VM call which does not require an explicit
  // instruction sync afterwards.

  // May jump to SVE compiled code
  __ reinitialize_ptrue();
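  // (C2-compiled SVE code assumes a predicate register pre-set to all-true;
  //  the runtime call may have clobbered it, so restore it here.)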

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address(r3);

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8:  exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}

#endif // COMPILER2
