jdk

Форк
0
/
c1_Runtime1_arm.cpp 
794 строки · 25.8 Кб
1
/*
2
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "asm/macroAssembler.inline.hpp"
27
#include "c1/c1_Defs.hpp"
28
#include "c1/c1_LIRAssembler.hpp"
29
#include "c1/c1_MacroAssembler.hpp"
30
#include "c1/c1_Runtime1.hpp"
31
#include "ci/ciUtilities.hpp"
32
#include "compiler/oopMap.hpp"
33
#include "gc/shared/cardTable.hpp"
34
#include "gc/shared/cardTableBarrierSet.hpp"
35
#include "gc/shared/collectedHeap.hpp"
36
#include "gc/shared/tlab_globals.hpp"
37
#include "interpreter/interpreter.hpp"
38
#include "memory/universe.hpp"
39
#include "nativeInst_arm.hpp"
40
#include "oops/oop.inline.hpp"
41
#include "prims/jvmtiExport.hpp"
42
#include "register_arm.hpp"
43
#include "runtime/sharedRuntime.hpp"
44
#include "runtime/signature.hpp"
45
#include "runtime/vframeArray.hpp"
46
#include "utilities/align.hpp"
47
#include "vmreg_arm.inline.hpp"
48

49
// Note: Rtemp usage is this file should not impact C2 and should be
50
// correct as long as it is not implicitly used in lower layers (the
51
// arm [macro]assembler) and used with care in the other C1 specific
52
// files.
53

54
// Implementation of StubAssembler
55

56
// Calls a C1 runtime entry.  Rthread is passed implicitly as the first C
// argument (R0); any further arguments must already be in R1..R3
// (args_size only documents how many there are).  Sets up/tears down the
// last Java frame around the call, moves VM results into the requested
// registers, and tail-calls the forward_exception stub if the runtime
// left a pending exception.  Returns the code offset at which the caller
// should attach the oop map for this call.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // Runtime entries take the current thread as their first argument.
  mov(R0, Rthread);

  // May return -1 if the PC was not recorded here; in that case fall back
  // to the offset right after the call instruction below.
  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    // Load the pending exception now; it is tested further below after the
    // VM results have been fetched.
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    // Pending exception present: tail-call the forward_exception stub.
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}
98

99

100
// One-argument form: moves arg1 into R1 (the first runtime-call argument
// slot after Rthread in R0) unless it is already there, then delegates to
// the generic form with an argument count of 1.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  const bool arg_in_place = (arg1 == R1);
  if (!arg_in_place) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}
106

107

108
// Two-argument form.  Unlike the one-argument form no register shuffling
// is attempted: arg1 and arg2 must already be in R1 and R2.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}
112

113

114
// Three-argument form.  No register shuffling is attempted: arg1..arg3
// must already be in R1..R3.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
118

119

120
#define __ sasm->
121

122
// TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
123

124
// Stack layout of the register save area built by save_live_registers(),
// expressed as word offsets from SP (lowest address first):
//   [ FPU registers ][ R0..R12 minus the register aliased to FP ][ FP ][ LR ]
// arg1_offset/arg2_offset (in bytes) address the first two caller stack
// slots, which sit immediately above the save area.
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,              // FPU save area starts at the bottom (SP)
#endif
  R0_offset = fpu_save_size,  // core registers follow the FPU area
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  // R7 gets a slot only when it is not serving as the frame pointer
  R7_offset,
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  // R11 gets a slot only when it is not serving as the frame pointer
  R11_offset,
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,                                // total save area size in words
  arg1_offset = reg_save_size * wordSize,       // first caller stack slot (bytes)
  arg2_offset = (reg_save_size + 1) * wordSize  // second caller stack slot (bytes)
};
152

153

154
// Builds an OopMap describing where save_live_registers() (below) stores
// each register, as stack-slot indices relative to SP after the save.
// Emits no code; only records the frame size and slot assignments.
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  // Map R0..R9, skipping the register aliased to FP (its slot is recorded
  // separately below).  R10's slot is not added to the map — presumably
  // R10 is reserved rather than an allocatable oop register;
  // TODO(review): confirm against register_arm.hpp.
  int j=0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}
186

187
// Saves all registers that may hold live values into the frame described
// by RegisterLayout and returns the matching OopMap.  When the FPU state
// is not saved, SP is still adjusted by the same amount so the frame
// layout (and thus the OopMap offsets) stays identical.
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fpush(FloatRegisterSet(D0, fpu_save_size / 2));
  } else {
    // Reserve the FPU area anyway to keep the frame layout fixed.
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
201

202

203
// Reverses save_live_registers().
//   restore_R0            - if false, skip R0 (it carries the stub result)
//   restore_FP_LR         - if false, leave FP and LR on the stack
//   do_return             - if true, pop the saved LR directly into PC
//   restore_fpu_registers - whether the FPU save area holds live values
static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fpop(FloatRegisterSet(D0, fpu_save_size / 2));
    if (!restore_R0) {
      // Skip over the saved R0 slot so the pop below starts at R1.
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    // Discard the (dead) FPU area, plus R0's slot if it is not restored.
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    // Popping the saved return address into PC performs the return.
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
}
225

226

227
// Restore every saved register except R0 (which carries the stub result),
// pop FP/LR and return to the caller.
static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  const bool restore_R0    = false; // keep the result register intact
  const bool restore_FP_LR = true;
  const bool do_return     = true;
  restore_live_registers(sasm, restore_R0, restore_FP_LR, do_return, restore_fpu_registers);
}
230

231
// Full restore: every saved register (including R0) comes back, FP/LR are
// popped, and the stub returns via the saved return address.
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  const bool restore_R0    = true;
  const bool restore_FP_LR = true;
  const bool do_return     = true;
  restore_live_registers(sasm, restore_R0, restore_FP_LR, do_return, restore_fpu_registers);
}
234

235
// Restore registers but keep FP/LR (and thus the frame tail) on the stack;
// control stays in the stub — no return is emitted.
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  const bool restore_R0    = true;
  const bool restore_FP_LR = false;
  const bool do_return     = false;
  restore_live_registers(sasm, restore_R0, restore_FP_LR, do_return, restore_fpu_registers);
}
238

239
// Restore everything, including FP/LR, but fall through instead of
// returning (the caller emits its own jump, e.g. into the deopt blob).
static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  const bool restore_R0    = true;
  const bool restore_FP_LR = true;
  const bool do_return     = false;
  restore_live_registers(sasm, restore_R0, restore_FP_LR, do_return, restore_fpu_registers);
}
242

243
// Member-function facade so code outside this file can invoke the
// file-local save_live_registers() helper on this assembler.
void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}
246

247
// Member-function facade over the file-local
// restore_live_registers_without_return() helper.
void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}
250

251
void Runtime1::initialize_pd() {
  // No ARM-specific Runtime1 initialization is required.
}
253

254

255
// Generates a stub that throws an exception via the given runtime entry.
// All registers are saved first; if has_argument is true, the two stub
// arguments are reloaded from the caller's stack slots into R1/R2.  The
// runtime call unwinds to an exception handler and never returns here,
// hence the debug-only STOP.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}
273

274

275
// JSR292 support: if the thread flag says the exception PC is at a
// MethodHandle call site, SP was stashed in Rmh_SP_save; restore it so
// the handler finds the SP it expects.
static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
}
281

282

283
// Generates code that transfers control to the exception handler of the
// current frame.  On entry the exception oop/pc are either pending in the
// thread (forward_exception) or in Rexception_obj/Rexception_pc.  The
// throwing pc is patched into the return-address slot of the save area so
// exception_handler_for_pc can find it; afterwards the slot is re-patched
// with the handler address returned in R0, so restoring the saved return
// address into PC jumps straight to the handler.
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    // Registers were already saved by the stub that jumped here; only
    // describe the layout.
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    // Clear the pending exception: it is now held in Rexception_obj.
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop (Rexception_obj)
    // and the exception pc (Rexception_pc) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: the restore live registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}
349

350

351
// Generates the unwind-exception stub: the current compiled frame has no
// handler for the pending exception, so look up the caller's handler via
// the return address and jump to it with the exception oop/pc in
// Rexception_obj/Rexception_pc.
void Runtime1::generate_unwind_exception(StubAssembler* sasm) {

  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception_obj);
    restore_live_registers(sasm);
  }

  // FP no longer used to find the frame start
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // Exception oop should be still in Rexception_obj and pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  // R0 holds the handler address returned by the leaf call above.
  __ jump(R0);
}
377

378

379
// Shared tail for the *_patching stubs.  Calls the given runtime patching
// routine; if it reports (non-zero R0) that the nmethod was deoptimized,
// control continues in the deopt blob so the bytecode is reexecuted,
// otherwise the stub simply returns.
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

  // No deoptimization (R0 == 0): pop the frame and return (pop into PC,
  // executed conditionally on eq).
  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}
404

405

406
// Dispatches on the stub id and generates the corresponding C1 runtime
// stub.  Returns the OopMapSet describing the GC maps at the embedded
// runtime calls (nullptr for stubs that make no runtime call, e.g.
// slow_subtype_check_id).
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = nullptr;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        // All three ids share the same slow path: allocate via the runtime.
        const Register result = R0;
        const Register klass  = R1;

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        // R0 holds the new object; skip it during the restore.
        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        // Reload the two stub arguments (bci, method) from the caller's
        // stack slots.
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        // Shuffle arguments for new_multi_array(klass, rank, dims-address).
        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        // NOTE(review): the info string says "class_cast" although this is
        // the incompatible-class-change stub; kept as-is since it is an
        // identifying string, not user-facing behavior — confirm upstream.
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed,
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load an array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        // Linear scan of the secondary-supers array for R1.
        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        // Reload the stub arguments from the caller's stack slots.
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        // Fall through into the deopt blob rather than returning.
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}
733

734
#undef __
735

736
#ifdef __SOFTFP__
737
// Maps a runtime entry address to a printable name for the soft-float
// helper routines (used in debug/log output).  Returns "" when the
// address is not one of the known soft-fp entries.
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";
}
790
#else  // __SOFTFP__
791
// Hard-float build: the soft-fp helper entries do not exist, so no
// address can be named.
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
794
#endif // __SOFTFP__
795

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.