/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create the illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

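// Choose the patching stub kind for a constant load: bytecodes that carry an
// optional appendix resolve through load_appendix_id, everything else patches
// in the class mirror.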
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}


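// Bail out of the compilation if the current code section is nearly full, so
// that subsequent emission cannot overflow the CodeBuffer.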
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.freeze());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

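// Emit the out-of-line entry code for exception handlers (if any) and record
// the code offset at which each handler entry starts.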
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == nullptr ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != nullptr && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


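// Main driver: emit machine code for each block of the scheduled block list,
// then flush any pending debug information.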
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


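// Emit a single basic block: align backward-branch targets, note exception
// entry offsets, and emit the block's LIR list.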
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != nullptr, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.freeze());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


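// Run the peephole pass over the list, then emit each LIR operation in order,
// checking for code-buffer overflow and recording non-safepoint debug info
// where requested.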
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.freeze());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}

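// Track debug state for instructions between safepoints: keep extending the
// pending non-safepoint record while the state is unchanged, and flush it
// once an instruction with a newer state shows up.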
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == nullptr)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == nullptr)  return;
  if (_pending_non_safepoint != nullptr) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = nullptr;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return null if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == nullptr)  break;
    t = t->caller_state();
  }
  if (t == nullptr)  return nullptr;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == nullptr)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

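// Flush the pending non-safepoint record: describe every scope, from the
// oldest caller down to the current one, at the recorded pc offset.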
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == nullptr)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

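// Emit a Java call: align the call site, emit (or share) the stub for calls
// that fall back to the interpreter, dispatch on the concrete call kind, and
// note any MethodHandle invokes for the nmethod.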
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  if (CodeBuffer::supports_shared_stubs() && op->method()->can_be_statically_bound()) {
    // Calls of the same statically bound method can share
    // a stub to the interpreter.
    CodeBuffer::csize_t call_offset = pc() - _masm->code()->insts_begin();
    _masm->code()->shared_stub_to_interp_for(op->method(), call_offset);
  } else {
    emit_static_call_stub();
  }
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(IA32) && defined(COMPILER2)
  // C2 leaves the FPU stack dirty; clean it.
  if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // IA32 && COMPILER2
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


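// Dispatch a one-operand LIR instruction to the matching (mostly
// platform-specific) emitter routine.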
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return: {
      assert(op->as_OpReturn() != nullptr, "sanity");
      LIR_OpReturn *ret_op = (LIR_OpReturn*)op;
      return_op(ret_op->in_opr(), ret_op->stub());
      if (ret_op->stub() != nullptr) {
        append_code_stub(ret_op->stub());
      }
      break;
    }

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

#ifdef IA32
    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;
#endif // IA32

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == nullptr, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry: {
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      if (needs_icache(compilation()->method())) {
        int offset = check_icache();
        offsets()->set_value(CodeOffsets::Entry, offset);
      }
      _masm->align(CodeEntryAlignment);
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
      if (needs_clinit_barrier_on_entry(compilation()->method())) {
        clinit_barrier(compilation()->method());
      }
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;
    }

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

#ifdef IA32
    case lir_fpop_raw:
      fpop();
      break;
#endif // IA32

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != nullptr) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_div:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
    case lir_f2hf:
    case lir_hf2f:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch (op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert(strict_fp_requires_explicit_rounding, "not required");
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


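// Dispatch a move based on the kinds of its source and destination operands:
// register, stack slot, constant, or memory address.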
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide);
  } else {
    ShouldNotReachHere();
  }
}


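// Non-product only: when VerifyOops is set, check every oop recorded in the
// oop map, whether it lives in a register or in a stack slot.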
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          _masm->verify_oop(r->as_Register());
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}
