foreignGlobals_aarch64.cpp
/*
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2022, Arm Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/oopCast.inline.hpp"
#include "prims/foreignGlobals.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "prims/vmstorage.hpp"
#include "utilities/formatBuffer.hpp"

bool ForeignGlobals::is_foreign_linker_supported() {
  return true;
}

bool ABIDescriptor::is_volatile_reg(Register reg) const {
  return _integer_argument_registers.contains(reg)
    || _integer_additional_volatile_registers.contains(reg);
}

bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
    return _vector_argument_registers.contains(reg)
        || _vector_additional_volatile_registers.contains(reg);
}

const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
  parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
  parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_FloatRegister);

  objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
  parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
  parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_FloatRegister);

  objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
  parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
  parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_FloatRegister);

  abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
  abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);

  abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
  abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));

  return abi;
}

int RegSpiller::pd_reg_size(VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    return 8;
  } else if (reg.type() == StorageType::VECTOR) {
    return 16;   // Always spill/unspill Q registers
  }
  return 0; // stack and BAD
}

void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    masm->spill(as_Register(reg), true, offset);
  } else if (reg.type() == StorageType::VECTOR) {
    masm->spill(as_FloatRegister(reg), masm->Q, offset);
  } else {
    // stack and BAD
  }
}

void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    masm->unspill(as_Register(reg), true, offset);
  } else if (reg.type() == StorageType::VECTOR) {
    masm->unspill(as_FloatRegister(reg), masm->Q, offset);
  } else {
    // stack and BAD
  }
}

static constexpr int RFP_BIAS = 16; // skip old rfp and lr

static void move_reg64(MacroAssembler* masm, int out_stk_bias,
                       Register from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->mov(as_Register(to_reg), from_reg);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
    case StorageType::FRAME_DATA: {
      Address dest(sp, to_reg.offset() + out_bias);
      switch (to_reg.stack_size()) {
        case 8: masm->str (from_reg, dest); break;
        case 4: masm->strw(from_reg, dest); break;
        case 2: masm->strh(from_reg, dest); break;
        case 1: masm->strb(from_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}

static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias, int out_stk_bias,
                       VMStorage from_reg, VMStorage to_reg) {
  Address from_addr(rfp, RFP_BIAS + from_reg.offset() + in_stk_bias);
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      switch (from_reg.stack_size()) {
        case 8: masm->ldr (as_Register(to_reg), from_addr); break;
        case 4: masm->ldrw(as_Register(to_reg), from_addr); break;
        case 2: masm->ldrh(as_Register(to_reg), from_addr); break;
        case 1: masm->ldrb(as_Register(to_reg), from_addr); break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
      switch (from_reg.stack_size()) {
        case 8:
          masm->ldrd(as_FloatRegister(to_reg), from_addr);
        break;
        case 4:
          masm->ldrs(as_FloatRegister(to_reg), from_addr);
        break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
    case StorageType::FRAME_DATA: {
      switch (from_reg.stack_size()) {
        case 8: masm->ldr (tmp_reg, from_addr); break;
        case 4: masm->ldrw(tmp_reg, from_addr); break;
        case 2: masm->ldrh(tmp_reg, from_addr); break;
        case 1: masm->ldrb(tmp_reg, from_addr); break;
        default: ShouldNotReachHere();
      }
      Address dest(sp, to_reg.offset() + out_bias);
      switch (to_reg.stack_size()) {
        case 8: masm->str (tmp_reg, dest); break;
        case 4: masm->strw(tmp_reg, dest); break;
        case 2: masm->strh(tmp_reg, dest); break;
        case 1: masm->strb(tmp_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}

static void move_v128(MacroAssembler* masm, int out_stk_bias,
                      FloatRegister from_reg, VMStorage to_reg) {
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->fmovd(as_Register(to_reg), from_reg);
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
      masm->fmovd(as_FloatRegister(to_reg), from_reg);
      break;
    case StorageType::STACK: {
      Address dest(sp, to_reg.offset() + out_stk_bias);
      switch (to_reg.stack_size()) {
        case 8: masm->strd(from_reg, dest); break;
        case 4: masm->strs(from_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}

void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias) const {
  Register tmp_reg = as_Register(tmp);
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg   = move.to;

    switch (from_reg.type()) {
      case StorageType::INTEGER:
        assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
        move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
        break;
      case StorageType::VECTOR:
        assert(from_reg.segment_mask() == V128_MASK, "only v128 register supported");
        move_v128(masm, out_stk_bias, as_FloatRegister(from_reg), to_reg);
        break;
      case StorageType::STACK:
        move_stack(masm, tmp_reg, in_stk_bias, out_stk_bias, from_reg, to_reg);
        break;
      default: ShouldNotReachHere();
    }
  }
}