foreignGlobals_x86_64.cpp 
/*
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/oopCast.inline.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/formatBuffer.hpp"

bool ForeignGlobals::is_foreign_linker_supported() {
  return true;
}

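// A register is treated as volatile (caller-saved) if the parsed ABI lists it either
// as an argument register or as an additional volatile register.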
bool ABIDescriptor::is_volatile_reg(Register reg) const {
    return _integer_argument_registers.contains(reg)
        || _integer_additional_volatile_registers.contains(reg);
}

bool ABIDescriptor::is_volatile_reg(XMMRegister reg) const {
    return _vector_argument_registers.contains(reg)
        || _vector_additional_volatile_registers.contains(reg);
}

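// Builds the VM-side ABIDescriptor from the Java-side jdk.internal.foreign.abi.ABIDescriptor
// object: argument, return, and additional-volatile register sets, the number of X87 return
// registers, stack alignment, shadow space size, and the two scratch VMStorage values.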
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
  parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
  parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_XMMRegister);

  objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
  parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
  parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_XMMRegister);
  objArrayOop subarray = oop_cast<objArrayOop>(outputStorage->obj_at((int) StorageType::X87));
  abi._X87_return_registers_noof = subarray->length();

  objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
  parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
  parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_XMMRegister);

  abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
  abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);

  abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
  abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));

  return abi;
}

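// Spill support: pd_reg_size reports the spill slot size (8 bytes for a general-purpose
// register, 16 bytes for a full XMM register), and pd_store_reg/pd_load_reg emit the
// corresponding stores/loads relative to rsp; stack and BAD storage is never spilled.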
int RegSpiller::pd_reg_size(VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    return 8;
  } else if (reg.type() == StorageType::VECTOR) {
    return 16;
  }
  return 0; // stack and BAD
}

void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    masm->movptr(Address(rsp, offset), as_Register(reg));
  } else if (reg.type() == StorageType::VECTOR) {
    masm->movdqu(Address(rsp, offset), as_XMMRegister(reg));
  } else {
    // stack and BAD
  }
}

void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
  if (reg.type() == StorageType::INTEGER) {
    masm->movptr(as_Register(reg), Address(rsp, offset));
  } else if (reg.type() == StorageType::VECTOR) {
    masm->movdqu(as_XMMRegister(reg), Address(rsp, offset));
  } else {
    // stack and BAD
  }
}

static constexpr int RBP_BIAS = 16; // skip old rbp and return address

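// The move_* helpers below each emit a single argument move into a VMStorage destination
// (64-bit register, xmm register, outgoing stack slot, or frame data); out_stk_bias is
// added only to offsets of outgoing STACK destinations.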
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
                       Register from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->movq(as_Register(to_reg), from_reg);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
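      // fall through: a STACK destination is stored like FRAME_DATA, with out_stk_bias added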
    case StorageType::FRAME_DATA:
      assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
      masm->movq(Address(rsp, to_reg.offset() + out_bias), from_reg);
      break;
    default: ShouldNotReachHere();
  }
}

static void move_stack64(MacroAssembler* masm, Register tmp_reg, int out_stk_bias,
                         Address from_address, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->movq(as_Register(to_reg), from_address);
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
      masm->movdqu(as_XMMRegister(to_reg), from_address);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
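      // fall through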
    case StorageType::FRAME_DATA:
      assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
      masm->movq(tmp_reg, from_address);
      masm->movq(Address(rsp, to_reg.offset() + out_bias), tmp_reg);
      break;
    default: ShouldNotReachHere();
  }
}

static void move_xmm(MacroAssembler* masm, int out_stk_bias,
                     XMMRegister from_reg, VMStorage to_reg) {
  switch (to_reg.type()) {
    case StorageType::INTEGER: // windows vararg floats
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->movq(as_Register(to_reg), from_reg);
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
      masm->movdqu(as_XMMRegister(to_reg), from_reg);
      break;
    case StorageType::STACK:
      assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
      masm->movq(Address(rsp, to_reg.offset() + out_stk_bias), from_reg);
      break;
    default: ShouldNotReachHere();
  }
}

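// Emits the argument shuffle: for every recorded move, loads the value from its source
// (64-bit register, xmm register, or caller stack slot relative to rbp) and stores it into
// its destination VMStorage, using tmp as a scratch register for stack-to-stack moves.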
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias) const {
  Register tmp_reg = as_Register(tmp);
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg   = move.to;

    switch (from_reg.type()) {
      case StorageType::INTEGER:
        assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
        move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
        break;
      case StorageType::VECTOR:
        assert(from_reg.segment_mask() == XMM_MASK, "only xmm register supported");
        move_xmm(masm, out_stk_bias, as_XMMRegister(from_reg), to_reg);
        break;
      case StorageType::STACK: {
        assert(from_reg.stack_size() == 8, "only stack_size 8 supported");
        Address from_addr(rbp, RBP_BIAS + from_reg.offset() + in_stk_bias);
        move_stack64(masm, tmp_reg, out_stk_bias, from_addr, to_reg);
      } break;
      default: ShouldNotReachHere();
    }
  }
}
