/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "foreignGlobals.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/resourceArea.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "utilities/resourceHash.hpp"

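// StubLocations records where stub-specific values live. Slots are filled in
// by index during stub generation (e.g. as FRAME_DATA locations at a given
// frame offset) and can later be looked up directly or through a PLACEHOLDER
// VMStorage whose index refers to one of these slots.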
StubLocations::StubLocations() {
  for (uint32_t i = 0; i < LOCATION_LIMIT; i++) {
    _locs[i] = VMStorage::invalid();
  }
}

void StubLocations::set(uint32_t loc, VMStorage storage) {
  assert(loc < LOCATION_LIMIT, "oob");
  _locs[loc] = storage;
}

void StubLocations::set_frame_data(uint32_t loc, int offset) {
  set(loc, VMStorage(StorageType::FRAME_DATA, 8, offset));
}

VMStorage StubLocations::get(uint32_t loc) const {
  assert(loc < LOCATION_LIMIT, "oob");
  VMStorage storage = _locs[loc];
  assert(storage.is_valid(), "not set");
  return storage;
}

VMStorage StubLocations::get(VMStorage placeholder) const {
  assert(placeholder.type() == StorageType::PLACEHOLDER, "must be");
  return get(placeholder.index());
}

int StubLocations::data_offset(uint32_t loc) const {
  VMStorage storage = get(loc);
  assert(storage.type() == StorageType::FRAME_DATA, "must be");
  return storage.offset();
}

#define FOREIGN_ABI "jdk/internal/foreign/abi/"

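// Converts the Java-side calling convention object (with its argRegs and
// retRegs arrays of VMStorage objects) into a native CallRegs value.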
const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
  oop conv_oop = JNIHandles::resolve_non_null(jconv);
  objArrayOop arg_regs_oop = jdk_internal_foreign_abi_CallConv::argRegs(conv_oop);
  objArrayOop ret_regs_oop = jdk_internal_foreign_abi_CallConv::retRegs(conv_oop);
  int num_args = arg_regs_oop->length();
  int num_rets = ret_regs_oop->length();
  CallRegs result(num_args, num_rets);

  for (int i = 0; i < num_args; i++) {
    result._arg_regs.push(parse_vmstorage(arg_regs_oop->obj_at(i)));
  }

  for (int i = 0; i < num_rets; i++) {
    result._ret_regs.push(parse_vmstorage(ret_regs_oop->obj_at(i)));
  }

  return result;
}

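// Converts a single Java jdk.internal.foreign.abi.VMStorage object into its
// VM-side VMStorage counterpart.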
VMStorage ForeignGlobals::parse_vmstorage(oop storage) {
  jbyte type = jdk_internal_foreign_abi_VMStorage::type(storage);
  jshort segment_mask_or_size = jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(storage);
  jint index_or_offset = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);

  return VMStorage(static_cast<StorageType>(type), segment_mask_or_size, index_or_offset);
}

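// Returns the number of bytes needed to spill all registers in 'regs'.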
int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
  int result_size = 0;
  for (int i = 0; i < regs.length(); i++) {
    result_size += pd_reg_size(regs.at(i));
  }
  return result_size;
}

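// Emits code that spills (spill == true) or restores (spill == false) the
// registers in _regs to/from consecutive stack slots starting at rsp_offset.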
void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
  assert(rsp_offset != -1, "rsp_offset should be set");
  int offset = rsp_offset;
  for (int i = 0; i < _regs.length(); i++) {
    VMStorage reg = _regs.at(i);
    if (spill) {
      pd_store_reg(masm, offset, reg);
    } else {
      pd_load_reg(masm, offset, reg);
    }
    offset += pd_reg_size(reg);
  }
}

void ArgumentShuffle::print_on(outputStream* os) const {
  os->print_cr("Argument shuffle {");
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg   = move.to;

    os->print("Move from ");
    from_reg.print_on(os);
    os->print(" to ");
    to_reg.print_on(os);
    os->print_cr("");
  }
  os->print_cr("}");
}

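// Computes the size in bytes of the stack area needed for the stack-passed
// outgoing arguments, rounded up to an 8-byte boundary.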
int ForeignGlobals::compute_out_arg_bytes(const GrowableArray<VMStorage>& out_regs) {
  uint32_t max_stack_offset = 0;
  for (VMStorage reg : out_regs) {
    if (reg.is_stack())
      max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
  }
  return align_up(max_stack_offset, 8);
}

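// Computes the Java calling convention for 'signature' and appends the
// resulting VMStorage locations to 'out_regs'. Returns the size in bytes of
// the stack argument area.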
int ForeignGlobals::java_calling_convention(const BasicType* signature, int num_args, GrowableArray<VMStorage>& out_regs) {
  VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
  int slots = align_up(SharedRuntime::java_calling_convention(signature, vm_regs, num_args), 2);
  for (int i = 0; i < num_args; i++) {
    VMRegPair pair = vm_regs[i];
    // Note: we ignore pair.second() here. The signature should consist of
    // register-sized values, so there should be no need for multi-register pairs.
    if (signature[i] != T_VOID) {
      out_regs.push(as_VMStorage(pair.first(), signature[i]));
    }
  }
  return slots << LogBytesPerInt;
}

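// Returns a copy of 'regs' in which every PLACEHOLDER entry is replaced by
// the concrete location recorded for it in 'locs'.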
GrowableArray<VMStorage> ForeignGlobals::replace_place_holders(const GrowableArray<VMStorage>& regs, const StubLocations& locs) {
  GrowableArray<VMStorage> result(regs.length());
  for (VMStorage reg : regs) {
    result.push(reg.type() == StorageType::PLACEHOLDER ? locs.get(reg) : reg);
  }
  return result;
}

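// Drops the first argument register (the receiver) from an upcall's argument
// register list.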
GrowableArray<VMStorage> ForeignGlobals::upcall_filter_receiver_reg(const GrowableArray<VMStorage>& unfiltered_regs) {
  GrowableArray<VMStorage> out(unfiltered_regs.length() - 1);
  // drop first arg reg
  for (int i = 1; i < unfiltered_regs.length(); i++) {
    out.push(unfiltered_regs.at(i));
  }
  return out;
}

GrowableArray<VMStorage> ForeignGlobals::downcall_filter_offset_regs(const GrowableArray<VMStorage>& regs,
                                                                     BasicType* signature, int num_args,
                                                                     bool& has_objects) {
  GrowableArray<VMStorage> result(regs.length());
  int reg_idx = 0;
  for (int sig_idx = 0; sig_idx < num_args; sig_idx++) {
    if (signature[sig_idx] == T_VOID) {
      continue; // ignore upper halves
    }

    result.push(regs.at(reg_idx++));
    if (signature[sig_idx] == T_OBJECT) {
      has_objects = true;
      sig_idx++; // skip offset
      reg_idx++;
    }
  }
  return result;
}

class ArgumentShuffle::ComputeMoveOrder: public StackObj {
  class MoveOperation;

  // segment_mask_or_size is not taken into account since
  // VMStorages that differ only in mask or size can still
  // conflict
  static inline unsigned hash(const VMStorage& vms) {
    return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
  }
  static inline bool equals(const VMStorage& a, const VMStorage& b) {
    return a.type() == b.type() && a.index_or_offset() == b.index_or_offset();
  }

  using KillerTable = ResourceHashtable<
    VMStorage, MoveOperation*,
    32, // doesn't need to be big; we don't have that many argument registers (in known ABIs)
    AnyObj::RESOURCE_AREA,
    mtInternal,
    ComputeMoveOrder::hash,
    ComputeMoveOrder::equals
    >;

  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMStorage       _src;
    VMStorage       _dst;
    bool            _processed;
    MoveOperation*  _next;
    MoveOperation*  _prev;

   public:
    MoveOperation(VMStorage src, VMStorage dst):
      _src(src), _dst(dst), _processed(false), _next(nullptr), _prev(nullptr) {}

    const VMStorage& src() const { return _src; }
    const VMStorage& dst() const { return _dst; }
    MoveOperation* next()  const { return _next; }
    MoveOperation* prev()  const { return _prev; }
    void set_processed()         { _processed = true; }
    bool is_processed()    const { return _processed; }

    // insert
    void break_cycle(VMStorage temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(temp_register, _dst);

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = nullptr;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      _dst = temp_register;
    }

    void link(KillerTable& killer) {
      // link this store in front of the store that it depends on
      MoveOperation** n = killer.get(_src);
      if (n != nullptr) {
        MoveOperation* src_killer = *n;
        assert(_next == nullptr && src_killer->_prev == nullptr, "shouldn't have been set yet");
        _next = src_killer;
        src_killer->_prev = this;
      }
    }

    Move as_move() {
      return {_src, _dst};
    }
  };

 private:
  const GrowableArray<VMStorage>& _in_regs;
  const GrowableArray<VMStorage>& _out_regs;
  VMStorage _tmp_vmreg;
  GrowableArray<MoveOperation*> _edges;
  GrowableArray<Move> _moves;

 public:
  ComputeMoveOrder(const GrowableArray<VMStorage>& in_regs,
                   const GrowableArray<VMStorage>& out_regs,
                   VMStorage tmp_vmreg) :
      _in_regs(in_regs),
      _out_regs(out_regs),
      _tmp_vmreg(tmp_vmreg),
      _edges(in_regs.length()),
      _moves(in_regs.length()) {
    assert(in_regs.length() == out_regs.length(),
      "stray registers? %d != %d", in_regs.length(), out_regs.length());
  }

  void compute() {
    for (int i = 0; i < _in_regs.length(); i++) {
      VMStorage in_reg = _in_regs.at(i);
      VMStorage out_reg = _out_regs.at(i);

      if (out_reg.is_stack() || out_reg.is_frame_data()) {
        // Move operations where the dest is the stack can all be
        // scheduled first since they can't interfere with the other moves.
        // The input and output stack spaces are distinct from each other.
        Move move{in_reg, out_reg};
        _moves.push(move);
      } else if (in_reg == out_reg) {
        // Can skip non-stack identity moves.
        continue;
      } else {
        _edges.append(new MoveOperation(in_reg, out_reg));
      }
    }
    // Break any cycles in the register moves and emit them in the
    // proper order.
    compute_store_order();
  }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  void compute_store_order() {
    // Record which moves kill which registers
    KillerTable killer; // a map of VMStorage -> MoveOperation*
    for (int i = 0; i < _edges.length(); i++) {
      MoveOperation* s = _edges.at(i);
      assert(!killer.contains(s->dst()),
             "multiple moves with the same register as destination");
      killer.put(s->dst(), s);
    }
    assert(!killer.contains(_tmp_vmreg),
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < _edges.length(); i++) {
      _edges.at(i)->link(killer);
    }

    // At this point, all the move operations are chained together
    // in one or more doubly linked lists.  Processing them backwards finds
    // the beginning of the chain, forwards finds the end.  If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    for (int e = 0; e < _edges.length(); e++) {
      MoveOperation* s = _edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != nullptr && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          start->break_cycle(_tmp_vmreg);
        }
        // walk the chain forward inserting to store list
        while (start != nullptr) {
          _moves.push(start->as_move());

          start->set_processed();
          start = start->next();
        }
      }
    }
  }

public:
  static GrowableArray<Move> compute_move_order(const GrowableArray<VMStorage>& in_regs,
                                                const GrowableArray<VMStorage>& out_regs,
                                                VMStorage tmp_vmreg) {
    ComputeMoveOrder cmo(in_regs, out_regs, tmp_vmreg);
    cmo.compute();
    return cmo._moves;
  }
};

ArgumentShuffle::ArgumentShuffle(
    const GrowableArray<VMStorage>& in_regs,
    const GrowableArray<VMStorage>& out_regs,
    VMStorage shuffle_temp) {
  _moves = ComputeMoveOrder::compute_move_order(in_regs, out_regs, shuffle_temp);
}
