jdk

Форк
0
970 строк · 29.9 Кб
1
/*
2
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "code/codeBlob.hpp"
27
#include "code/codeCache.hpp"
28
#include "code/nmethod.hpp"
29
#include "code/scopeDesc.hpp"
30
#include "compiler/oopMap.inline.hpp"
31
#include "gc/shared/collectedHeap.hpp"
32
#include "logging/log.hpp"
33
#include "logging/logStream.hpp"
34
#include "memory/allocation.inline.hpp"
35
#include "memory/iterator.hpp"
36
#include "memory/resourceArea.hpp"
37
#include "oops/compressedOops.hpp"
38
#include "runtime/atomic.hpp"
39
#include "runtime/frame.inline.hpp"
40
#include "runtime/handles.inline.hpp"
41
#include "runtime/signature.hpp"
42
#include "runtime/stackWatermarkSet.inline.hpp"
43
#include "utilities/align.hpp"
44
#include "utilities/lockFreeStack.hpp"
45
#ifdef COMPILER1
46
#include "c1/c1_Defs.hpp"
47
#endif
48
#ifdef COMPILER2
49
#include "opto/optoreg.hpp"
50
#endif
51
#if INCLUDE_JVMCI
52
#include "jvmci/jvmci_globals.hpp"
53
#endif
54

55
static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
56

57
// Helpers for arithmetic on derived_pointer values (an opaque enum type
// holding a pointer into the interior of an object). They keep all casts
// to/from the underlying integer representation in one place.

static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

static inline derived_pointer to_derived_pointer(intptr_t obj) {
  return static_cast<derived_pointer>(obj);
}

// Byte distance between two derived pointers.
static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

// Displace a derived pointer by a byte offset.
static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}
72

73
// OopMapStream
74

75
// Stream over the entries of a mutable OopMap (reads back the entries that
// were written into its CompressedWriteStream).
OopMapStream::OopMapStream(const OopMap* oop_map)
  : _stream(oop_map->write_stream()->buffer()) {
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

// Stream over the entries of an ImmutableOopMap (reads the compressed data
// stored directly after the map object).
OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  : _stream(oop_map->data_addr()) {
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

// Decode the next entry, if any; afterwards _valid_omv says whether
// current() holds a valid value.
void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(&_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}
97

98

99
// OopMap
100

101
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
102
// slots to hold 4-byte values like ints and floats in the LP64 build.
103
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;

#ifdef ASSERT
  // Debug-only table tracking which register/stack-slot locations have been
  // assigned, used to catch duplicate insertions in set_xxx().
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
117

118

119
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _has_derived_oops = source->has_derived_oops();
  _index = -1;

#ifdef ASSERT
  // Fresh debug-location table; entries are re-validated as they are copied.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


// Return a resource-allocated deep copy of this map.
OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

// Copy the raw compressed entry data (unsorted) to the given address.
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}
151

152
class OopMapSort {
153
private:
154
  const OopMap* _map;
155
  OopMapValue* _values;
156
  int _count;
157

158
public:
159
  OopMapSort(const OopMap* map) : _map(map), _count(0) {
160
    _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
161
  }
162

163
  void sort();
164

165
  void print();
166

167
  void write(CompressedWriteStream* stream) {
168
    for (int i = 0; i < _count; ++i) {
169
      _values[i].write_on(stream);
170
    }
171
  }
172

173
private:
174
  int find_derived_position(OopMapValue omv, int start) {
175
    assert(omv.type() == OopMapValue::derived_oop_value, "");
176

177
    VMReg base = omv.content_reg();
178
    int i = start;
179

180
    for (; i < _count; ++i) {
181
      if (base == _values[i].reg()) {
182

183
        for (int n = i + 1; n < _count; ++n) {
184
          if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) {
185
            return n;
186
          }
187

188
          if (derived_cost(_values[i]) > derived_cost(omv)) {
189
            return n;
190
          }
191
        }
192
        return _count;
193
      }
194
    }
195

196
    assert(false, "failed to find base");
197
    return -1;
198
  }
199

200
  int find_position(OopMapValue omv, int start) {
201
    assert(omv.type() != OopMapValue::derived_oop_value, "");
202

203
    int i = start;
204
    for (; i < _count; ++i) {
205
      if (omv_cost(_values[i]) > omv_cost(omv)) {
206
        return i;
207
      }
208
    }
209
    assert(i < _map->omv_count(), "bounds check");
210
    return i;
211
  }
212

213
  void insert(OopMapValue value, int pos) {
214
    assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
215
    assert(pos <= _count, "sanity");
216

217
    if (pos < _count) {
218
      OopMapValue prev = _values[pos];
219

220
      for (int i = pos; i < _count; ++i) {
221
        OopMapValue tmp = _values[i+1];
222
        _values[i+1] = prev;
223
        prev = tmp;
224
      }
225
    }
226
    _values[pos] = value;
227

228
    ++_count;
229
  }
230

231
  int omv_cost(OopMapValue omv) {
232
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
233
    return reg_cost(omv.reg());
234
  }
235

236
  int reg_cost(VMReg reg) {
237
    if (reg->is_reg()) {
238
      return 0;
239
    }
240
    return reg->reg2stack() * VMRegImpl::stack_slot_size;
241
  }
242

243
  int derived_cost(OopMapValue omv) {
244
    return reg_cost(omv.reg());
245
  }
246
};
247

248
// Build the sorted entry array in three passes over the source map:
// callee-saved entries (original order), then oop/narrowoop entries by
// location cost, then derived entries placed behind their bases.
void OopMapSort::sort() {
  // Debug pass: every entry must be one of the four known kinds.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }

  // Pass 1: callee-saved entries, appended in original order.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    if (oms.current().type() == OopMapValue::callee_saved_value) {
      insert(oms.current(), _count);
    }
  }

  // Pass 2: oop/narrowoop entries, inserted by increasing location cost.
  int start = _count;
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      int pos = find_position(omv, start);
      insert(omv, pos);
    }
  }

  // Pass 3: derived entries, each placed directly behind its base entry.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::derived_oop_value) {
      int pos = find_derived_position(omv, start);
      assert(pos > 0, "");
      insert(omv, pos);
    }
  }
}
278

279
// Debug dump of the sorted entries: 'o' = oop, 'n' = narrowoop, 'd' =
// derived; each line shows whether the location is a register or a stack
// byte offset.
void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      // Derived entry: print base location (content_reg) and derived location.
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}
301

302
// Copy this map's entries to 'addr' in sorted order (see OopMapSort).
// The sorted stream must encode to exactly the same number of bytes as
// the original unsorted stream.
void OopMap::copy_and_sort_data_to(address addr) const {
  OopMapSort sort(this);
  sort.sort();
  CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
  sort.write(stream);

  assert(stream->position() == write_stream()->position(), "");
  memcpy(addr, stream->buffer(), stream->position());
}
311

312
int OopMap::heap_size() const {
313
  int size = sizeof(OopMap);
314
  int align = sizeof(void *) - 1;
315
  size += write_stream()->position();
316
  // Align to a reasonable ending point
317
  size = ((size+align) & ~align);
318
  return size;
319
}
320

321
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
322
// slots to hold 4-byte values like ints and floats in the LP64 build.
323
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
//
// Append one entry of type 'x' for location 'reg'; 'optional' carries the
// second register for callee_saved (caller's register) and derived (base)
// entries. In debug builds, inserting the same location twice is fatal.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  // Keep the oop count / derived flag summaries in sync with the entries.
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}


// Record that 'reg' holds an ordinary oop at this safepoint.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


// Record that 'reg' holds a compressed (narrow) oop.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


// Record that 'reg' holds the saved value of the caller's machine register.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


// Record that 'reg' holds a pointer derived from the oop in
// 'derived_from_local_register'. If both are the same location the value
// is just the oop itself, so record a plain oop entry instead.
void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}
363

364
// OopMapSet
365

366
OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

// Register 'map' for the safepoint at 'pc_offset' and return its index.
// Maps are expected to be added in increasing pc order; debug builds warn
// on out-of-order insertion and abort on a duplicate offset.
int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}
388

389
// Derived-oop closure that records each derived pointer in the global
// DerivedPointerTable so it can be recomputed after GC moves the base
// (only meaningful when a compiler that emits derived pointers is built in).
class AddDerivedOop : public DerivedOopClosure {
 public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};
401

402
// Derived-oop closure that fixes each derived pointer immediately: it
// temporarily stores the base value into the derived slot, lets the wrapped
// OopClosure relocate that base, then re-applies the saved offset.
class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;  // closure applied to the temporarily-stored base

public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
    // offset, if the base pointer is processed in the first derived pointer.
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;   // byte offset within the object
    *derived = derived_base;                     // expose the base to the closure
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;                // re-derive from the (possibly moved) base
  }
};
424

425
class IgnoreDerivedOop : public DerivedOopClosure {
426
  OopClosure* _oop_cl;
427

428
public:
429
  enum {
430
    SkipNull = true, NeedsLock = true
431
  };
432

433
  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
434
};
435

436
// Visit the oops of the frame's oopmap; derived pointers are handled
// according to 'mode'.
void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

// Visit the oops of the frame's oopmap with an explicit derived-oop closure.
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}

// Visit all oop and derived-oop locations described by this map, skipping
// null values.
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}
450

451
// As above, but select the derived-oop handling from the iteration mode:
// process in place, record in the DerivedPointerTable, or ignore.
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}
473

474
// Visit every entry whose type the closure accepts (via handle_type).
void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

// Visit every entry of exactly the given type.
void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}
493

494
// Record into 'reg_map' the frame location of every callee-saved register
// described by 'oopmap'.
static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();                                 // the caller's register
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);   // where it was saved
      reg_map->set_location(reg, loc);
    }
  }
}

// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  update_register_map1(this, fr, reg_map);
}
524

525
// Find the oopmap for the frame's current pc.
const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

// Find the oopmap in 'cb' covering the given (return) pc; one must exist.
const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}
540

541
//=============================================================================
542
// Non-Product code
543

544
#ifndef PRODUCT
545
void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
546
  // Print oopmap and regmap
547
  tty->print_cr("------ ");
548
  CodeBlob* cb = fr->cb();
549
  const ImmutableOopMapSet* maps = cb->oop_maps();
550
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
551
  map->print();
552
  if( cb->is_nmethod() ) {
553
    nmethod* nm = (nmethod*)cb;
554
    // native wrappers have no scope data, it is implied
555
    if (nm->is_native_method()) {
556
      tty->print("bci: 0 (native)");
557
    } else {
558
      ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
559
      tty->print("bci: %d ",scope->bci());
560
    }
561
  }
562
  tty->cr();
563
  fr->print_on(tty);
564
  tty->print("     ");
565
  cb->print_value_on(tty);  tty->cr();
566
  if (reg_map != nullptr) {
567
    reg_map->print();
568
  }
569
  tty->print_cr("------ ");
570

571
}
572
#endif // PRODUCT
573

574
// Printing code is present in product build for -XX:+PrintAssembly.
575

576
// Print a human-readable tag for an oopmap entry type; for callee-saved and
// derived entries 'optional' is the associated second register.
static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

// Print this entry as "<location>=<type>".
void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }
607

608
// Print all entries of this immutable map between braces.
void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

// Print all entries of this mutable map, followed by its pc offset.
void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }
632

633
// Print every map in the set; consecutive pairs sharing the same map are
// collapsed into one map printout followed by the list of pc offsets.
void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

// Print every map in the mutable set, numbered by index.
void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }
670

671
bool OopMap::equals(const OopMap* other) const {
672
  if (other->_omv_count != _omv_count) {
673
    return false;
674
  }
675
  if (other->write_stream()->position() != write_stream()->position()) {
676
    return false;
677
  }
678
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
679
    return false;
680
  }
681
  return true;
682
}
683

684
// Return the index of the pair whose pc offset equals 'pc_offset', or -1 if
// none exists (pairs are ordered by pc offset, so the scan can stop at the
// first pair at or beyond the requested offset).
int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
  ImmutableOopMapPair* pairs = get_pairs();
  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      ImmutableOopMapPair* last = &pairs[i];
      return last->pc_offset() == pc_offset ? i : -1;
    }
  }
  return -1;
}

// Return the map whose pair has exactly the given pc offset; it must exist.
const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last  = nullptr;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}
712

713
// Build an immutable copy of 'oopmap'; the sorted compressed entry data is
// written into the bytes directly following this object (data_addr()).
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
  // _num_oops is already set by the initializer list; the old body also
  // re-assigned it redundantly.
  _has_derived_oops = oopmap->has_derived_oops();
  address addr = data_addr();
  oopmap->copy_and_sort_data_to(addr);
}
720

721
// True if any entry in this map has the given type.
bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    if (oms.current().type() == type) {
      return true;
    }
  }
  return false;
}

#ifdef ASSERT
// Total size of this map in bytes: the object header plus the encoded
// entry bytes (measured by running a stream to the end).
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif
740

741
// Builder that converts a mutable OopMapSet into a single contiguous
// ImmutableOopMapSet allocation; _mapping records, per source map, whether
// it becomes a new immutable map or reuses an earlier identical/empty one.
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

// Bytes needed for one immutable map: header + entry data, 8-byte aligned.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}
748

749
// Compute the total allocation size for the immutable set and, as a side
// effect, fill in _mapping: empty maps share one instance, a map identical
// to its predecessor shares that predecessor, everything else gets fresh
// storage at the running _offset.
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of ours pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);   // room for the 0xff overwrite-detection sentinel
  _required = total;
  return total;
}
791

792
// Construct (via placement new) the pc-offset/map-offset pair for 'map'.
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

// Construct the pair and the ImmutableOopMap it points at; returns the
// number of bytes the map occupies.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

// Populate the whole set according to the _mapping plan computed by
// heap_size(): new maps get built, duplicates/empties only get a pair.
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}
823

824
#ifdef ASSERT
825
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
826
  for (int i = 0; i < 8; ++i) {
827
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
828
  }
829

830
  for (int i = 0; i < set->count(); ++i) {
831
    const ImmutableOopMapPair* pair = set->pair_at(i);
832
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
833
    const ImmutableOopMap* map = pair->get_from(set);
834
    int nr_of_bytes = map->nr_of_bytes();
835
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
836
  }
837
}
838
#endif
839

840
// Construct the immutable set into 'buffer' (debug builds first seed the
// trailing sentinel bytes checked by verify()).
ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

// Compute the required size, allocate from the C heap, and build the set.
ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

// Entry point: convert a mutable OopMapSet into its immutable form.
ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
864

865
// Matches the NEW_C_HEAP_ARRAY allocation done in build().
void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}
868

869
//------------------------------DerivedPointerTable---------------------------
870

871
#if COMPILER2_OR_JVMCI
872

873
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
874
  derived_pointer* _location; // Location of derived pointer, also pointing to base
875
  intptr_t         _offset;   // Offset from base pointer
876
  Entry* volatile  _next;
877

878
  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }
879

880
public:
881
  Entry(derived_pointer* location, intptr_t offset) :
882
    _location(location), _offset(offset), _next(nullptr) {}
883

884
  derived_pointer* location() const { return _location; }
885
  intptr_t offset() const { return _offset; }
886
  Entry* next() const { return _next; }
887

888
  typedef LockFreeStack<Entry, &next_ptr> List;
889
  static List* _list;
890
};
891

892
DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
893
bool DerivedPointerTable::_active = false;
894

895
bool DerivedPointerTable::is_empty() {
896
  return Entry::_list == nullptr || Entry::_list->empty();
897
}
898

899
// Activate the table for a new GC cycle, lazily creating the entry list.
void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotton to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}
911

912
// Record a derived pointer: compute its offset from the base, overwrite the
// derived slot with the base's address (so GC can find/update the base),
// and push an entry so update_pointers() can re-derive it afterwards.
void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}
938

939
// After GC has moved objects: for every recorded entry, read the (updated)
// base oop through the derived slot, re-apply the saved offset, free the
// entry, and deactivate the table.
void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}
969

970
#endif // COMPILER2_OR_JVMCI
971

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.