25
#include "precompiled.hpp"
26
#include "cds/archiveBuilder.hpp"
27
#include "cds/archiveHeapLoader.hpp"
28
#include "cds/archiveHeapWriter.hpp"
29
#include "cds/archiveUtils.hpp"
30
#include "cds/cdsConfig.hpp"
31
#include "cds/cdsEnumKlass.hpp"
32
#include "cds/cdsHeapVerifier.hpp"
33
#include "cds/heapShared.hpp"
34
#include "cds/metaspaceShared.hpp"
35
#include "classfile/classLoaderData.hpp"
36
#include "classfile/javaClasses.inline.hpp"
37
#include "classfile/modules.hpp"
38
#include "classfile/stringTable.hpp"
39
#include "classfile/symbolTable.hpp"
40
#include "classfile/systemDictionary.hpp"
41
#include "classfile/systemDictionaryShared.hpp"
42
#include "classfile/vmClasses.hpp"
43
#include "classfile/vmSymbols.hpp"
44
#include "gc/shared/collectedHeap.hpp"
45
#include "gc/shared/gcLocker.hpp"
46
#include "gc/shared/gcVMOperations.hpp"
47
#include "logging/log.hpp"
48
#include "logging/logStream.hpp"
49
#include "memory/iterator.inline.hpp"
50
#include "memory/resourceArea.hpp"
51
#include "memory/universe.hpp"
52
#include "oops/compressedOops.inline.hpp"
53
#include "oops/fieldStreams.inline.hpp"
54
#include "oops/objArrayOop.inline.hpp"
55
#include "oops/oop.inline.hpp"
56
#include "oops/typeArrayOop.inline.hpp"
57
#include "prims/jvmtiExport.hpp"
58
#include "runtime/fieldDescriptor.inline.hpp"
59
#include "runtime/init.hpp"
60
#include "runtime/javaCalls.hpp"
61
#include "runtime/mutexLocker.hpp"
62
#include "runtime/safepointVerifiers.hpp"
63
#include "utilities/bitMap.inline.hpp"
64
#include "utilities/copy.hpp"
66
#include "gc/g1/g1CollectedHeap.hpp"
69
#if INCLUDE_CDS_JAVA_HEAP
71
struct ArchivableStaticFieldInfo {
72
const char* klass_name;
73
const char* field_name;
78
ArchivableStaticFieldInfo(const char* k, const char* f)
79
: klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
82
return klass_name != nullptr;
86
bool HeapShared::_disable_writing = false;
87
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
89
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
90
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
91
size_t HeapShared::_total_obj_count;
92
size_t HeapShared::_total_obj_size;
95
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
96
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
97
static const char* _test_class_name = nullptr;
98
static const Klass* _test_class = nullptr;
99
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
107
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
108
{"java/lang/Integer$IntegerCache", "archivedCache"},
109
{"java/lang/Long$LongCache", "archivedCache"},
110
{"java/lang/Byte$ByteCache", "archivedCache"},
111
{"java/lang/Short$ShortCache", "archivedCache"},
112
{"java/lang/Character$CharacterCache", "archivedCache"},
113
{"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
114
{"sun/util/locale/BaseLocale", "constantBaseLocales"},
115
{"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
116
{"java/util/ImmutableCollections", "archivedObjects"},
117
{"java/lang/ModuleLayer", "EMPTY_LAYER"},
118
{"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
119
{"jdk/internal/math/FDBigInteger", "archivedCaches"},
127
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
128
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
129
{ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
130
{"java/lang/Module$ArchivedData", "archivedData"},
134
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
135
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
136
OopHandle HeapShared::_roots;
137
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
138
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
139
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
141
// Returns true if ik is the declaring class of any entry in the
// null-terminated fields[] table.
static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}
150
// True if ik declares any of the archived subgraph entry fields
// (either the regular or the full-module-graph table).
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}
155
// Hash an oop by its address. Deliberately does not use the object's
// identity hash, which would mutate the object header.
unsigned HeapShared::oop_hash(oop const& p) {
  return primitive_hash(cast_from_oop<intptr_t>(p));
}
161
// Walks obj's class hierarchy and invokes each class's private
// resetArchivedStates() method (if declared) on obj.
static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != nullptr) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != nullptr) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)(" calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}
183
// Resets transient state on the built-in class loaders before the heap
// is archived (each loader's resetArchivedStates() is invoked).
void HeapShared::reset_archived_object_states(TRAPS) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // The boot loader object is obtained via ClassLoaders.bootLoader();
  // note it is a non-null Java object, distinct from the null
  // class-loader used internally by the VM for boot classes.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}
208
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
210
// True if obj has already been added to the archived-object cache.
bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}
215
// Appends obj to the pending-roots list and returns its root index.
// Must run in the VM thread: _pending_roots is not scanned by GC, so no
// safepoint may move obj while it is only reachable from this list.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}
228
// Returns the roots array. During dumping, returns nullptr when heap
// writing is disabled; at runtime the array must already be loaded.
objArrayOop HeapShared::roots() {
  if (CDSConfig::is_dumping_heap()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr;
    }
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != nullptr, "should have been initialized");
  return roots;
}
244
// Returns the root oop at index; optionally clears the slot afterwards
// so the object can be garbage-collected once the caller has consumed it.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  assert(!_roots.is_empty(), "must have loaded shared heap");
  oop result = roots()->obj_at(index);
  if (clear) {
    clear_root(index);
  }
  return result;
}
255
void HeapShared::clear_root(int index) {
256
assert(index >= 0, "sanity");
257
assert(CDSConfig::is_using_archive(), "must be");
258
if (ArchiveHeapLoader::is_in_use()) {
259
if (log_is_enabled(Debug, cds, heap)) {
260
oop old = roots()->obj_at(index);
261
log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
263
roots()->obj_at_put(index, nullptr);
267
// Records obj as a source object for the archived heap. Returns true on
// success (or if obj was already recorded); false if obj is too large.
bool HeapShared::archive_object(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), obj->size());
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " : %s",
                           p2i(obj), obj->klass()->external_name());
    }

    // Archived module objects need their internal oops patched via roots.
    if (java_lang_Module::is_instance(obj) && Modules::check_archived_module_oop(obj)) {
      Modules::update_oops_in_archived_module(obj, append_root(obj));
    }

    return true;
  }
}
301
class MetaspaceObjToOopHandleTable: public ResourceHashtable<MetaspaceObj*, OopHandle,
306
oop get_oop(MetaspaceObj* ptr) {
307
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
308
OopHandle* handle = get(ptr);
309
if (handle != nullptr) {
310
return handle->resolve();
315
void set_oop(MetaspaceObj* ptr, oop o) {
316
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
317
OopHandle handle(Universe::vm_global(), o);
318
bool is_new = put(ptr, handle);
319
assert(is_new, "cannot set twice");
321
void remove_oop(MetaspaceObj* ptr) {
322
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
323
OopHandle* handle = get(ptr);
324
if (handle != nullptr) {
325
handle->release(Universe::vm_global());
331
// Associates src's scratch resolved-references array with dest.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  _scratch_references_table->set_oop(src, dest);
}
335
// Looks up the scratch resolved-references array recorded for src
// (nullptr if none was recorded).
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
339
// Creates scratch mirrors for all primitive types and allocates the
// scratch-object lookup tables.
void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
    }
  }
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
}
351
// Returns the scratch mirror for a primitive type t.
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}
357
// Returns the scratch mirror recorded for klass k (nullptr if none).
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
361
// Records mirror as the scratch mirror for klass k.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_java_mirror_table->set_oop(k, mirror);
}
365
// Drops the scratch mirror (and, for instance klasses, the scratch
// resolved-references entry) recorded for k.
void HeapShared::remove_scratch_objects(Klass* k) {
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
}
372
void HeapShared::archive_java_mirrors() {
373
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
374
BasicType bt = (BasicType)i;
375
if (!is_reference_type(bt)) {
376
oop m = _scratch_basic_type_mirrors[i].resolve();
377
assert(m != nullptr, "sanity");
378
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
379
assert(success, "sanity");
381
log_trace(cds, heap, mirror)(
382
"Archived %s mirror object from " PTR_FORMAT,
383
type2name(bt), p2i(m));
385
Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
389
GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
390
assert(klasses != nullptr, "sanity");
391
for (int i = 0; i < klasses->length(); i++) {
392
Klass* orig_k = klasses->at(i);
393
oop m = scratch_java_mirror(orig_k);
395
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
396
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
397
guarantee(success, "scratch mirrors must point to only archivable objects");
398
buffered_k->set_archived_java_mirror(append_root(m));
400
log_trace(cds, heap, mirror)(
401
"Archived %s mirror object from " PTR_FORMAT,
402
buffered_k->external_name(), p2i(m));
405
if (buffered_k->is_instance_klass()) {
406
InstanceKlass* ik = InstanceKlass::cast(buffered_k);
407
oop rr = ik->constants()->prepare_resolved_references_for_archiving();
408
if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
409
bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
410
assert(success, "must be");
411
int root_index = append_root(rr);
412
ik->constants()->cache()->set_archived_references(root_index);
419
void HeapShared::archive_strings() {
420
oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
421
bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
425
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
426
StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
429
// Archives a pre-allocated exception instance and returns its root index.
int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _default_subgraph_info, exception);
  assert(success, "sanity");
  return append_root(exception);
}
435
// Marks the native (Klass*) pointer fields of a java.lang.Class object so
// the heap writer can relocate them.
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  }
}
442
// Reports whether the cached entry for src_obj contains oop pointers
// and/or native pointers. src_obj must already be in the cache.
void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}
449
// Flags src_obj's cache entry as containing native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
455
// Top-level driver for heap archiving: collects all archivable objects,
// verifies them, and writes them out through the ArchiveHeapWriter.
// NOTE(review): extraction mangling dropped a few lines here (e.g. any
// trailing cache-teardown call after the write) — verify against upstream.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  // No GC while we hold raw oops in the caches/pending-roots list.
  NoSafepointVerifier nsv;

  _default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);

  // Cache for recording where the archived objects are copied to.
  create_archived_object_cache();

  log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                UseCompressedOops ? p2i(CompressedOops::begin()) :
                                    p2i((address)G1CollectedHeap::heap()->reserved().start()),
                UseCompressedOops ? p2i(CompressedOops::end()) :
                                    p2i((address)G1CollectedHeap::heap()->reserved().end()));
  copy_objects();

  CDSHeapVerifier::verify();
  check_default_subgraph_classes();

  ArchiveHeapWriter::write(_pending_roots, heap_info);
}
478
void HeapShared::copy_interned_strings() {
479
init_seen_objects_table();
481
auto copier = [&] (oop s, bool value_ignored) {
482
assert(s != nullptr, "sanity");
483
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
484
bool success = archive_reachable_objects_from(1, _default_subgraph_info, s);
485
assert(success, "must be");
488
java_lang_String::set_deduplication_forbidden(s);
490
_dumped_interned_strings->iterate_all(copier);
492
delete_seen_objects_table();
495
void HeapShared::copy_special_objects() {
497
init_seen_objects_table();
498
archive_java_mirrors();
500
Universe::archive_exception_instances();
501
delete_seen_objects_table();
504
void HeapShared::copy_objects() {
505
assert(HeapShared::can_write(), "must be");
507
copy_interned_strings();
508
copy_special_objects();
510
archive_object_subgraphs(archive_subgraph_entry_fields,
513
if (CDSConfig::is_dumping_full_module_graph()) {
514
archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
516
Modules::verify_archived_modules();
523
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
524
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
529
// Creates (exactly once) the dump-time subgraph info for klass k.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}
540
// Looks up the previously-initialized subgraph info for klass k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}
548
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
549
assert(CDSConfig::is_dumping_heap(), "dump time only");
550
if (_subgraph_entry_fields == nullptr) {
551
_subgraph_entry_fields =
552
new (mtClass) GrowableArray<int>(10, mtClass);
554
_subgraph_entry_fields->append(static_field_offset);
555
_subgraph_entry_fields->append(HeapShared::append_root(v));
560
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
561
assert(CDSConfig::is_dumping_heap(), "dump time only");
562
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
564
if (_subgraph_object_klasses == nullptr) {
565
_subgraph_object_klasses =
566
new (mtClass) GrowableArray<Klass*>(50, mtClass);
569
assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");
571
if (_k == buffered_k) {
577
if (buffered_k->is_instance_klass()) {
578
assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
579
"must be boot class");
582
if (orig_k == vmClasses::String_klass() ||
583
orig_k == vmClasses::Object_klass()) {
588
check_allowed_klass(InstanceKlass::cast(orig_k));
589
} else if (buffered_k->is_objArray_klass()) {
590
Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
591
if (abk->is_instance_klass()) {
592
assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
593
"must be boot class");
594
check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
596
if (buffered_k == Universe::objectArrayKlass()) {
602
assert(buffered_k->is_typeArray_klass(), "must be");
607
if (log_is_enabled(Debug, cds, heap)) {
608
if (!_subgraph_object_klasses->contains(buffered_k)) {
610
log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
614
_subgraph_object_klasses->append_if_missing(buffered_k);
615
_has_non_early_klasses |= is_non_early_klass(orig_k);
618
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
619
if (ik->module()->name() == vmSymbols::java_base()) {
620
assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
625
if (!ik->module()->is_named() && ik->package() == nullptr) {
629
const char* extra_msg = ", or in an unnamed package of an unnamed module";
631
const char* extra_msg = "";
635
log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
636
ik->external_name(), extra_msg);
637
MetaspaceShared::unrecoverable_writing_error();
640
// True if k (or the bottom klass of an object array) is an instance
// klass that was NOT loaded during the early JVMTI phase.
bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    }
  }
  return false;
}
658
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
660
_entry_field_records = nullptr;
661
_subgraph_object_klasses = nullptr;
662
_is_full_module_graph = info->is_full_module_graph();
664
if (_is_full_module_graph) {
668
_has_non_early_klasses = false;
670
_has_non_early_klasses = info->has_non_early_klasses();
673
if (_has_non_early_klasses) {
676
"Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
677
_k->external_name());
681
GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
682
if (entry_fields != nullptr) {
683
int num_entry_fields = entry_fields->length();
684
assert(num_entry_fields % 2 == 0, "sanity");
685
_entry_field_records =
686
ArchiveBuilder::new_ro_array<int>(num_entry_fields);
687
for (int i = 0 ; i < num_entry_fields; i++) {
688
_entry_field_records->at_put(i, entry_fields->at(i));
693
GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
694
if (subgraph_object_klasses != nullptr) {
695
int num_subgraphs_klasses = subgraph_object_klasses->length();
696
_subgraph_object_klasses =
697
ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
698
for (int i = 0; i < num_subgraphs_klasses; i++) {
699
Klass* subgraph_k = subgraph_object_klasses->at(i);
700
if (log_is_enabled(Info, cds, heap)) {
703
"Archived object klass %s (%2d) => %s",
704
_k->external_name(), i, subgraph_k->external_name());
706
_subgraph_object_klasses->at_put(i, subgraph_k);
707
ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
711
ArchivePtrMarker::mark_pointer(&_k);
712
ArchivePtrMarker::mark_pointer(&_entry_field_records);
713
ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
716
struct CopyKlassSubGraphInfoToArchive : StackObj {
717
CompactHashtableWriter* _writer;
718
CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
720
bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
721
if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
722
ArchivedKlassSubGraphInfoRecord* record =
723
(ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
726
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
727
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
728
u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
729
_writer->add(hash, delta);
742
void HeapShared::write_subgraph_info_table() {
744
DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
745
CompactHashtableStats stats;
747
_run_time_subgraph_info_table.reset();
749
CompactHashtableWriter writer(d_table->_count, &stats);
750
CopyKlassSubGraphInfoToArchive copy(&writer);
751
d_table->iterate(©);
752
writer.dump(&_run_time_subgraph_info_table, "subgraphs");
755
if (ArchiveHeapTestClass != nullptr) {
756
size_t len = strlen(ArchiveHeapTestClass) + 1;
757
Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
758
strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
759
_archived_ArchiveHeapTestClass = array;
762
if (log_is_enabled(Info, cds, heap)) {
767
// Stores the loaded roots array in a global OopHandle (runtime side).
void HeapShared::init_roots(oop roots_oop) {
  if (roots_oop != nullptr) {
    assert(ArchiveHeapLoader::is_in_use(), "must be");
    _roots = OopHandle(Universe::vm_global(), roots_oop);
  }
}
774
// Serializes (write) or deserializes (read) the subgraph info table, plus
// the ArchiveHeapTestClass support data in non-product builds.
// NOTE(review): the #ifndef PRODUCT guard placement was reconstructed
// after extraction mangling — verify against upstream.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}
787
static void verify_the_heap(Klass* k, const char* which) {
788
if (VerifyArchivedFields > 0) {
790
log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
791
which, k->external_name());
794
VMThread::execute(&verify_op);
796
if (VerifyArchivedFields > 1 && is_init_completed()) {
804
log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
805
which, k->external_name());
806
FlagSetting fs1(VerifyBeforeGC, true);
807
FlagSetting fs2(VerifyDuringGC, true);
808
FlagSetting fs3(VerifyAfterGC, true);
809
Universe::heap()->collect(GCCause::_java_lang_system_gc);
820
// Resolves (without initializing) all classes referenced by the archived
// subgraphs. No-op when the archived heap is not in use.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
829
// Resolves the subgraph classes for every entry in the fields[] table.
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}
839
// Resolves the classes of k's archived subgraph; on failure (exception or
// unusable record) clears the subgraph's roots so they can be GC'ed.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
   resolve_or_init_classes_for_subgraph_of(k, false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}
852
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
853
JavaThread* THREAD = current;
854
if (!ArchiveHeapLoader::is_in_use()) {
858
ExceptionMark em(THREAD);
859
const ArchivedKlassSubGraphInfoRecord* record =
860
resolve_or_init_classes_for_subgraph_of(k, true, THREAD);
862
if (HAS_PENDING_EXCEPTION) {
863
CLEAR_PENDING_EXCEPTION;
870
if (record != nullptr) {
871
init_archived_fields_for(k, record);
875
const ArchivedKlassSubGraphInfoRecord*
876
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
877
assert(!CDSConfig::is_dumping_heap(), "Should not be called when dumping heap");
879
if (!k->is_shared()) {
882
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
883
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
886
if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
888
_test_class_record = record;
894
if (record == nullptr) {
895
if (log_is_enabled(Info, cds, heap)) {
896
ResourceMark rm(THREAD);
897
log_info(cds, heap)("subgraph %s is not recorded",
902
if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) {
903
if (log_is_enabled(Info, cds, heap)) {
904
ResourceMark rm(THREAD);
905
log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
911
if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
912
if (log_is_enabled(Info, cds, heap)) {
913
ResourceMark rm(THREAD);
914
log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
920
if (log_is_enabled(Info, cds, heap)) {
922
log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
925
resolve_or_init(k, do_init, CHECK_NULL);
929
Array<Klass*>* klasses = record->subgraph_object_klasses();
930
if (klasses != nullptr) {
931
for (int i = 0; i < klasses->length(); i++) {
932
Klass* klass = klasses->at(i);
933
if (!klass->is_shared()) {
936
resolve_or_init(klass, do_init, CHECK_NULL);
944
// Resolves k (when do_init is false) or initializes it (when true).
// NOTE(review): the do_init branch structure was reconstructed after
// extraction mangling (the two class_loader_data() checks are mutually
// exclusive) — verify against upstream.
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == nullptr) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}
962
// Installs the archived subgraph roots into k's mirror: each
// (field offset, root index) pair from the record is written into the
// corresponding static field, clearing the root slot as it is consumed.
void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != nullptr) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Java code can see the archived sub-graphs via k's mirror from here on.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}
992
// Clears all root slots recorded for k's subgraph so the archived
// objects become unreachable and can be collected.
void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != nullptr) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != nullptr) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}
1008
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
1010
bool _record_klasses_only;
1011
KlassSubGraphInfo* _subgraph_info;
1012
oop _referencing_obj;
1016
static WalkOopAndArchiveClosure* _current;
1017
WalkOopAndArchiveClosure* _last;
1019
WalkOopAndArchiveClosure(int level,
1020
bool record_klasses_only,
1021
KlassSubGraphInfo* subgraph_info,
1024
_record_klasses_only(record_klasses_only),
1025
_subgraph_info(subgraph_info),
1026
_referencing_obj(orig) {
1030
~WalkOopAndArchiveClosure() {
1033
void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1034
void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1037
template <class T> void do_oop_work(T *p) {
1038
oop obj = RawAccess<>::oop_load(p);
1039
if (!CompressedOops::is_null(obj)) {
1040
size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
1042
if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1044
log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
1045
_referencing_obj->klass()->external_name(), field_delta,
1046
p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1047
if (log_is_enabled(Trace, cds, heap)) {
1048
LogTarget(Trace, cds, heap) log;
1050
obj->print_on(&out);
1054
bool success = HeapShared::archive_reachable_objects_from(
1055
_level + 1, _subgraph_info, obj);
1056
assert(success, "VM should have exited with unarchivable objects for _level > 1");
1061
static WalkOopAndArchiveClosure* current() { return _current; }
1062
oop referencing_obj() { return _referencing_obj; }
1063
KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
1066
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
1069
class PointsToOopsChecker : public BasicOopIterateClosure {
1072
template <class T> void check(T *p) {
1073
_result |= (HeapAccess<>::oop_load(p) != nullptr);
1077
PointsToOopsChecker() : _result(false) {}
1078
void do_oop(narrowOop *p) { check(p); }
1079
void do_oop( oop *p) { check(p); }
1080
bool result() { return _result; }
1083
// Builds the CachedOopInfo for obj: the object currently referencing it
// (taken from the active walker, if any) and whether obj points to oops.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
  oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
1095
bool HeapShared::archive_reachable_objects_from(int level,
1096
KlassSubGraphInfo* subgraph_info,
1098
assert(orig_obj != nullptr, "must be");
1100
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1105
log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
1106
MetaspaceShared::unrecoverable_writing_error();
1114
if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _default_subgraph_info) {
1115
log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1116
MetaspaceShared::unrecoverable_writing_error();
1119
if (has_been_seen_during_subgraph_recording(orig_obj)) {
1123
set_has_been_seen_during_subgraph_recording(orig_obj);
1126
bool already_archived = has_been_archived(orig_obj);
1127
bool record_klasses_only = already_archived;
1128
if (!already_archived) {
1129
++_num_new_archived_objs;
1130
if (!archive_object(orig_obj)) {
1133
log_error(cds, heap)(
1134
"Cannot archive the sub-graph referenced from %s object ("
1135
PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
1136
orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1145
MetaspaceShared::unrecoverable_writing_error();
1150
Klass *orig_k = orig_obj->klass();
1151
subgraph_info->add_subgraph_object_klass(orig_k);
1153
WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
1154
orig_obj->oop_iterate(&walker);
1156
if (CDSEnumKlass::is_enum_obj(orig_obj)) {
1157
CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
1196
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1197
const char* klass_name,
1199
const char* field_name) {
1200
assert(CDSConfig::is_dumping_heap(), "dump time only");
1201
assert(k->is_shared_boot_class(), "must be boot class");
1203
oop m = k->java_mirror();
1205
KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1206
oop f = m->obj_field(field_offset);
1208
log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
1210
if (!CompressedOops::is_null(f)) {
1211
if (log_is_enabled(Trace, cds, heap)) {
1212
LogTarget(Trace, cds, heap) log;
1217
bool success = archive_reachable_objects_from(1, subgraph_info, f);
1219
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
1220
klass_name, field_name);
1225
subgraph_info->add_subgraph_entry_field(field_offset, f);
1226
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
1231
subgraph_info->add_subgraph_entry_field(field_offset, nullptr);
1236
class VerifySharedOopClosure: public BasicOopIterateClosure {
1238
void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1239
void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1242
template <class T> void do_oop_work(T *p) {
1243
oop obj = RawAccess<>::oop_load(p);
1244
if (!CompressedOops::is_null(obj)) {
1245
HeapShared::verify_reachable_objects_from(obj);
1250
// Verify the object subgraph rooted at the static reference field at
// field_offset in k's java mirror. A null root is trivially OK.
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}
1261
void HeapShared::verify_subgraph_from(oop orig_obj) {
1262
if (!has_been_archived(orig_obj)) {
1269
init_seen_objects_table();
1270
verify_reachable_objects_from(orig_obj);
1271
delete_seen_objects_table();
1274
// Check that obj — and, by recursing through VerifySharedOopClosure,
// everything reachable from it — has been archived. The seen-objects table
// ensures each object is visited only once.
void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
1289
void HeapShared::check_default_subgraph_classes() {
1290
GrowableArray<Klass*>* klasses = _default_subgraph_info->subgraph_object_klasses();
1291
int num = klasses->length();
1292
for (int i = 0; i < num; i++) {
1293
Klass* subgraph_k = klasses->at(i);
1294
if (log_is_enabled(Info, cds, heap)) {
1296
log_info(cds, heap)(
1297
"Archived object klass (default subgraph %d) => %s",
1298
i, subgraph_k->external_name());
1301
Symbol* name = ArchiveBuilder::current()->get_source_addr(subgraph_k->name());
1302
guarantee(name == vmSymbols::java_lang_Class() ||
1303
name == vmSymbols::java_lang_String() ||
1304
name == vmSymbols::java_lang_ArithmeticException() ||
1305
name == vmSymbols::java_lang_NullPointerException() ||
1306
name == vmSymbols::java_lang_InternalError() ||
1307
name == vmSymbols::object_array_signature() ||
1308
name == vmSymbols::byte_array_signature() ||
1309
name == vmSymbols::char_array_signature(),
1310
"default subgraph can have only these objects");
1314
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
1315
int HeapShared::_num_new_walked_objs;
1316
int HeapShared::_num_new_archived_objs;
1317
int HeapShared::_num_old_recorded_klasses;
1319
int HeapShared::_num_total_subgraph_recordings = 0;
1320
int HeapShared::_num_total_walked_objs = 0;
1321
int HeapShared::_num_total_archived_objs = 0;
1322
int HeapShared::_num_total_recorded_klasses = 0;
1323
int HeapShared::_num_total_verifications = 0;
1325
// Returns true if obj was already visited during the current subgraph walk.
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != nullptr;
}
1329
// Mark obj as visited in the current subgraph walk and bump the
// walked-objects counter. Must not be called twice for the same object.
void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put_when_absent(obj, true);
  _seen_objects_table->maybe_grow();
  ++ _num_new_walked_objs;
}
1336
// Prepare for recording the subgraph(s) reachable from k's archived fields:
// set up the subgraph info and the seen-objects table, and reset the
// per-subgraph counters.
void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  // Remember the klass count recorded so far so done_recording_subgraph()
  // can report only the delta for this subgraph.
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}
1345
// Finish recording for k: log per-class statistics, tear down the
// seen-objects table, and fold the per-subgraph counters into the totals.
void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs += _num_new_walked_objs;
  _num_total_archived_objs += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}
1361
class ArchivableStaticFieldFinder: public FieldClosure {
1363
Symbol* _field_name;
1367
ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1368
_ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1370
virtual void do_field(fieldDescriptor* fd) {
1371
if (fd->name() == _field_name) {
1372
assert(!_found, "fields can never be overloaded");
1373
if (is_reference_type(fd->field_type())) {
1375
_offset = fd->offset();
1379
bool found() { return _found; }
1380
int offset() { return _offset; }
1383
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1385
for (int i = 0; fields[i].valid(); i++) {
1386
ArchivableStaticFieldInfo* info = &fields[i];
1387
TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1388
TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1392
bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
1393
const char* test_class_name = ArchiveHeapTestClass;
1395
bool is_test_class = false;
1396
const char* test_class_name = "";
1399
if (is_test_class) {
1400
log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
1403
Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
1404
if (HAS_PENDING_EXCEPTION) {
1405
CLEAR_PENDING_EXCEPTION;
1407
st.print("Fail to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
1408
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1411
if (!k->is_instance_klass()) {
1413
st.print("Fail to initialize archive heap: %s is not an instance class", info->klass_name);
1414
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1417
InstanceKlass* ik = InstanceKlass::cast(k);
1418
assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
1419
"Only support boot classes");
1421
if (is_test_class) {
1422
if (ik->module()->is_named()) {
1426
st.print("ArchiveHeapTestClass %s is not in unnamed module", test_class_name);
1427
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1430
if (ik->package() != nullptr) {
1433
st.print("ArchiveHeapTestClass %s is not in unnamed package", test_class_name);
1434
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1437
if (ik->module()->name() != vmSymbols::java_base()) {
1442
st.print("%s is not in java.base module", info->klass_name);
1443
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1447
if (is_test_class) {
1448
log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", test_class_name);
1450
ik->initialize(CHECK);
1452
ArchivableStaticFieldFinder finder(ik, field_name);
1453
ik->do_local_static_fields(&finder);
1454
if (!finder.found()) {
1456
st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
1457
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1461
info->offset = finder.offset();
1465
// Dump-time setup: allocate the dump-time subgraph-info table and resolve
// every entry in the archivable-static-field tables. The full-module-graph
// table is processed only when full module graph dumping is enabled.
void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
  if (CDSConfig::is_dumping_full_module_graph()) {
    init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
  }
}
1475
void HeapShared::setup_test_class(const char* test_class_name) {
1476
ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields;
1477
int num_slots = sizeof(archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
1478
assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
1479
assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");
1481
if (test_class_name != nullptr) {
1482
p[num_slots - 2].klass_name = test_class_name;
1483
p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
1490
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
1491
if (_test_class != nullptr) {
1492
if (ik == _test_class) {
1495
Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
1496
if (klasses == nullptr) {
1500
for (int i = 0; i < klasses->length(); i++) {
1501
Klass* k = klasses->at(i);
1504
if (k->is_instance_klass()) {
1505
name = InstanceKlass::cast(k)->name();
1506
} else if (k->is_objArray_klass()) {
1507
Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
1508
if (!bk->is_instance_klass()) {
1522
if (name->index_of_at(0, "/", 1) >= 0) {
1535
// One-time dump-time initialization (no-op unless heap writing is possible):
// install the optional ArchiveHeapTestClass, allocate the interned-strings
// table, and resolve the subgraph entry fields.
void HeapShared::init_for_dumping(TRAPS) {
  if (HeapShared::can_write()) {
    setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    init_subgraph_entry_fields(CHECK);
  }
}
1543
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1544
bool is_full_module_graph) {
1545
_num_total_subgraph_recordings = 0;
1546
_num_total_walked_objs = 0;
1547
_num_total_archived_objs = 0;
1548
_num_total_recorded_klasses = 0;
1549
_num_total_verifications = 0;
1558
for (int i = 0; fields[i].valid(); ) {
1559
ArchivableStaticFieldInfo* info = &fields[i];
1560
const char* klass_name = info->klass_name;
1561
start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1567
for (; fields[i].valid(); i++) {
1568
ArchivableStaticFieldInfo* f = &fields[i];
1569
if (f->klass_name != klass_name) {
1573
archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1574
f->offset, f->field_name);
1576
done_recording_subgraph(info->klass, klass_name);
1579
log_info(cds, heap)("Archived subgraph records = %d",
1580
_num_total_subgraph_recordings);
1581
log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
1582
log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
1583
log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
1586
for (int i = 0; fields[i].valid(); i++) {
1587
ArchivableStaticFieldInfo* f = &fields[i];
1588
verify_subgraph_from_static_field(f->klass, f->offset);
1590
log_info(cds, heap)(" Verified %d references", _num_total_verifications);
1599
void HeapShared::add_to_dumped_interned_strings(oop string) {
1600
assert_at_safepoint();
1601
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
1603
_dumped_interned_strings->put_if_absent(string, true, &created);
1605
_dumped_interned_strings->maybe_grow();
1613
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1616
int _num_total_oops;
1619
FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
1620
: _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
1622
virtual void do_oop(narrowOop* p) {
1623
assert(UseCompressedOops, "sanity");
1626
if (!CompressedOops::is_null(v)) {
1627
size_t idx = p - (narrowOop*)_start;
1628
_oopmap->set_bit(idx);
1633
virtual void do_oop(oop* p) {
1634
assert(!UseCompressedOops, "sanity");
1636
if ((*p) != nullptr) {
1637
size_t idx = p - (oop*)_start;
1638
_oopmap->set_bit(idx);
1643
int num_total_oops() const { return _num_total_oops; }
1644
int num_null_oops() const { return _num_null_oops; }
1649
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1650
size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
1651
ResourceBitMap oopmap(num_bits);
1653
HeapWord* p = region.start();
1654
HeapWord* end = region.end();
1655
FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
1659
oop o = cast_to_oop(p);
1660
o->oop_iterate(&finder);
1665
log_info(cds, heap)("calculate_oopmap: objects = %6d, oop fields = %7d (nulls = %7d)",
1666
num_objs, finder.num_total_oops(), finder.num_null_oops());
1672
void HeapShared::count_allocation(size_t size) {
1673
_total_obj_count ++;
1674
_total_obj_size += size;
1675
for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
1676
if (size <= (size_t(1) << i)) {
1678
_alloc_size[i] += size;
1684
static double avg_size(size_t size, size_t count) {
1687
avg = double(size * HeapWordSize) / double(count);
1692
void HeapShared::print_stats() {
1693
size_t huge_count = _total_obj_count;
1694
size_t huge_size = _total_obj_size;
1696
for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
1697
size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
1698
size_t count = _alloc_count[i];
1699
size_t size = _alloc_size[i];
1700
log_info(cds, heap)(SIZE_FORMAT_W(8) " objects are <= " SIZE_FORMAT_W(-6)
1701
" bytes (total " SIZE_FORMAT_W(8) " bytes, avg %8.1f bytes)",
1702
count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
1703
huge_count -= count;
1707
log_info(cds, heap)(SIZE_FORMAT_W(8) " huge objects (total " SIZE_FORMAT_W(8) " bytes"
1708
", avg %8.1f bytes)",
1709
huge_count, huge_size * HeapWordSize,
1710
avg_size(huge_size, huge_count));
1711
log_info(cds, heap)(SIZE_FORMAT_W(8) " total objects (total " SIZE_FORMAT_W(8) " bytes"
1712
", avg %8.1f bytes)",
1713
_total_obj_count, _total_obj_size * HeapWordSize,
1714
avg_size(_total_obj_size, _total_obj_count));
1717
bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
1718
TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
1719
InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle(), Handle());
1723
TempNewSymbol field_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_FIELD);
1724
TempNewSymbol field_signature = SymbolTable::new_symbol("Ljdk/internal/module/ArchivedBootLayer;");
1726
if (k->find_field(field_name, field_signature, true, &fd) != nullptr) {
1727
oop m = k->java_mirror();
1728
oop f = m->obj_field(fd.offset());
1729
if (CompressedOops::is_null(f)) {