/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "utilities/globalCounter.inline.hpp"
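
// Note on the mask layout (declared in interpreter/oopMapCache.hpp): every local
// slot and expression stack slot is described by bits_per_entry bits in the bit
// mask. The oop bit marks slots holding a reference at the given bci, the dead
// bit marks slots whose value is no longer live. Small masks are stored inline in
// _bit_mask[]; masks larger than small_mask_limit are allocated on the C heap and
// _bit_mask[0] then holds the pointer to that array (see allocate_bit_mask() and
// deallocate_bit_mask() below).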
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

  static void deallocate(OopMapCacheEntry* const entry);

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = nullptr;
  }
};

// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

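// GenerateOopMap computes the oop map by abstract interpretation of the method's
// bytecodes. This subclass is only interested in a single bci: it reports no
// results of its own (report_results() and possible_gc_point() return false) and
// instead calls result_for_basicblock(_bci), which re-interprets the enclosing
// basic block; the fill_stackmap_for_opcodes() override below records the
// CellTypeState only when the stream reaches the requested bci and forwards it
// to OopMapCacheEntry::set_mask().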

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const    { return false; }
  virtual bool possible_gc_point         (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog      (int nof_gc_points);
  virtual void fill_stackmap_epilog      ();
  virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs,
                                          CellTypeState* vars,
                                          CellTypeState* stack,
                                          int stack_top);
  virtual void fill_init_vars            (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method, bci) and initializes the entry
  bool compute_map(Thread* current);
  int  size();
};

OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


bool OopMapForCacheEntry::compute_map(Thread* current) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    if (!GenerateOopMap::compute_map(current)) {
      fatal("Unrecoverable verification or out-of-memory error");
      return false;
    }
    result_for_basicblock(_bci);
  }
  return true;
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; }
  void offset_do(int offset)             { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                    { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
}

InterpreterOopMap::~InterpreterOopMap() {
  if (has_valid_mask() && mask_size() > small_mask_limit) {
    assert(_bit_mask[0] != 0, "should have pointer to C heap");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
  }
}

bool InterpreterOopMap::is_empty() const {
  bool result = _method == nullptr;
  assert(_method != nullptr || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method                = nullptr;
  _mask_size             = USHRT_MAX; // This value should cause a failure quickly
  _bci                   = 0;
  _expression_stack_size = 0;
  _num_oops              = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}

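// The mask is consumed bits_per_entry bits at a time: the loop below shifts a
// sliding 'mask' left by bits_per_entry for every entry and reloads the next
// bit-mask word once the mask has been shifted out to zero. Only the oop bit is
// tested here; dead slots are simply skipped.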
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
    if (is_dead(i)) tty->print("%d+ ", i);
    else
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}

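// For native methods there is no bytecode to interpret: the mask is derived
// directly from the method's signature. MaskFillerForNative zero-fills the mask
// and sets the oop bit for every object parameter reported by the
// NativeSignatureIterator; fill_for_native() below sizes the mask from
// size_of_parameters().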
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t* _mask;                    // the bit mask to be filled
  int        _size;                    // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_byte()   { /* ignore */ }
  void pass_short()  { /* ignore */ }
  void pass_int()    { /* ignore */ }
  void pass_long()   { /* ignore */ }
  void pass_float()  { /* ignore */ }
  void pass_double() { /* ignore */ }
  void pass_object() { set_one(offset()); }

  MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    iterate();
  }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  const bool log = log_is_enabled(Trace, interpreter, oopmap);
  LogStream st(Log(interpreter, oopmap)::trace());

  if (log) st.print("Locals (%d): ", max_locals);
  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)              ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();

  if (log) st.print("Stack (%d): ", stack_top);
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(checked_cast<unsigned short>(bci)); // bci is always u2
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    OopMapForCacheEntry gen(method, bci, this);
    if (!gen.compute_map(Thread::current())) {
      fatal("Unrecoverable verification or out-of-memory error");
    }
  }
}

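// set_mask() packs the CellTypeState arrays produced by abstract interpretation
// into the bit mask in a single pass: it walks the locals first, switches to the
// expression stack after max_locals entries, and writes out a finished bit-mask
// word whenever the sliding mask wraps around to zero. _num_oops is counted
// along the way.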
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  _num_oops = 0;
  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
      _num_oops++;
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}

void OopMapCacheEntry::deallocate(OopMapCacheEntry* const entry) {
  entry->flush();
  FREE_C_HEAP_OBJ(entry);
}

// Implementation of OopMapCache

void InterpreterOopMap::copy_from(const OopMapCacheEntry* src) {
  // The expectation is that this InterpreterOopMap is recently created
  // and empty. It is used to get a copy of a cached entry.
  assert(!has_valid_mask(), "InterpreterOopMap object can only be filled once");
  assert(src->has_valid_mask(), "Cannot copy entry with an invalid mask");

  set_method(src->method());
  set_bci(src->bci());
  set_mask_size(src->mask_size());
  set_expression_stack_size(src->expression_stack_size());
  _num_oops = src->num_oops();

  // Is the bit mask contained in the entry?
  if (src->mask_size() <= small_mask_limit) {
    memcpy(_bit_mask, src->_bit_mask, mask_word_size() * BytesPerWord);
  } else {
    _bit_mask[0] = (uintptr_t) NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
    memcpy((void*) _bit_mask[0], (void*) src->_bit_mask[0], mask_word_size() * BytesPerWord);
  }
}

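// The hash below selects the starting slot in the fixed-size _array; lookup()
// then probes probe_depth consecutive slots, with entry_at()/put_at() wrapping
// the index via "% size". A weak hash only costs extra collisions; it does not
// affect correctness.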
inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}

OopMapCacheEntry* volatile OopMapCache::_old_entries = nullptr;

OopMapCache::OopMapCache() {
  for (int i = 0; i < size; i++) _array[i] = nullptr;
}


OopMapCache::~OopMapCache() {
  // Deallocate oop maps that are allocated out-of-line
  flush();
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return Atomic::load_acquire(&(_array[i % size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg(&_array[i % size], old, entry) == old;
}

void OopMapCache::flush() {
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr) {
      _array[i] = nullptr; // no barrier, only called in OopMapCache destructor
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, interpreter, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = nullptr;
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

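// Readers scan the table inside a GlobalCounter::CriticalSection so that an entry
// concurrently displaced by a writer cannot be freed from under them; writers
// publish new entries with a CAS (put_at) and hand displaced entries to
// enqueue_for_cleanup() instead of deallocating them immediately.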
// Lookup or compute/cache the entry.
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  int probe = hash_value_for(method, bci);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
      ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
       method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match.
  // Need a critical section to avoid race against concurrent reclamation.
  {
    GlobalCounter::CriticalSection cs(Thread::current());
    for (int i = 0; i < probe_depth; i++) {
      OopMapCacheEntry *entry = entry_at(probe + i);
      if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
        entry_for->copy_from(entry);
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
        return;
      }
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->copy_from(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    OopMapCacheEntry::deallocate(tmp);
    return;
  }

  // First search for an empty slot
  for (int i = 0; i < probe_depth; i++) {
    OopMapCacheEntry* entry = entry_at(probe + i);
    if (entry == nullptr) {
      if (put_at(probe + i, tmp, nullptr)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    // Cannot deallocate old entry on the spot: it can still be used by readers
    // that got a reference to it before we were able to replace it in the map.
    // Instead of synchronizing on GlobalCounter here and incurring heavy thread
    // walk, we do this clean up out of band.
    enqueue_for_cleanup(old);
  } else {
    OopMapCacheEntry::deallocate(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}

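// Displaced entries are pushed onto the lock-free _old_entries list (a simple
// CAS-retry loop) and stay there until cleanup() can prove that no reader still
// holds a pointer to them.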
void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  while (true) {
    OopMapCacheEntry* head = Atomic::load(&_old_entries);
    entry->_next = head;
    if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
      // Enqueued successfully.
      break;
    }
  }

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
                                   entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

bool OopMapCache::has_cleanup_work() {
  return Atomic::load(&_old_entries) != nullptr;
}

void OopMapCache::try_trigger_cleanup() {
  // See if we can take the lock for the notification without blocking.
  // This allows triggering the cleanup from GC paths, which can hold
  // the service lock for e.g. oop iteration in the service thread.
  if (has_cleanup_work() && Service_lock->try_lock_without_rank_check()) {
    Service_lock->notify_all();
    Service_lock->unlock();
  }
}

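// cleanup() drains the list built by enqueue_for_cleanup(). The
// GlobalCounter::write_synchronize() call pairs with the CriticalSection taken by
// readers in lookup(): once it returns, no reader can still be using one of the
// unlinked entries, so they can be deallocated safely.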
void OopMapCache::cleanup() {
  OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
  if (entry == nullptr) {
    // No work.
    return;
  }

  // About to delete the entries that might still be accessed by other threads
  // on the lookup path. Need to sync up with them before proceeding.
  GlobalCounter::write_synchronize();

  while (entry != nullptr) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
                                     entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    OopMapCacheEntry::deallocate(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  if (tmp->has_valid_mask()) {
    entry->copy_from(tmp);
  }
  OopMapCacheEntry::deallocate(tmp);
}