25
#include "precompiled.hpp"
26
#include "code/codeBlob.hpp"
27
#include "code/codeCache.hpp"
28
#include "code/stubs.hpp"
29
#include "memory/allocation.inline.hpp"
30
#include "oops/oop.inline.hpp"
31
#include "runtime/mutexLocker.hpp"
32
#include "utilities/align.hpp"
33
#include "utilities/checkedCast.hpp"
68
// Implementation of StubQueue
//
// Allocates a BufferBlob from the code cache to back the queue, aligns the
// usable region to stub_alignment(), and initializes the (empty) queue
// indices. Exits the VM if the code cache cannot satisfy the request.
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  intptr_t size = align_up(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, checked_cast<int>(size));
  if( blob == nullptr) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
  }
  _stub_interface  = stub_interface;
  // Queue starts out empty: begin == end == 0, no stubs.
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  // Shrink the buffer inwards so both ends are stub-aligned; all queue
  // offsets below are relative to _stub_buffer (aligned_start).
  address aligned_start = align_up(blob->content_begin(), stub_alignment());
  address aligned_end = align_down(blob->content_end(), stub_alignment());
  int aligned_size = aligned_end - aligned_start;
  _buffer_size = aligned_size;
  _buffer_limit = aligned_size;
  _stub_buffer = aligned_start;
}
StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed, and the backing
  // BufferBlob lives in the code cache, so nothing needs to be done here.
}
void StubQueue::deallocate_unused_tail() {
100
CodeBlob* blob = CodeCache::find_blob((void*)_stub_buffer);
101
CodeCache::free_unused_tail(blob, used_space());
103
address aligned_start = align_up(blob->content_begin(), stub_alignment());
104
address aligned_end = align_down(blob->content_end(), stub_alignment());
105
int aligned_size = aligned_end - aligned_start;
106
_buffer_size = aligned_size;
107
_buffer_limit = aligned_size;
110
// Returns the stub whose code range covers pc, or nullptr if pc does not
// point into this queue's buffer or into any queued stub.
Stub* StubQueue::stub_containing(address pc) const {
  if (contains(pc)) {
    for (Stub* s = first(); s != nullptr; s = next(s)) {
      if (stub_contains(s, pc)) return s;
    }
  }
  return nullptr;
}
// Convenience: request space for a stub and immediately commit it.
// Returns the committed stub, or nullptr if there was no room.
Stub* StubQueue::request_committed(int code_size) {
  Stub* s = request(code_size);
  if (s != nullptr) commit(code_size);
  return s;
}
// Total footprint of a stub holding code_size bytes of code: the stub
// header plus the code, with the end rounded up to stub_alignment() so the
// next stub starts aligned.
int StubQueue::compute_stub_size(Stub* stub, int code_size) {
  address stub_begin = (address) stub;
  address code_begin = stub_code_begin(stub);
  address code_end = align_up(code_begin + code_size, stub_alignment());
  return (int)(code_end - stub_begin);
}
// Reserves (but does not commit) space for a stub with requested_code_size
// bytes of code, wrapping the circular buffer if necessary. Returns the
// reserved stub, or nullptr if there is not enough room.
// Note: on success the queue mutex stays locked; commit() releases it.
Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != nullptr) _mutex->lock_without_safepoint_check();
  Stub* s = current_stub();
  int requested_size = compute_stub_size(s, requested_code_size);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        stub_initialize(s, requested_size);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  // Re-test after a possible wrap-around above.
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    s = current_stub();  // _queue_end may have moved; re-fetch
    stub_initialize(s, requested_size);
    return s;
  }
  // Not enough space left.
  if (_mutex != nullptr) _mutex->unlock();
  return nullptr;
}
void StubQueue::commit(int committed_code_size) {
172
assert(committed_code_size > 0, "committed_code_size must be > 0");
173
Stub* s = current_stub();
174
int committed_size = compute_stub_size(s, committed_code_size);
175
assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
176
stub_initialize(s, committed_size);
177
_queue_end += committed_size;
179
if (_mutex != nullptr) _mutex->unlock();
180
debug_only(stub_verify(s);)
184
void StubQueue::remove_first() {
185
if (number_of_stubs() == 0) return;
187
debug_only(stub_verify(s);)
189
_queue_begin += stub_size(s);
190
assert(_queue_begin <= _buffer_limit, "sanity check");
191
if (_queue_begin == _queue_end) {
196
_buffer_limit = _buffer_size;
197
} else if (_queue_begin == _buffer_limit) {
200
_buffer_limit = _buffer_size;
207
void StubQueue::remove_first(int n) {
208
int i = MIN2(n, number_of_stubs());
209
while (i-- > 0) remove_first();
213
void StubQueue::remove_all(){
214
debug_only(verify();)
215
remove_first(number_of_stubs());
216
assert(number_of_stubs() == 0, "sanity check");
220
void StubQueue::verify() {
222
if (_stub_buffer == nullptr) return;
223
MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
225
guarantee(0 <= _buffer_size, "buffer size must be positive");
226
guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
227
guarantee(0 <= _queue_begin && _queue_begin < _buffer_limit, "_queue_begin out of bounds");
228
guarantee(0 <= _queue_end && _queue_end <= _buffer_limit, "_queue_end out of bounds");
230
guarantee(_queue_begin % stub_alignment() == 0, "_queue_begin not aligned");
231
guarantee(_queue_end % stub_alignment() == 0, "_queue_end not aligned");
233
if (is_contiguous()) {
234
guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
238
for (Stub* s = first(); s != nullptr; s = next(s)) {
242
guarantee(n == number_of_stubs(), "number of stubs inconsistent");
243
guarantee(_queue_begin != _queue_end || n == 0, "buffer indices must be the same");
247
void StubQueue::print() {
248
ConditionalMutexLocker lock(_mutex, _mutex != nullptr, Mutex::_no_safepoint_check_flag);
249
for (Stub* s = first(); s != nullptr; s = next(s)) {