#include "precompiled.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "threadHelper.inline.hpp"
#include "unittest.hpp"
class BufferNode::TestSupport : AllStatic {
public:
  static bool try_transfer_pending(Allocator* allocator) {
    return allocator->_free_list.try_transfer_pending();
  }

  class CompletedList;
  class AllocatorThread;
  class ProcessorThread;
};
typedef BufferNode::TestSupport::CompletedList CompletedList;
typedef BufferNode::TestSupport::AllocatorThread AllocatorThread;
typedef BufferNode::TestSupport::ProcessorThread ProcessorThread;
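// Basic single-threaded sanity checks: allocate fresh nodes, release them
// to the free list, and verify they are handed out again from the free
// list in LIFO order.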
TEST_VM(BufferNodeAllocatorTest, test) {
  const size_t buffer_capacity = 256;
  BufferNode::Allocator allocator("Test Buffer Allocator", buffer_capacity);
  ASSERT_EQ(buffer_capacity, allocator.buffer_capacity());

  // Allocate some new nodes; the free list stays empty.
  BufferNode* nodes[10] = {};
  const size_t node_count = ARRAY_SIZE(nodes);
  for (size_t i = 0; i < node_count; ++i) {
    ASSERT_EQ(0u, allocator.free_count());
    nodes[i] = allocator.allocate();
    ASSERT_EQ(nullptr, nodes[i]->next());
  }

  // Release the nodes, adding them to the allocator's free list.
  for (size_t i = 0; i < node_count; ++i) {
    allocator.release(nodes[i]);
  }
  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(&allocator));
  ASSERT_EQ(node_count, allocator.free_count());

  // Allocate from the free list; nodes come back in reverse (LIFO) order.
  for (size_t i = 0; i < node_count; ++i) {
    size_t j = node_count - i;
    ASSERT_EQ(nodes[j - 1], allocator.allocate());
  }
  ASSERT_EQ(0u, allocator.free_count());

  // Release the nodes again, refilling the free list.
  for (size_t i = 0; i < node_count; ++i) {
    allocator.release(nodes[i]);
  }
  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(&allocator));
  ASSERT_EQ(node_count, allocator.free_count());
}
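// Stress test: several threads allocate buffers and push them onto a
// shared completed-buffer list, while other threads pop buffers from that
// list and release them back to the allocator.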
// Lock-free stack of completed buffers shared between the threads.
class BufferNode::TestSupport::CompletedList {
  BufferNode::Stack _completed_list;

public:
  CompletedList() : _completed_list() {}

  ~CompletedList() {
    assert(_completed_list.empty(), "completed list not empty");
  }

  void push(BufferNode* node) {
    assert(node != nullptr, "precondition");
    _completed_list.push(*node);
  }

  // Pop inside a GlobalCounter critical section, so a node being popped
  // cannot be recycled out from under us by a concurrent release (ABA).
  BufferNode* pop() {
    GlobalCounter::CriticalSection cs(Thread::current());
    return _completed_list.pop();
  }
};
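// Allocator threads simulate mutators: allocate a buffer from the
// allocator and push it onto the completed list, until told to stop.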
class BufferNode::TestSupport::AllocatorThread : public JavaTestThread {
  BufferNode::Allocator* _allocator;
  CompletedList* _cbl;
  volatile size_t* _total_allocations;
  volatile bool* _continue_running;
  size_t _allocations;

public:
  AllocatorThread(Semaphore* post,
                  BufferNode::Allocator* allocator,
                  CompletedList* cbl,
                  volatile size_t* total_allocations,
                  volatile bool* continue_running) :
    JavaTestThread(post),
    _allocator(allocator),
    _cbl(cbl),
    _total_allocations(total_allocations),
    _continue_running(continue_running),
    _allocations(0)
  {}

  virtual void main_run() {
    while (Atomic::load_acquire(_continue_running)) {
      BufferNode* node = _allocator->allocate();
      _cbl->push(node);
      ++_allocations;
      ThreadBlockInVM tbiv(this); // Safepoint check.
    }
    tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
    Atomic::add(_total_allocations, _allocations);
  }
};
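// Processor threads simulate buffer consumers: pop buffers from the
// completed list and release them back to the allocator, draining the
// list before exiting once shutdown has been requested.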
class BufferNode::TestSupport::ProcessorThread : public JavaTestThread {
  BufferNode::Allocator* _allocator;
  CompletedList* _cbl;
  volatile bool* _continue_running;

public:
  ProcessorThread(Semaphore* post,
                  BufferNode::Allocator* allocator,
                  CompletedList* cbl,
                  volatile bool* continue_running) :
    JavaTestThread(post),
    _allocator(allocator),
    _cbl(cbl),
    _continue_running(continue_running)
  {}

  virtual void main_run() {
    bool shutdown_requested = false;
    while (true) {
      BufferNode* node = _cbl->pop();
      if (node != nullptr) {
        _allocator->release(node);
      } else if (shutdown_requested) {
        return;
      } else if (!Atomic::load_acquire(_continue_running)) {
        // To avoid a race that could leave buffers in the list, keep
        // processing until the list is empty once a shutdown request
        // has been seen.
        shutdown_requested = true;
      }
      ThreadBlockInVM tbiv(this); // Safepoint check.
    }
  }
};
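// Stress driver: start the processor and allocator threads, let them run
// for a while, then stop the allocators first and the processors second,
// so the completed list is drained before the test finishes.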
static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {
  // Use more processor threads than allocator threads, so released
  // buffers do not pile up on the completed list.
  constexpr uint num_allocator_threads = 4;
  constexpr uint num_processor_threads = 6;
  constexpr uint milliseconds_to_run = 1000;

  Semaphore post;
  volatile size_t total_allocations = 0;
  volatile bool allocator_running = true;
  volatile bool processor_running = true;

  ProcessorThread* proc_threads[num_processor_threads] = {};
  for (uint i = 0; i < num_processor_threads; ++i) {
    proc_threads[i] = new ProcessorThread(&post,
                                          allocator,
                                          cbl,
                                          &processor_running);
    proc_threads[i]->doit();
  }

  AllocatorThread* alloc_threads[num_allocator_threads] = {};
  for (uint i = 0; i < num_allocator_threads; ++i) {
    alloc_threads[i] = new AllocatorThread(&post,
                                           allocator,
                                           cbl,
                                           &total_allocations,
                                           &allocator_running);
    alloc_threads[i]->doit();
  }

  JavaThread* this_thread = JavaThread::current();
  tty->print_cr("Stressing allocator for %u ms", milliseconds_to_run);
  {
    ThreadInVMfromNative invm(this_thread);
    this_thread->sleep(milliseconds_to_run);
  }

  // Stop the allocator threads and wait for each of them to post.
  Atomic::release_store(&allocator_running, false);
  for (uint i = 0; i < num_allocator_threads; ++i) {
    ThreadInVMfromNative invm(this_thread);
    post.wait_with_safepoint_check(this_thread);
  }

  // Stop the processor threads, which drain the completed list before
  // exiting, and wait for each of them to post.
  Atomic::release_store(&processor_running, false);
  for (uint i = 0; i < num_processor_threads; ++i) {
    ThreadInVMfromNative invm(this_thread);
    post.wait_with_safepoint_check(this_thread);
  }

  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(allocator));
  tty->print_cr("total allocations: " SIZE_FORMAT, total_allocations);
  tty->print_cr("allocator free count: " SIZE_FORMAT, allocator->free_count());
}
TEST_VM(BufferNodeAllocatorTest, stress_free_list_allocator) {
  const size_t buffer_capacity = DEFAULT_PADDING_SIZE / sizeof(void*);
  BufferNode::Allocator allocator("Test Allocator", buffer_capacity);
  CompletedList completed;
  run_test(&allocator, &completed);
}