test_bufferNodeAllocator.cpp
/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "threadHelper.inline.hpp"
#include "unittest.hpp"

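// TestSupport exposes the allocator's internal transfer of pending-release
// nodes onto the free list, so the tests can force that transfer and check
// free_count() deterministically.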
class BufferNode::TestSupport : AllStatic {
public:
  static bool try_transfer_pending(Allocator* allocator) {
    return allocator->_free_list.try_transfer_pending();
  }

  class CompletedList;
  class AllocatorThread;
  class ProcessorThread;
};

typedef BufferNode::TestSupport::CompletedList CompletedList;
typedef BufferNode::TestSupport::AllocatorThread AllocatorThread;
typedef BufferNode::TestSupport::ProcessorThread ProcessorThread;

// Some basic testing of BufferNode::Allocator.
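// The test allocates fresh nodes (the free list starts empty), releases
// them, forces the pending nodes onto the free list, and then verifies
// that subsequent allocations reuse the released nodes in LIFO order.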
TEST_VM(BufferNodeAllocatorTest, test) {
  const size_t buffer_capacity = 256;
  BufferNode::Allocator allocator("Test Buffer Allocator", buffer_capacity);
  ASSERT_EQ(buffer_capacity, allocator.buffer_capacity());

  // Allocate some new nodes for use in testing.
  BufferNode* nodes[10] = {};
  const size_t node_count = ARRAY_SIZE(nodes);
  for (size_t i = 0; i < node_count; ++i) {
    ASSERT_EQ(0u, allocator.free_count());
    nodes[i] = allocator.allocate();
    ASSERT_EQ(nullptr, nodes[i]->next());
  }

  // Release the nodes, adding them to the allocator's free list.
  for (size_t i = 0; i < node_count; ++i) {
    allocator.release(nodes[i]);
  }
  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(&allocator));
  ASSERT_EQ(node_count, allocator.free_count());

  // Allocate nodes from the free list.
  for (size_t i = 0; i < node_count; ++i) {
    size_t j = node_count - i;
    ASSERT_EQ(nodes[j - 1], allocator.allocate());
  }
  ASSERT_EQ(0u, allocator.free_count());

  // Release nodes back to the free list.
  for (size_t i = 0; i < node_count; ++i) {
    allocator.release(nodes[i]);
  }
  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(&allocator));
  ASSERT_EQ(node_count, allocator.free_count());
}

// Stress test with lock-free allocator and completed buffer list.
// Completed buffer list pop avoids ABA by also being in a critical
// section that is synchronized by the allocator's release.
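// (pop() runs inside a GlobalCounter critical section; nodes released to
// the allocator are parked on its pending list and only become available
// for reallocation after a GlobalCounter synchronization, so a node seen
// by an in-progress pop cannot be recycled and re-pushed underneath it.)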

class BufferNode::TestSupport::CompletedList {
  BufferNode::Stack _completed_list;

public:
  CompletedList() : _completed_list() {}

  ~CompletedList() {
    assert(_completed_list.empty(), "completed list not empty");
  }

  void push(BufferNode* node) {
    assert(node != nullptr, "precondition");
    _completed_list.push(*node);
  }

  BufferNode* pop() {
    GlobalCounter::CriticalSection cs(Thread::current());
    return _completed_list.pop();
  }
};

// Simulate a mutator thread, allocating buffers and adding them to
// the completed buffer list.
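// Each allocator thread counts its allocations locally and adds them to
// the shared total only once, when it exits, so the hot loop does not
// touch the shared counter.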
class BufferNode::TestSupport::AllocatorThread : public JavaTestThread {
  BufferNode::Allocator* _allocator;
  CompletedList* _cbl;
  volatile size_t* _total_allocations;
  volatile bool* _continue_running;
  size_t _allocations;

public:
  AllocatorThread(Semaphore* post,
                  BufferNode::Allocator* allocator,
                  CompletedList* cbl,
                  volatile size_t* total_allocations,
                  volatile bool* continue_running) :
    JavaTestThread(post),
    _allocator(allocator),
    _cbl(cbl),
    _total_allocations(total_allocations),
    _continue_running(continue_running),
    _allocations(0)
  {}

  virtual void main_run() {
    while (Atomic::load_acquire(_continue_running)) {
      BufferNode* node = _allocator->allocate();
      _cbl->push(node);
      ++_allocations;
      ThreadBlockInVM tbiv(this); // Safepoint check.
    }
    tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
    Atomic::add(_total_allocations, _allocations);
  }
};

// Simulate a GC thread, taking buffers from the completed buffer list
// and returning them to the allocator.
class BufferNode::TestSupport::ProcessorThread : public JavaTestThread {
  BufferNode::Allocator* _allocator;
  CompletedList* _cbl;
  volatile bool* _continue_running;

public:
  ProcessorThread(Semaphore* post,
                  BufferNode::Allocator* allocator,
                  CompletedList* cbl,
                  volatile bool* continue_running) :
    JavaTestThread(post),
    _allocator(allocator),
    _cbl(cbl),
    _continue_running(continue_running)
  {}

  virtual void main_run() {
    bool shutdown_requested = false;
    while (true) {
      BufferNode* node = _cbl->pop();
      if (node != nullptr) {
        _allocator->release(node);
      } else if (shutdown_requested) {
        return;
      } else if (!Atomic::load_acquire(_continue_running)) {
        // To avoid a race that could leave buffers in the list after this
        // thread has shut down, continue processing until the list is empty
        // *after* the shut down request has been received.
        shutdown_requested = true;
      }
      ThreadBlockInVM tbiv(this); // Safepoint check.
    }
  }
};

static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {

  // Deallocation is slower than allocation, so let's create more
  // deallocation threads to prevent too large a buildup of free
  // nodes (footprint).
  constexpr uint num_allocator_threads = 4;
  constexpr uint num_processor_threads = 6;
  constexpr uint milliseconds_to_run = 1000;

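  // Each JavaTestThread signals the post semaphore when it finishes, so
  // the main thread below waits on it once per started thread.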
  Semaphore post;
  volatile size_t total_allocations = 0;
  volatile bool allocator_running = true;
  volatile bool processor_running = true;

  ProcessorThread* proc_threads[num_processor_threads] = {};
  for (uint i = 0; i < num_processor_threads; ++i) {
    proc_threads[i] = new ProcessorThread(&post,
                                          allocator,
                                          cbl,
                                          &processor_running);
    proc_threads[i]->doit();
  }

  AllocatorThread* alloc_threads[num_allocator_threads] = {};
  for (uint i = 0; i < num_allocator_threads; ++i) {
    alloc_threads[i] = new AllocatorThread(&post,
                                           allocator,
                                           cbl,
                                           &total_allocations,
                                           &allocator_running);
    alloc_threads[i]->doit();
  }

  JavaThread* this_thread = JavaThread::current();
  tty->print_cr("Stressing allocator for %u ms", milliseconds_to_run);
  {
    ThreadInVMfromNative invm(this_thread);
    this_thread->sleep(milliseconds_to_run);
  }
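  // Stop the allocator threads first and wait for them to exit, then stop
  // the processor threads; the processors keep draining the completed list
  // until it is empty after the shutdown request, so no buffers are left
  // behind.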
  Atomic::release_store(&allocator_running, false);
  for (uint i = 0; i < num_allocator_threads; ++i) {
    ThreadInVMfromNative invm(this_thread);
    post.wait_with_safepoint_check(this_thread);
  }
  Atomic::release_store(&processor_running, false);
  for (uint i = 0; i < num_processor_threads; ++i) {
    ThreadInVMfromNative invm(this_thread);
    post.wait_with_safepoint_check(this_thread);
  }
  ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(allocator));
  tty->print_cr("total allocations: " SIZE_FORMAT, total_allocations);
  tty->print_cr("allocator free count: " SIZE_FORMAT, allocator->free_count());
}

TEST_VM(BufferNodeAllocatorTest, stress_free_list_allocator) {
  const size_t buffer_capacity = DEFAULT_PADDING_SIZE / sizeof(void*);
  BufferNode::Allocator allocator("Test Allocator", buffer_capacity);
  CompletedList completed;
  run_test(&allocator, &completed);
}