/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/systemMemoryBarrier.hpp"

static void post_safepoint_begin_event(EventSafepointBegin& event,
                                       uint64_t safepoint_id,
                                       int thread_count,
                                       int critical_thread_count) {
  if (event.should_commit()) {
    event.set_safepointId(safepoint_id);
    event.set_totalThreadCount(thread_count);
    event.set_jniCriticalThreadCount(critical_thread_count);
    event.commit();
  }
}


static void post_safepoint_synchronize_event(EventSafepointStateSynchronization& event,
                                             uint64_t safepoint_id,
                                             int initial_number_of_threads,
                                             int threads_waiting_to_block,
                                             int iterations) {
  if (event.should_commit()) {
    event.set_safepointId(safepoint_id);
    event.set_initialThreadCount(initial_number_of_threads);
    event.set_runningThreadCount(threads_waiting_to_block);
    event.set_iterations(checked_cast<u4>(iterations));
    event.commit();
  }
}

static void post_safepoint_end_event(EventSafepointEnd& event, uint64_t safepoint_id) {
  if (event.should_commit()) {
    event.set_safepointId(safepoint_id);
    event.commit();
  }
}

// SafepointCheck
SafepointStateTracker::SafepointStateTracker(uint64_t safepoint_id, bool at_safepoint)
  : _safepoint_id(safepoint_id), _at_safepoint(at_safepoint) {}

bool SafepointStateTracker::safepoint_state_changed() {
  return _safepoint_id != SafepointSynchronize::safepoint_id() ||
    _at_safepoint != SafepointSynchronize::is_at_safepoint();
}

// --------------------------------------------------------------------------------------------------
// Implementation of Safepoint begin/end

SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
int SafepointSynchronize::_waiting_to_block = 0;
volatile uint64_t SafepointSynchronize::_safepoint_counter = 0;
uint64_t SafepointSynchronize::_safepoint_id = 0;
const uint64_t SafepointSynchronize::InactiveSafepointCounter = 0;
int SafepointSynchronize::_current_jni_active_count = 0;

WaitBarrier* SafepointSynchronize::_wait_barrier;

static bool timeout_error_printed = false;

// Statistic related
static jlong _safepoint_begin_time = 0;
static volatile int _nof_threads_hit_polling_page = 0;

void SafepointSynchronize::init(Thread* vmthread) {
  // WaitBarrier should never be destroyed since we will have
  // threads waiting on it while exiting.
  _wait_barrier = new WaitBarrier(vmthread);
  SafepointTracing::init();
}

void SafepointSynchronize::increment_jni_active_count() {
  assert(Thread::current()->is_VM_thread(), "Only VM thread may increment");
  ++_current_jni_active_count;
}

void SafepointSynchronize::decrement_waiting_to_block() {
  assert(_waiting_to_block > 0, "sanity check");
  assert(Thread::current()->is_VM_thread(), "Only VM thread may decrement");
  --_waiting_to_block;
}

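// Examines the safepoint state of a single thread. Returns true once the thread
// no longer counts as running for the current safepoint; returns false if the
// VM thread still has to wait for it.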
bool SafepointSynchronize::thread_not_running(ThreadSafepointState *cur_state) {
  if (!cur_state->is_running()) {
    // Robustness: asserted in the caller, but handle/tolerate it for release bits.
    LogTarget(Error, safepoint) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("Illegal initial state detected: ");
      cur_state->print_on(&ls);
    }
    return true;
  }
  cur_state->examine_state_of_thread(SafepointSynchronize::safepoint_counter());
  if (!cur_state->is_running()) {
    return true;
  }
  LogTarget(Trace, safepoint) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    cur_state->print_on(&ls);
  }
  return false;
}

#ifdef ASSERT
static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) {
  int a = 0;
  const ThreadSafepointState *tmp_tss = tss_head;
  while (tmp_tss != nullptr) {
    ++a;
    assert(tmp_tss->is_running(), "Illegal initial state");
    tmp_tss = tmp_tss->get_next();
  }
  assert(a == still_running, "Must be the same");
}
#endif // ASSERT

static void back_off(int64_t start_time) {
  // We start with fine-grained nanosleeping until a millisecond has
  // passed, at which point we resort to plain naked_short_sleep.
  if (os::javaTimeNanos() - start_time < NANOSECS_PER_MILLISEC) {
    os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
  } else {
    os::naked_short_sleep(1);
  }
}

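// Spins until every JavaThread has reached a safepoint-safe state. Threads that
// are still running are kept on a singly linked list of ThreadSafepointState so
// later passes only revisit the stragglers. Stores the number of threads still
// running after the first pass in *initial_running and returns the number of
// iterations performed.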
int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running)
{
  JavaThreadIteratorWithHandle jtiwh;

#ifdef ASSERT
  for (; JavaThread *cur = jtiwh.next(); ) {
    assert(cur->safepoint_state()->is_running(), "Illegal initial state");
  }
  jtiwh.rewind();
#endif // ASSERT

  // Iterate through all threads until it has been determined how to stop them all at a safepoint.
  int still_running = nof_threads;
  ThreadSafepointState *tss_head = nullptr;
  ThreadSafepointState **p_prev = &tss_head;
  for (; JavaThread *cur = jtiwh.next(); ) {
    ThreadSafepointState *cur_tss = cur->safepoint_state();
    assert(cur_tss->get_next() == nullptr, "Must be null");
    if (thread_not_running(cur_tss)) {
      --still_running;
    } else {
      *p_prev = cur_tss;
      p_prev = cur_tss->next_ptr();
    }
  }
  *p_prev = nullptr;

  DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)

  *initial_running = still_running;

  // If there is no thread still running, we are already done.
  if (still_running <= 0) {
    assert(tss_head == nullptr, "Must be empty");
    return 1;
  }

  int iterations = 1; // The first iteration is above.
  int64_t start_time = os::javaTimeNanos();

  do {
    // Check if this has taken too long:
    if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
      print_safepoint_timeout();
    }

    p_prev = &tss_head;
    ThreadSafepointState *cur_tss = tss_head;
    while (cur_tss != nullptr) {
      assert(cur_tss->is_running(), "Illegal initial state");
      if (thread_not_running(cur_tss)) {
        --still_running;
        *p_prev = nullptr;
        ThreadSafepointState *tmp = cur_tss;
        cur_tss = cur_tss->get_next();
        tmp->set_next(nullptr);
      } else {
        *p_prev = cur_tss;
        p_prev = cur_tss->next_ptr();
        cur_tss = cur_tss->get_next();
      }
    }

    DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)

    if (still_running > 0) {
      back_off(start_time);
    }

    iterations++;
  } while (still_running > 0);

  assert(tss_head == nullptr, "Must be empty");

  return iterations;
}

void SafepointSynchronize::arm_safepoint() {
  // Begin the process of bringing the system to a safepoint.
  // Java threads can be in several different states and are
  // stopped by different mechanisms:
  //
  //  1. Running interpreted
  //     When executing branching/returning byte codes interpreter
  //     checks if the poll is armed, if so blocks in SS::block().
  //  2. Running in native code
  //     When returning from the native code, a Java thread must check
  //     the safepoint _state to see if we must block.  If the
  //     VM thread sees a Java thread in native, it does
  //     not wait for this thread to block.  The order of the memory
  //     writes and reads of both the safepoint state and the Java
  //     threads state is critical.  In order to guarantee that the
  //     memory writes are serialized with respect to each other,
  //     the VM thread issues a memory barrier instruction.
  //  3. Running compiled Code
  //     Compiled code reads the local polling page that
  //     is set to fault if we are trying to get to a safepoint.
  //  4. Blocked
  //     A thread which is blocked will not be allowed to return from the
  //     block condition until the safepoint operation is complete.
  //  5. In VM or Transitioning between states
  //     If a Java thread is currently running in the VM or transitioning
  //     between states, the safepointing code will poll the thread state
  //     until the thread blocks itself when it attempts transitions to a
  //     new state or locking a safepoint checked monitor.

  // We must never miss a thread with correct safepoint id, so we must make sure we arm
  // the wait barrier for the next safepoint id/counter.
  // Arming must be done after resetting _current_jni_active_count, _waiting_to_block.
  _wait_barrier->arm(static_cast<int>(_safepoint_counter + 1));

  assert((_safepoint_counter & 0x1) == 0, "must be even");
  // The store to _safepoint_counter must happen after any stores in arming.
  Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);

  // We are synchronizing
  OrderAccess::storestore(); // Ordered with _safepoint_counter
  _state = _synchronizing;

  // Arming the per thread poll while having _state != _not_synchronized means safepointing
  log_trace(safepoint)("Setting thread local yield flag for threads");
  OrderAccess::storestore(); // storestore, global state -> local state
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
    // Make sure the threads start polling, it is time to yield.
    SafepointMechanism::arm_local_poll(cur);
  }
  if (UseSystemMemoryBarrier) {
    SystemMemoryBarrier::emit(); // storestore|storeload, global state -> local state
  } else {
    OrderAccess::fence(); // storestore|storeload, global state -> local state
  }
}

// Roll all threads forward to a safepoint and suspend them all
void SafepointSynchronize::begin() {
  assert(Thread::current()->is_VM_thread(), "Only VM thread may execute a safepoint");

  EventSafepointBegin begin_event;
  SafepointTracing::begin(VMThread::vm_op_type());

  Universe::heap()->safepoint_synchronize_begin();

  // By getting the Threads_lock, we assure that no threads are about to start or
  // exit. It is released again in SafepointSynchronize::end().
  Threads_lock->lock();

  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");

  int nof_threads = Threads::number_of_threads();

  _nof_threads_hit_polling_page = 0;

  log_debug(safepoint)("Safepoint synchronization initiated using %s wait barrier. (%d threads)", _wait_barrier->description(), nof_threads);

  // Reset the count of active JNI critical threads
  _current_jni_active_count = 0;

  // Set number of threads to wait for
  _waiting_to_block = nof_threads;

  jlong safepoint_limit_time = 0;
  if (SafepointTimeout) {
    // Set the limit time, so that it can be compared to see if this has taken
    // too long to complete.
    safepoint_limit_time = SafepointTracing::start_of_safepoint() + (jlong)(SafepointTimeoutDelay * NANOSECS_PER_MILLISEC);
    timeout_error_printed = false;
  }

  EventSafepointStateSynchronization sync_event;
  int initial_running = 0;

  // Arms the safepoint, _current_jni_active_count and _waiting_to_block must be set before.
  arm_safepoint();

  // Will spin until all threads are safe.
  int iterations = synchronize_threads(safepoint_limit_time, nof_threads, &initial_running);
  assert(_waiting_to_block == 0, "No thread should be running");

#ifndef PRODUCT
  // Mark all threads
  if (VerifyCrossModifyFence) {
    JavaThreadIteratorWithHandle jtiwh;
    for (; JavaThread *cur = jtiwh.next(); ) {
      cur->set_requires_cross_modify_fence(true);
    }
  }

  if (safepoint_limit_time != 0) {
    jlong current_time = os::javaTimeNanos();
    if (safepoint_limit_time < current_time) {
      log_warning(safepoint)("# SafepointSynchronize: Finished after "
                    INT64_FORMAT_W(6) " ms",
                    (int64_t)(current_time - SafepointTracing::start_of_safepoint()) / (NANOUNITS / MILLIUNITS));
    }
  }
#endif

  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");

  // Record state
  _state = _synchronized;

  OrderAccess::fence();

  // Set the new id
  ++_safepoint_id;

#ifdef ASSERT
  // Make sure all the threads were visited.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
    assert(cur->was_visited_for_critical_count(_safepoint_counter), "missed a thread");
  }
#endif // ASSERT

  // Update the count of active JNI critical regions
  GCLocker::set_jni_lock_count(_current_jni_active_count);

  post_safepoint_synchronize_event(sync_event,
                                   _safepoint_id,
                                   initial_running,
                                   _waiting_to_block, iterations);

  SafepointTracing::synchronized(nof_threads, initial_running, _nof_threads_hit_polling_page);

  post_safepoint_begin_event(begin_event, _safepoint_id, nof_threads, _current_jni_active_count);
}

void SafepointSynchronize::disarm_safepoint() {
  uint64_t active_safepoint_counter = _safepoint_counter;
  {
    JavaThreadIteratorWithHandle jtiwh;
#ifdef ASSERT
    // A pending_exception cannot be installed during a safepoint.  The threads
    // may install an async exception after they come back from a safepoint into
    // pending_exception after they unblock.  But that should happen later.
    for (; JavaThread *cur = jtiwh.next(); ) {
      assert (!(cur->has_pending_exception() &&
                cur->safepoint_state()->is_at_poll_safepoint()),
              "safepoint installed a pending exception");
    }
#endif // ASSERT

    OrderAccess::fence(); // keep read and write of _state from floating up
    assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");

    // Change state first to _not_synchronized.
    // No threads should see _synchronized when running.
    _state = _not_synchronized;

    // Set the next dormant (even) safepoint id.
    assert((_safepoint_counter & 0x1) == 1, "must be odd");
    Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);

    OrderAccess::fence(); // Keep the local state from floating up.

    jtiwh.rewind();
    for (; JavaThread *current = jtiwh.next(); ) {
      // Clear the visited flag to ensure that the critical counts are collected properly.
      DEBUG_ONLY(current->reset_visited_for_critical_count(active_safepoint_counter);)
      ThreadSafepointState* cur_state = current->safepoint_state();
      assert(!cur_state->is_running(), "Thread not suspended at safepoint");
      cur_state->restart(); // TSS _running
      assert(cur_state->is_running(), "safepoint state has not been reset");
    }
  } // ~JavaThreadIteratorWithHandle

  // Release threads lock, so threads can be created/destroyed again.
  Threads_lock->unlock();

  // Wake threads after local state is correctly set.
  _wait_barrier->disarm();
}

// Wake up all threads, so they are ready to resume execution after the safepoint
// operation has been carried out
void SafepointSynchronize::end() {
  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
  EventSafepointEnd event;
  assert(Thread::current()->is_VM_thread(), "Only VM thread can execute a safepoint");

  disarm_safepoint();

  Universe::heap()->safepoint_synchronize_end();

  SafepointTracing::end();

  post_safepoint_end_event(event, safepoint_id());
}

// Methods for determining if a JavaThread is safepoint safe.

// False means unsafe with undetermined state.
// True means a determined state, but it may be an unsafe state.
// If called from a non-safepoint context safepoint_count MUST be InactiveSafepointCounter.
bool SafepointSynchronize::try_stable_load_state(JavaThreadState *state, JavaThread *thread, uint64_t safepoint_count) {
  assert((safepoint_count != InactiveSafepointCounter &&
          Thread::current() == (Thread*)VMThread::vm_thread() &&
          SafepointSynchronize::_state != _not_synchronized)
         || safepoint_count == InactiveSafepointCounter, "Invalid check");

  // To handle the thread_blocked state on the backedge of the WaitBarrier from
  // previous safepoint and reading the reset value (0/InactiveSafepointCounter) we
  // re-read state after we read thread safepoint id. The JavaThread changes its
  // thread state from thread_blocked before resetting safepoint id to 0.
  // This guarantees the second read will be from an updated thread state. It can
  // either be different state making this an unsafe state or it can see blocked
  // again. When we see blocked twice with a 0 safepoint id, either:
  // - It is normally blocked, e.g. on Mutex, TBIVM.
  // - It was in SS:block(), looped around to SS:block() and is blocked on the WaitBarrier.
  // - It was in SS:block() but now on a Mutex.
  // All of these cases are safe.

  *state = thread->thread_state();
  OrderAccess::loadload();
  uint64_t sid = thread->safepoint_state()->get_safepoint_id();  // Load acquire
  if (sid != InactiveSafepointCounter && sid != safepoint_count) {
    // In an old safepoint, state not relevant.
    return false;
  }
  return *state == thread->thread_state();
}

static bool safepoint_safe_with(JavaThread *thread, JavaThreadState state) {
  switch(state) {
  case _thread_in_native:
    // native threads are safe if they have no java stack or have walkable stack
    return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();

  case _thread_blocked:
    // On wait_barrier or blocked.
    // Blocked threads should already have walkable stack.
    assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
    return true;

  default:
    return false;
  }
}

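// Returns true if the thread can be processed by a handshake without stopping it:
// it is either already terminated or observed in a stable, safepoint-safe state.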
bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
  if (thread->is_terminated()) {
    return true;
  }
  JavaThreadState stable_state;
  if (try_stable_load_state(&stable_state, thread, InactiveSafepointCounter)) {
    return safepoint_safe_with(thread, stable_state);
  }
  return false;
}


// -------------------------------------------------------------------------------------------------------
// Implementation of Safepoint blocking point

void SafepointSynchronize::block(JavaThread *thread) {
  assert(thread != nullptr, "thread must be set");

  // Threads shouldn't block if they are in the middle of printing, but...
  ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());

  // Only bail from the block() call if the thread is gone from the
  // thread list; starting to exit should still block.
  if (thread->is_terminated()) {
     // block current thread if we come here from native code when VM is gone
     thread->block_if_vm_exited();

     // otherwise do nothing
     return;
  }

  JavaThreadState state = thread->thread_state();
  thread->frame_anchor()->make_walkable();

  uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();

  // We have no idea where the VMThread is, it might even be at next safepoint.
  // So we can miss this poll, but stop at next.

  // Load dependent store, it must not pass loading of safepoint_id.
  thread->safepoint_state()->set_safepoint_id(safepoint_id); // Release store

  // This part we can skip if we notice we miss or are in a future safepoint.
  OrderAccess::storestore();
  // Load in wait barrier should not float up
  thread->set_thread_state_fence(_thread_blocked);

  _wait_barrier->wait(static_cast<int>(safepoint_id));
  assert(_state != _synchronized, "Can't be");

  // If barrier is disarmed stop store from floating above loads in barrier.
  OrderAccess::loadstore();
  thread->set_thread_state(state);

  // Then we reset the safepoint id to inactive.
  thread->safepoint_state()->reset_safepoint_id(); // Release store

  OrderAccess::fence();

  guarantee(thread->safepoint_state()->get_safepoint_id() == InactiveSafepointCounter,
            "The safepoint id should be set only in block path");

  // cross_modify_fence is done by SafepointMechanism::process_if_requested
  // which is the only caller here.
}

// ------------------------------------------------------------------------------------------------------
// Exception handlers


void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
  assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
  thread->set_thread_state(_thread_in_vm);

  // Enable WXWrite: the function is called implicitly from java code.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));

  if (log_is_enabled(Info, safepoint, stats)) {
    Atomic::inc(&_nof_threads_hit_polling_page);
  }

  ThreadSafepointState* state = thread->safepoint_state();

  state->handle_polling_page_exception();

  thread->set_thread_state(_thread_in_Java);
}


void SafepointSynchronize::print_safepoint_timeout() {
  if (!timeout_error_printed) {
    timeout_error_printed = true;
    // Print out the thread info which didn't reach the safepoint for debugging
    // purposes (useful when there are lots of threads in the debugger).
    LogTarget(Warning, safepoint) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);

      ls.cr();
      ls.print_cr("# SafepointSynchronize::begin: Timeout detected:");
      ls.print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
      ls.print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
      for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
        if (cur_thread->safepoint_state()->is_running()) {
          ls.print("# ");
          cur_thread->print_on(&ls);
          ls.cr();
        }
      }
      ls.print_cr("# SafepointSynchronize::begin: (End of list)");
    }
  }

  // To debug the long safepoint, specify both AbortVMOnSafepointTimeout &
  // ShowMessageBoxOnError.
  if (AbortVMOnSafepointTimeout && (os::elapsedTime() * MILLIUNITS > AbortVMOnSafepointTimeoutDelay)) {
    // Send the blocking thread a signal to terminate and write an error file.
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
      if (cur_thread->safepoint_state()->is_running()) {
        if (!os::signal_thread(cur_thread, SIGILL, "blocking a safepoint")) {
          break; // Could not send signal. Report fatal error.
        }
        // Give cur_thread a chance to report the error and terminate the VM.
        os::naked_sleep(3000);
      }
    }
    fatal("Safepoint sync time longer than %.6f ms detected when executing %s.",
          SafepointTimeoutDelay, VMThread::vm_operation()->name());
  }
}

// -------------------------------------------------------------------------------------------------------
// Implementation of ThreadSafepointState

ThreadSafepointState::ThreadSafepointState(JavaThread *thread)
  : _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false),
    _safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(nullptr) {
}

void ThreadSafepointState::create(JavaThread *thread) {
  ThreadSafepointState *state = new ThreadSafepointState(thread);
  thread->set_safepoint_state(state);
}

void ThreadSafepointState::destroy(JavaThread *thread) {
  if (thread->safepoint_state()) {
    delete(thread->safepoint_state());
    thread->set_safepoint_state(nullptr);
  }
}

uint64_t ThreadSafepointState::get_safepoint_id() const {
  return Atomic::load_acquire(&_safepoint_id);
}

void ThreadSafepointState::reset_safepoint_id() {
  Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
}

void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) {
  Atomic::release_store(&_safepoint_id, safepoint_id);
}

void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {
  assert(is_running(), "better be running or just have hit safepoint poll");

  JavaThreadState stable_state;
  if (!SafepointSynchronize::try_stable_load_state(&stable_state, _thread, safepoint_count)) {
    // We could not get stable state of the JavaThread.
    // Consider it running and just return.
    return;
  }

  if (safepoint_safe_with(_thread, stable_state)) {
    account_safe_thread();
    return;
  }

  // All other thread states will continue to run until they
  // transition and self-block in state _blocked
  // Safepoint polling in compiled code causes the Java threads to do the same.
  // Note: new threads may require a malloc so they must be allowed to finish

  assert(is_running(), "examine_state_of_thread on non-running thread");
  return;
}

void ThreadSafepointState::account_safe_thread() {
  SafepointSynchronize::decrement_waiting_to_block();
  if (_thread->in_critical()) {
    // Notice that this thread is in a critical section
    SafepointSynchronize::increment_jni_active_count();
  }
  DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());)
  assert(!_safepoint_safe, "Must be unsafe before safe");
  _safepoint_safe = true;
}

void ThreadSafepointState::restart() {
  assert(_safepoint_safe, "Must be safe before unsafe");
  _safepoint_safe = false;
}

void ThreadSafepointState::print_on(outputStream *st) const {
  const char *s = _safepoint_safe ? "_at_safepoint" : "_running";

  st->print_cr("Thread: " INTPTR_FORMAT
              "  [0x%2x] State: %s _at_poll_safepoint %d",
               p2i(_thread), _thread->osthread()->thread_id(), s, _at_poll_safepoint);

  _thread->print_thread_state_on(st);
}

// ---------------------------------------------------------------------------------------------------------------------

// Process pending operation.
void ThreadSafepointState::handle_polling_page_exception() {
  JavaThread* self = thread();
  assert(self == JavaThread::current(), "must be self");

  // Step 1: Find the nmethod from the return address
  address real_return_addr = self->saved_exception_pc();

  CodeBlob *cb = CodeCache::find_blob(real_return_addr);
  assert(cb != nullptr && cb->is_nmethod(), "return address should be in nmethod");
  nmethod* nm = cb->as_nmethod();

  // Find frame of caller
  frame stub_fr = self->last_frame();
  CodeBlob* stub_cb = stub_fr.cb();
  assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
  RegisterMap map(self,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  frame caller_fr = stub_fr.sender(&map);

  // Should only be poll_return or poll
  assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" );

  // This is a poll immediately before a return. The exception handling code
  // has already had the effect of causing the return to occur, so the execution
  // will continue immediately after the call. In addition, the oopmap at the
  // return point does not mark the return value as an oop (if it is), so
  // it needs a handle here to be updated.
  if( nm->is_at_poll_return(real_return_addr) ) {
    // See if return type is an oop.
    bool return_oop = nm->method()->is_returning_oop();
    HandleMark hm(self);
    Handle return_value;
    if (return_oop) {
      // The oop result has been saved on the stack together with all
      // the other registers. In order to preserve it over GCs we need
      // to keep it in a handle.
      oop result = caller_fr.saved_oop_result(&map);
      assert(oopDesc::is_oop_or_null(result), "must be oop");
      return_value = Handle(self, result);
      assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    }

    // We get here if compiled return polls found a reason to call into the VM.
    // One condition for that is that the top frame is not yet safe to use.
    // The following stack watermark barrier poll will catch such situations.
    StackWatermarkSet::after_unwind(self);

    // Process pending operation
    SafepointMechanism::process_if_requested_with_exit_check(self, true /* check asyncs */);

    // restore oop result, if any
    if (return_oop) {
      caller_fr.set_saved_oop_result(&map, return_value());
    }
  }

  // This is a safepoint poll. Verify the return address and block.
  else {

    // verify the blob built the "return address" correctly
    assert(real_return_addr == caller_fr.pc(), "must match");

    set_at_poll_safepoint(true);
    // Process pending operation
    // We never deliver an async exception at a polling point as the
    // compiler may not have an exception handler for it (polling at
    // a return point is ok though). We will check for a pending async
    // exception below and deoptimize if needed. We also cannot deoptimize
    // and still install the exception here because live registers needed
    // during deoptimization are clobbered by the exception path. The
    // exception will just be delivered once we get into the interpreter.
    SafepointMechanism::process_if_requested_with_exit_check(self, false /* check asyncs */);
    set_at_poll_safepoint(false);

    if (self->has_async_exception_condition()) {
      Deoptimization::deoptimize_frame(self, caller_fr.id());
      log_info(exceptions)("deferred async exception at compiled safepoint");
    }

    // If an exception has been installed we must verify that the top frame wasn't deoptimized.
    if (self->has_pending_exception() ) {
      RegisterMap map(self,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::skip,
                      RegisterMap::WalkContinuation::skip);
      frame caller_fr = stub_fr.sender(&map);
      if (caller_fr.is_deoptimized_frame()) {
        // The exception path will destroy registers that are still
        // live and will be needed during deoptimization, so if we
        // have an exception now things are messed up. We only check
        // at this scope because for a poll return it is ok to deoptimize
        // while having a pending exception since the call we are returning
        // from already collides with exception handling registers and
        // so there is no issue (the exception handling path kills call
        // result registers but this is ok since the exception kills
        // the result anyway).
        fatal("Exception installed and deoptimization is pending");
      }
    }
  }
}


// -------------------------------------------------------------------------------------------------------
// Implementation of SafepointTracing

jlong SafepointTracing::_last_safepoint_begin_time_ns = 0;
jlong SafepointTracing::_last_safepoint_sync_time_ns = 0;
jlong SafepointTracing::_last_safepoint_end_time_ns = 0;
jlong SafepointTracing::_last_app_time_ns = 0;
int SafepointTracing::_nof_threads = 0;
int SafepointTracing::_nof_running = 0;
int SafepointTracing::_page_trap = 0;
VM_Operation::VMOp_Type SafepointTracing::_current_type;
jlong     SafepointTracing::_max_sync_time = 0;
jlong     SafepointTracing::_max_vmop_time = 0;
uint64_t  SafepointTracing::_op_count[VM_Operation::VMOp_Terminating] = {0};

void SafepointTracing::init() {
  // Application start
  _last_safepoint_end_time_ns = os::javaTimeNanos();
}

// Helper method to print the header.
static void print_header(outputStream* st) {
  // The number of spaces is significant here, and should match the format
  // specifiers in print_statistics().

  st->print("VM Operation                 "
            "[ threads: total initial_running ]"
            "[ time:       sync    vmop      total ]");

  st->print_cr(" page_trap_count");
}

// This prints a nice table.  To get the statistics to not shift due to the logging uptime
// decorator, use the option as: -Xlog:safepoint+stats:[outputfile]:none
void SafepointTracing::statistics_log() {
  LogTarget(Info, safepoint, stats) lt;
  assert (lt.is_enabled(), "should only be called when printing statistics is enabled");
  LogStream ls(lt);

  static int _cur_stat_index = 0;

  // Print header every 30 entries
  if ((_cur_stat_index % 30) == 0) {
    print_header(&ls);
    _cur_stat_index = 1;  // wrap
  } else {
    _cur_stat_index++;
  }

  ls.print("%-28s [       "
           INT32_FORMAT_W(8) "        " INT32_FORMAT_W(8) " "
           "]",
           VM_Operation::name(_current_type),
           _nof_threads,
           _nof_running);
  ls.print("[       "
           INT64_FORMAT_W(10) " " INT64_FORMAT_W(10) " " INT64_FORMAT_W(10) " ]",
           (int64_t)(_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns),
           (int64_t)(_last_safepoint_end_time_ns - _last_safepoint_sync_time_ns),
           (int64_t)(_last_safepoint_end_time_ns - _last_safepoint_begin_time_ns));

  ls.print_cr(INT32_FORMAT_W(16), _page_trap);
}

// This method will be called when VM exits. This tries to summarize the sampling.
// Current thread may already be deleted, so don't use ResourceMark.
void SafepointTracing::statistics_exit_log() {
  if (!log_is_enabled(Info, safepoint, stats)) {
    return;
  }
  for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
    if (_op_count[index] != 0) {
      log_info(safepoint, stats)("%-28s" UINT64_FORMAT_W(10), VM_Operation::name(index),
               _op_count[index]);
    }
  }

  log_info(safepoint, stats)("Maximum sync time  " INT64_FORMAT" ns",
                              (int64_t)(_max_sync_time));
  log_info(safepoint, stats)("Maximum vm operation time (except for Exit VM operation)  "
                              INT64_FORMAT " ns",
                              (int64_t)(_max_vmop_time));
}

void SafepointTracing::begin(VM_Operation::VMOp_Type type) {
  _op_count[type]++;
  _current_type = type;

  // update the time stamp to begin recording safepoint time
  _last_safepoint_begin_time_ns = os::javaTimeNanos();
  _last_safepoint_sync_time_ns = 0;

  _last_app_time_ns = _last_safepoint_begin_time_ns - _last_safepoint_end_time_ns;
  _last_safepoint_end_time_ns = 0;

  RuntimeService::record_safepoint_begin(_last_app_time_ns);
}

void SafepointTracing::synchronized(int nof_threads, int nof_running, int traps) {
  _last_safepoint_sync_time_ns = os::javaTimeNanos();
  _nof_threads = nof_threads;
  _nof_running = nof_running;
  _page_trap   = traps;
  RuntimeService::record_safepoint_synchronized(_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns);
}

void SafepointTracing::end() {
  _last_safepoint_end_time_ns = os::javaTimeNanos();

  if (_max_sync_time < (_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns)) {
    _max_sync_time = _last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns;
  }
  if (_max_vmop_time < (_last_safepoint_end_time_ns - _last_safepoint_sync_time_ns)) {
    _max_vmop_time = _last_safepoint_end_time_ns - _last_safepoint_sync_time_ns;
  }
  if (log_is_enabled(Info, safepoint, stats)) {
    statistics_log();
  }

  log_info(safepoint)(
     "Safepoint \"%s\", "
     "Time since last: " JLONG_FORMAT " ns, "
     "Reaching safepoint: " JLONG_FORMAT " ns, "
     "At safepoint: " JLONG_FORMAT " ns, "
     "Total: " JLONG_FORMAT " ns",
      VM_Operation::name(_current_type),
      _last_app_time_ns,
      _last_safepoint_sync_time_ns    - _last_safepoint_begin_time_ns,
      _last_safepoint_end_time_ns     - _last_safepoint_sync_time_ns,
      _last_safepoint_end_time_ns     - _last_safepoint_begin_time_ns
     );

  RuntimeService::record_safepoint_end(_last_safepoint_end_time_ns - _last_safepoint_sync_time_ns);
}