jdk

Fork
0
/
threadService.cpp 
1127 lines · 39.7 KB
/*
2
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
3
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
 *
5
 * This code is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 only, as
7
 * published by the Free Software Foundation.
8
 *
9
 * This code is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12
 * version 2 for more details (a copy is included in the LICENSE file that
13
 * accompanied this code).
14
 *
15
 * You should have received a copy of the GNU General Public License version
16
 * 2 along with this work; if not, write to the Free Software Foundation,
17
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
 *
19
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
 * or visit www.oracle.com if you need additional information or have any
21
 * questions.
22
 *
23
 */
24

25
#include "precompiled.hpp"
26
#include "classfile/javaClasses.inline.hpp"
27
#include "classfile/systemDictionary.hpp"
28
#include "classfile/vmClasses.hpp"
29
#include "classfile/vmSymbols.hpp"
30
#include "gc/shared/oopStorageSet.hpp"
31
#include "memory/heapInspection.hpp"
32
#include "memory/oopFactory.hpp"
33
#include "memory/resourceArea.hpp"
34
#include "memory/universe.hpp"
35
#include "nmt/memflags.hpp"
36
#include "oops/instanceKlass.hpp"
37
#include "oops/klass.inline.hpp"
38
#include "oops/objArrayKlass.hpp"
39
#include "oops/objArrayOop.inline.hpp"
40
#include "oops/oop.inline.hpp"
41
#include "oops/oopHandle.inline.hpp"
42
#include "prims/jvmtiRawMonitor.hpp"
43
#include "runtime/atomic.hpp"
44
#include "runtime/handles.inline.hpp"
45
#include "runtime/init.hpp"
46
#include "runtime/javaThread.inline.hpp"
47
#include "runtime/objectMonitor.inline.hpp"
48
#include "runtime/synchronizer.hpp"
49
#include "runtime/thread.inline.hpp"
50
#include "runtime/threads.hpp"
51
#include "runtime/threadSMR.inline.hpp"
52
#include "runtime/vframe.hpp"
53
#include "runtime/vmThread.hpp"
54
#include "runtime/vmOperations.hpp"
55
#include "services/threadService.hpp"
56

57
// TODO: we need to define a naming convention for perf counters
58
// to distinguish counters for:
59
//   - standard JSR174 use
60
//   - Hotspot extension (public and committed)
61
//   - Hotspot extension (private/internal and uncommitted)
62

63
// Default is disabled.
64
bool ThreadService::_thread_monitoring_contention_enabled = false;
65
bool ThreadService::_thread_cpu_time_enabled = false;
66
bool ThreadService::_thread_allocated_memory_enabled = false;
67

68
PerfCounter*  ThreadService::_total_threads_count = nullptr;
69
PerfVariable* ThreadService::_live_threads_count = nullptr;
70
PerfVariable* ThreadService::_peak_threads_count = nullptr;
71
PerfVariable* ThreadService::_daemon_threads_count = nullptr;
72
volatile int ThreadService::_atomic_threads_count = 0;
73
volatile int ThreadService::_atomic_daemon_threads_count = 0;
74

75
volatile jlong ThreadService::_exited_allocated_bytes = 0;
76

77
ThreadDumpResult* ThreadService::_threaddump_list = nullptr;
78

79
static const int INITIAL_ARRAY_SIZE = 10;
80

81
// OopStorage for thread stack sampling
82
static OopStorage* _thread_service_storage = nullptr;
83

84
void ThreadService::init() {
85
  EXCEPTION_MARK;
86

87
  // These counters are for java.lang.management API support.
88
  // They are created even if -XX:-UsePerfData is set and in
89
  // that case, they will be allocated on C heap.
90

91
  _total_threads_count =
92
                PerfDataManager::create_counter(JAVA_THREADS, "started",
93
                                                PerfData::U_Events, CHECK);
94

95
  _live_threads_count =
96
                PerfDataManager::create_variable(JAVA_THREADS, "live",
97
                                                 PerfData::U_None, CHECK);
98

99
  _peak_threads_count =
100
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
101
                                                 PerfData::U_None, CHECK);
102

103
  _daemon_threads_count =
104
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
105
                                                 PerfData::U_None, CHECK);
106

107
  if (os::is_thread_cpu_time_supported()) {
108
    _thread_cpu_time_enabled = true;
109
  }
110

111
  _thread_allocated_memory_enabled = true; // Always on, so enable it
112

113
  // Initialize OopStorage for thread stack sampling walking
114
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
115
                                                         mtServiceability);
116
}
117

118
void ThreadService::reset_peak_thread_count() {
119
  // Acquire the lock to update the peak thread count
120
  // to synchronize with thread addition and removal.
121
  MutexLocker mu(Threads_lock);
122
  _peak_threads_count->set_value(get_live_thread_count());
123
}
124

125
// A thread is "hidden" when it must be excluded from external thread
// accounting: VM-internal threads and JVMTI agent threads.
static bool is_hidden_thread(JavaThread *thread) {
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}
129

130
void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Hidden (VM-internal / JVMTI agent) threads are not counted.
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);

  // Maintain the historical live-thread peak.
  int live_now = _atomic_threads_count;
  if (live_now > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(live_now);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}
152

153
// Drop the atomic live (and, if applicable, daemon) thread counts.
// The perf counters are decremented separately in remove_thread().
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);
  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}
160

161
// Account for a thread that is being removed from the VM.
// Fix: corrected comment typo "allcations" -> "allocations".
void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Include hidden thread allocations in exited_allocated_bytes
  ThreadService::incr_exited_allocated_bytes(thread->cooked_allocated_bytes());

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // We did not get here via JavaThread::exit() so current_thread_exiting()
    // was not called, e.g., JavaThread::cleanup_failed_attach_current_thread().
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}
216

217
// Called by the exiting thread itself (from JavaThread::exit()) to drop
// the atomic counts early; remove_thread() later adjusts the perf counters.
void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Hidden threads were never counted, so there is nothing to undo.
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}
228

229
// FIXME: JVMTI should call this function
230
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
231
  assert(thread != nullptr, "should be non-null");
232
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)
233

234
  // This function can be called on a target JavaThread that is not
235
  // the caller and we are not at a safepoint. So it is possible for
236
  // the waiting or pending condition to be over/stale and for the
237
  // first stage of async deflation to clear the object field in
238
  // the ObjectMonitor. It is also possible for the object to be
239
  // inflated again and to be associated with a completely different
240
  // ObjectMonitor by the time this object reference is processed
241
  // by the caller.
242
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();
243

244
  oop obj = nullptr;
245
  if (wait_obj != nullptr) {
246
    // thread is doing an Object.wait() call
247
    obj = wait_obj->object();
248
  } else {
249
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
250
    if (enter_obj != nullptr) {
251
      // thread is trying to enter() an ObjectMonitor.
252
      obj = enter_obj->object();
253
    }
254
  }
255

256
  Handle h(Thread::current(), obj);
257
  return h;
258
}
259

260
bool ThreadService::set_thread_monitoring_contention(bool flag) {
261
  MutexLocker m(Management_lock);
262

263
  bool prev = _thread_monitoring_contention_enabled;
264
  _thread_monitoring_contention_enabled = flag;
265

266
  return prev;
267
}
268

269
bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
270
  MutexLocker m(Management_lock);
271

272
  bool prev = _thread_cpu_time_enabled;
273
  _thread_cpu_time_enabled = flag;
274

275
  return prev;
276
}
277

278
bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
279
  MutexLocker m(Management_lock);
280

281
  bool prev = _thread_allocated_memory_enabled;
282
  _thread_allocated_memory_enabled = flag;
283

284
  return prev;
285
}
286

287
void ThreadService::metadata_do(void f(Metadata*)) {
288
  for (ThreadDumpResult* dump = _threaddump_list; dump != nullptr; dump = dump->next()) {
289
    dump->metadata_do(f);
290
  }
291
}
292

293
// Push a ThreadDumpResult onto the front of the global list (guarded by
// Management_lock) so metadata_do() can reach its snapshots.
void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list != nullptr) {
    dump->set_next(_threaddump_list);
  }
  _threaddump_list = dump;
}
302

303
// Unlink a ThreadDumpResult from the global singly-linked list.
// The dump must be present; asserts otherwise.
void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  bool found = false;
  ThreadDumpResult* prev = nullptr;
  ThreadDumpResult* cur = _threaddump_list;
  while (cur != nullptr) {
    if (cur == dump) {
      // Splice the node out of the list.
      if (prev == nullptr) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
    prev = cur;
    cur = cur->next();
  }
  assert(found, "The threaddump result to be removed must exist.");
}
321

322
// Dump stack trace of threads specified in the given threads array.
323
// Returns StackTraceElement[][] each element is the stack trace of a thread in
324
// the corresponding entry in the given threads array
325
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
326
                                        int num_threads,
327
                                        TRAPS) {
328
  assert(num_threads > 0, "just checking");
329

330
  ThreadDumpResult dump_result;
331
  VM_ThreadDump op(&dump_result,
332
                   threads,
333
                   num_threads,
334
                   -1,    /* entire stack */
335
                   false, /* with locked monitors */
336
                   false  /* with locked synchronizers */);
337
  VMThread::execute(&op);
338

339
  // Allocate the resulting StackTraceElement[][] object
340

341
  ResourceMark rm(THREAD);
342
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
343
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
344
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
345
  objArrayHandle result_obj(THREAD, r);
346

347
  int num_snapshots = dump_result.num_snapshots();
348
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
349
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
350
  int i = 0;
351
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != nullptr; i++, ts = ts->next()) {
352
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
353
    if (stacktrace == nullptr) {
354
      // No stack trace
355
      result_obj->obj_at_put(i, nullptr);
356
    } else {
357
      // Construct an array of java/lang/StackTraceElement object
358
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
359
      result_obj->obj_at_put(i, backtrace_h());
360
    }
361
  }
362

363
  return result_obj;
364
}
365

366
// Reset a thread's contention count statistics, if it has any recorded.
void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stats = thread->get_thread_stat();
  if (stats == nullptr) {
    return;
  }
  stats->reset_count_stat();
}
372

373
// Reset a thread's contention time statistics, if it has any recorded.
void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stats = thread->get_thread_stat();
  if (stats == nullptr) {
    return;
  }
  stats->reset_time_stat();
}
379

380
// True if jt either backs a virtual thread or currently carries one.
bool ThreadService::is_virtual_or_carrier_thread(JavaThread* jt) {
  oop thread_oop = jt->threadObj();
  // Virtual thread backed by this JavaThread.
  bool is_virtual = thread_oop != nullptr &&
                    thread_oop->is_a(vmClasses::BaseVirtualThread_klass());
  // Carrier thread with a mounted virtual thread.
  return is_virtual || jt->is_vthread_mounted();
}
392

393
// Find deadlocks involving raw monitors, object monitors and concurrent locks
394
// if concurrent_locks is true.
395
// We skip virtual thread carriers under the assumption that the current scheduler, ForkJoinPool,
396
// doesn't hold any locks while mounting a virtual thread, so any owned monitor (or j.u.c., lock for that matter)
397
// on that JavaThread must be owned by the virtual thread, and we don't support deadlock detection for virtual threads.
398
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
399
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
400

401
  // This code was modified from the original Threads::find_deadlocks code.
402
  int globalDfn = 0, thisDfn;
403
  ObjectMonitor* waitingToLockMonitor = nullptr;
404
  JvmtiRawMonitor* waitingToLockRawMonitor = nullptr;
405
  oop waitingToLockBlocker = nullptr;
406
  bool blocked_on_monitor = false;
407
  JavaThread *currentThread, *previousThread;
408
  int num_deadlocks = 0;
409

410
  // Initialize the depth-first-number for each JavaThread.
411
  JavaThreadIterator jti(t_list);
412
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
413
    if (!is_virtual_or_carrier_thread(jt)) {
414
      jt->set_depth_first_number(-1);
415
    }
416
  }
417

418
  DeadlockCycle* deadlocks = nullptr;
419
  DeadlockCycle* last = nullptr;
420
  DeadlockCycle* cycle = new DeadlockCycle();
421
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
422
    if (is_virtual_or_carrier_thread(jt)) {
423
      // skip virtual and carrier threads
424
      continue;
425
    }
426
    if (jt->depth_first_number() >= 0) {
427
      // this thread was already visited
428
      continue;
429
    }
430

431
    thisDfn = globalDfn;
432
    jt->set_depth_first_number(globalDfn++);
433
    previousThread = jt;
434
    currentThread = jt;
435

436
    cycle->reset();
437

438
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
439
    // When there is a deadlock, all the monitors involved in the dependency
440
    // cycle must be contended and heavyweight. So we only care about the
441
    // heavyweight monitor a thread is waiting to lock.
442
    waitingToLockMonitor = jt->current_pending_monitor();
443
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
444
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
445
    // It isn't clear how to make deadlock detection work correctly if that
446
    // happens.
447
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();
448

449
    if (concurrent_locks) {
450
      waitingToLockBlocker = jt->current_park_blocker();
451
    }
452

453
    while (waitingToLockMonitor != nullptr ||
454
           waitingToLockRawMonitor != nullptr ||
455
           waitingToLockBlocker != nullptr) {
456
      cycle->add_thread(currentThread);
457
      // Give preference to the raw monitor
458
      if (waitingToLockRawMonitor != nullptr) {
459
        Thread* owner = waitingToLockRawMonitor->owner();
460
        if (owner != nullptr && // the raw monitor could be released at any time
461
            owner->is_Java_thread()) {
462
          currentThread = JavaThread::cast(owner);
463
        }
464
      } else if (waitingToLockMonitor != nullptr) {
465
        if (waitingToLockMonitor->has_owner()) {
466
          currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
467
          if (currentThread == nullptr) {
468
            // This function is called at a safepoint so the JavaThread
469
            // that owns waitingToLockMonitor should be findable, but
470
            // if it is not findable, then the previous currentThread is
471
            // blocked permanently. We record this as a deadlock.
472
            num_deadlocks++;
473

474
            // add this cycle to the deadlocks list
475
            if (deadlocks == nullptr) {
476
              deadlocks = cycle;
477
            } else {
478
              last->set_next(cycle);
479
            }
480
            last = cycle;
481
            cycle = new DeadlockCycle();
482
            break;
483
          }
484
        }
485
      } else {
486
        if (concurrent_locks) {
487
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
488
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
489
            // This JavaThread (if there is one) is protected by the
490
            // ThreadsListSetter in VM_FindDeadlocks::doit().
491
            currentThread = threadObj != nullptr ? java_lang_Thread::thread(threadObj) : nullptr;
492
          } else {
493
            currentThread = nullptr;
494
          }
495
        }
496
      }
497

498
      if (currentThread == nullptr || is_virtual_or_carrier_thread(currentThread)) {
499
        // No dependency on another thread
500
        break;
501
      }
502
      if (currentThread->depth_first_number() < 0) {
503
        // First visit to this thread
504
        currentThread->set_depth_first_number(globalDfn++);
505
      } else if (currentThread->depth_first_number() < thisDfn) {
506
        // Thread already visited, and not on a (new) cycle
507
        break;
508
      } else if (currentThread == previousThread) {
509
        // Self-loop, ignore
510
        break;
511
      } else {
512
        // We have a (new) cycle
513
        num_deadlocks++;
514

515
        // add this cycle to the deadlocks list
516
        if (deadlocks == nullptr) {
517
          deadlocks = cycle;
518
        } else {
519
          last->set_next(cycle);
520
        }
521
        last = cycle;
522
        cycle = new DeadlockCycle();
523
        break;
524
      }
525
      previousThread = currentThread;
526
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
527
      if (concurrent_locks) {
528
        waitingToLockBlocker = currentThread->current_park_blocker();
529
      }
530
    }
531

532
  }
533
  delete cycle;
534
  return deadlocks;
535
}
536

537
ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {
  // Register this result on the global dump list so that, if a GC happens
  // before this object is destroyed, the Method* entries in the stack
  // traces are visited.
  ThreadService::add_thread_dump(this);
}
544

545
ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {
  // Register this result on the global dump list so that, if a GC happens
  // before this object is destroyed, its oops are visited.
  ThreadService::add_thread_dump(this);
}
551

552
ThreadDumpResult::~ThreadDumpResult() {
  // Unregister from the global dump list first.
  ThreadService::remove_thread_dump(this);

  // Free the ThreadSnapshot chain built during the VM_ThreadDump operation.
  ThreadSnapshot* current = _snapshots;
  while (current != nullptr) {
    ThreadSnapshot* doomed = current;
    current = current->next();
    delete doomed;
  }
}
564

565
// Append a new, uninitialized snapshot to this result and return it.
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* snapshot = new ThreadSnapshot();
  link_thread_snapshot(snapshot);
  return snapshot;
}
570

571
// Append a new snapshot for the given thread, initialized against this
// result's ThreadsList, and return it.
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* snapshot = new ThreadSnapshot();
  link_thread_snapshot(snapshot);
  snapshot->initialize(t_list(), thread);
  return snapshot;
}
577

578
// Append ts to the tail of this result's snapshot list.
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots != nullptr) {
    _last->set_next(ts);
  } else {
    _snapshots = ts;  // first snapshot becomes the list head
  }
  _last = ts;
}
589

590
void ThreadDumpResult::metadata_do(void f(Metadata*)) {
591
  for (ThreadSnapshot* ts = _snapshots; ts != nullptr; ts = ts->next()) {
592
    ts->metadata_do(f);
593
  }
594
}
595

596
// The ThreadsList captured by this result's ThreadsListSetter.
ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}
599

600
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  // Keep the holder class alive via a strong OopHandle so _method stays valid.
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = nullptr;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    // Record the monitors this frame holds, each behind a strong OopHandle.
    GrowableArray<MonitorInfo*>* mons = jvf->locked_monitors();
    int num_mons = mons->length();
    if (num_mons > 0) {
      _locked_monitors = new (mtServiceability) GrowableArray<OopHandle>(num_mons, mtServiceability);
      for (int idx = 0; idx < num_mons; idx++) {
        MonitorInfo* info = mons->at(idx);
        assert(info->owner() != nullptr, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, info->owner()));
      }
    }
  }
}
621

622
StackFrameInfo::~StackFrameInfo() {
  // Release every strong OopHandle this frame created.
  if (_locked_monitors != nullptr) {
    int num_mons = _locked_monitors->length();
    for (int idx = 0; idx < num_mons; idx++) {
      _locked_monitors->at(idx).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}
631

632
void StackFrameInfo::metadata_do(void f(Metadata*)) {
633
  f(_method);
634
}
635

636
// Print this frame as a stack-trace element, followed by a "- locked" line
// for each monitor the frame holds.
void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  if (_locked_monitors == nullptr) {
    return;
  }
  for (int idx = 0; idx < _locked_monitors->length(); idx++) {
    oop o = _locked_monitors->at(idx).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }
}
645

646
// Iterate through monitor cache to find JNI locked monitors
647
class InflatedMonitorsClosure: public MonitorClosure {
648
private:
649
  ThreadStackTrace* _stack_trace;
650
public:
651
  InflatedMonitorsClosure(ThreadStackTrace* st) {
652
    _stack_trace = st;
653
  }
654
  void do_monitor(ObjectMonitor* mid) {
655
    oop object = mid->object();
656
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
657
      _stack_trace->add_jni_locked_monitor(object);
658
    }
659
  }
660
};
661

662
ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  // The JNI-locked-monitor list is only needed when monitor info is requested.
  _jni_locked_monitors = _with_locked_monitors
      ? new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability)
      : nullptr;
}
673

674
// Record a JNI-locked monitor object behind a strong OopHandle.
void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}
677

678
ThreadStackTrace::~ThreadStackTrace() {
  // Delete the per-frame info objects.
  int num_frames = _frames->length();
  for (int idx = 0; idx < num_frames; idx++) {
    delete _frames->at(idx);
  }
  delete _frames;
  // Release the strong OopHandles for JNI-locked monitors, if collected.
  if (_jni_locked_monitors != nullptr) {
    int num_mons = _jni_locked_monitors->length();
    for (int idx = 0; idx < num_mons; idx++) {
      _jni_locked_monitors->at(idx).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}
690

691
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsView* monitors, bool full) {
692
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
693

694
  if (_thread->has_last_Java_frame()) {
695
    RegisterMap reg_map(_thread,
696
                        RegisterMap::UpdateMap::include,
697
                        RegisterMap::ProcessFrames::include,
698
                        RegisterMap::WalkContinuation::skip);
699
    ResourceMark rm(VMThread::vm_thread());
700
    // If full, we want to print both vthread and carrier frames
701
    vframe* start_vf = !full && _thread->is_vthread_mounted()
702
      ? _thread->carrier_last_java_vframe(&reg_map)
703
      : _thread->last_java_vframe(&reg_map);
704
    int count = 0;
705
    for (vframe* f = start_vf; f; f = f->sender() ) {
706
      if (maxDepth >= 0 && count == maxDepth) {
707
        // Skip frames if more than maxDepth
708
        break;
709
      }
710
      if (!full && f->is_vthread_entry()) {
711
        break;
712
      }
713
      if (f->is_java_frame()) {
714
        javaVFrame* jvf = javaVFrame::cast(f);
715
        add_stack_frame(jvf);
716
        count++;
717
      } else {
718
        // Ignore non-Java frames
719
      }
720
    }
721
  }
722

723
  if (_with_locked_monitors) {
724
    // Iterate inflated monitors and find monitors locked by this thread
725
    // that are not found in the stack, e.g. JNI locked monitors:
726
    InflatedMonitorsClosure imc(this);
727
    monitors->visit(&imc, _thread);
728
  }
729
}
730

731

732
// Return true if 'object' appears among the locked monitors of any frame
// in this stack trace.
// Fix: return as soon as a match is found; the original kept scanning all
// remaining frames after setting 'found' (the inner break only exited one
// frame's monitor list).
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != nullptr, "must be a Java object");
      if (monitor == object) {
        return true;
      }
    }
  }
  return false;
}
752

753
// Allocate a java/lang/StackTraceElement[] of length _depth and fill it
// from the collected frames. Returns the array handle, or propagates any
// allocation exception via CHECK_NH.
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != nullptr, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int idx = 0; idx < _depth; idx++) {
    StackFrameInfo* frame = _frames->at(idx);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(idx, element);
  }
  return backtrace;
}
768

769
// Append a StackFrameInfo for the given Java vframe and bump the depth.
void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  _frames->append(new StackFrameInfo(jvf, _with_locked_monitors));
  _depth++;
}
774

775
void ThreadStackTrace::metadata_do(void f(Metadata*)) {
776
  int length = _frames->length();
777
  for (int i = 0; i < length; i++) {
778
    _frames->at(i)->metadata_do(f);
779
  }
780
}
781

782

783
ConcurrentLocksDump::~ConcurrentLocksDump() {
  // The owner may have taken responsibility for the map's lifetime.
  if (_retain_map_on_free) {
    return;
  }

  // Otherwise free the whole ThreadConcurrentLocks chain.
  ThreadConcurrentLocks* cur = _map;
  while (cur != nullptr) {
    ThreadConcurrentLocks* doomed = cur;
    cur = cur->next();
    delete doomed;
  }
}
794

795
void ConcurrentLocksDump::dump_at_safepoint() {
796
  // dump all locked concurrent locks
797
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
798

799
  GrowableArray<oop>* aos_objects = new (mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
800

801
  // Find all instances of AbstractOwnableSynchronizer
802
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
803
                                              aos_objects);
804
  // Build a map of thread to its owned AQS locks
805
  build_map(aos_objects);
806

807
  delete aos_objects;
808
}
809

810

811
// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
812
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
813
  int length = aos_objects->length();
814
  for (int i = 0; i < length; i++) {
815
    oop o = aos_objects->at(i);
816
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
817
    if (owner_thread_obj != nullptr) {
818
      // See comments in ThreadConcurrentLocks to see how this
819
      // JavaThread* is protected.
820
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
821
      assert(o->is_instance(), "Must be an instanceOop");
822
      add_lock(thread, (instanceOop) o);
823
    }
824
  }
825
}
826

827
void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  // If the thread already has an entry, just append the lock to it.
  ThreadConcurrentLocks* entry = thread_concurrent_locks(thread);
  if (entry != nullptr) {
    entry->add_lock(o);
    return;
  }

  // First owned lock found for this thread: create a new entry and
  // link it at the tail of the list.
  entry = new ThreadConcurrentLocks(thread);
  entry->add_lock(o);
  if (_map == nullptr) {
    _map = entry;            // list was empty; entry becomes the head
  } else {
    _last->set_next(entry);
  }
  _last = entry;
}
844

845
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  // Linear scan of the per-thread entries; returns null when the
  // thread has no recorded locks.
  ThreadConcurrentLocks* cur = _map;
  while (cur != nullptr) {
    if (cur->java_thread() == thread) {
      return cur;
    }
    cur = cur->next();
  }
  return nullptr;
}
853

854
void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");

  // Look up the locks recorded for this thread, if any.
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != nullptr) ? tcl->owned_locks() : nullptr;
  bool have_locks = (locks != nullptr) && !locks->is_empty();
  if (!have_locks) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  // One line per owned synchronizer: address and class name.
  for (int idx = 0; idx < locks->length(); idx++) {
    oop lock_obj = locks->at(idx).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(lock_obj), lock_obj->klass()->external_name());
  }
  st->cr();
}
870

871
ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  // One entry per thread: the thread, its growable list of owned
  // lock handles, and the link to the next entry in the dump's list.
  _next        = nullptr;
  _thread      = thread;
  _owned_locks = new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
}
876

877
ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  // Release every OopHandle back to the service OopStorage before
  // freeing the backing array.
  int count = _owned_locks->length();
  for (int idx = 0; idx < count; idx++) {
    _owned_locks->at(idx).release(_thread_service_storage);
  }
  delete _owned_locks;
}
883

884
void ThreadConcurrentLocks::add_lock(instanceOop o) {
  // Wrap the lock object in an OopHandle so it stays reachable; the
  // handle is released in the destructor.
  OopHandle handle(_thread_service_storage, o);
  _owned_locks->append(handle);
}
887

888
ThreadStatistics::ThreadStatistics() {
  // All counters start at zero and no reset is pending.
  _contended_enter_count = 0;
  _monitor_wait_count    = 0;
  _sleep_count           = 0;

  _count_pending_reset = false;
  _timer_pending_reset = false;

  // Zero the per-counter perf recursion depths.
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}
896

897
// Resolve the snapshot's java.lang.Thread oop (null if handle is empty).
oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }
898

899
// Capture a point-in-time snapshot of 'thread': its java.lang.Thread oop,
// contention/wait/sleep statistics, status, and (when blocked/waiting/parked)
// the object it is blocked on plus that object's owner, if determinable.
// The oops are stored as OopHandles in _thread_service_storage and released
// by the destructor.
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  // Copy the accumulated per-thread statistics.
  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be null.
  _thread_status = threadObj == nullptr ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  // Monitor this thread is currently trying to enter or waiting on, if any.
  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = nullptr;
  oop blocker_object_owner = nullptr;

  if (thread->is_vthread_mounted() && thread->vthread() != threadObj) { // ThreadSnapshot only captures platform threads
    // A carrier thread: report it as waiting on its mounted virtual thread.
    _thread_status = JavaThreadStatus::IN_OBJECT_WAIT;
    oop vthread = thread->vthread();
    assert(vthread != nullptr, "");
    blocker_object = vthread;
    blocker_object_owner = vthread;
  } else if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == nullptr) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == nullptr && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != nullptr && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example thread name and id
        // and may not be set, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = nullptr;
      } else if (owner != nullptr) {
        blocker_object_owner = owner->threadObj();
      }
    }
  } else if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    // Parked via LockSupport: report the park blocker and, when the blocker
    // is an AbstractOwnableSynchronizer, its exclusive owner thread.
    blocker_object = thread->current_park_blocker();
    if (blocker_object != nullptr && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  // Pin the blocker oops with handles so they survive GC until release.
  if (blocker_object != nullptr) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != nullptr) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}
968

969
// Resolve the blocker oops captured at snapshot time (null when the
// thread was not blocked or the owner was unknown).
oop ThreadSnapshot::blocker_object() const           { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const     { return _blocker_object_owner.resolve(); }
971

972
ThreadSnapshot::~ThreadSnapshot() {
  // Return all OopHandles to the service OopStorage, then free the
  // owned stack trace and concurrent-locks data (both may be null).
  _threadObj.release(_thread_service_storage);
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}
980

981
void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
982
                                             ObjectMonitorsView* monitors, bool full) {
983
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
984
  _stack_trace->dump_stack_at_safepoint(max_depth, monitors, full);
985
}
986

987

988
void ThreadSnapshot::metadata_do(void f(Metadata*)) {
989
  if (_stack_trace != nullptr) {
990
    _stack_trace->metadata_do(f);
991
  }
992
}
993

994

995
DeadlockCycle::DeadlockCycle() {
  // A cycle owns the array of participating threads and a link to the
  // next detected cycle.
  _next = nullptr;
  _threads = new (mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
}
999

1000
DeadlockCycle::~DeadlockCycle() {
  // Frees only the array; the JavaThread* elements are not owned.
  delete _threads;
}
1003

1004
// Print a human-readable description of this deadlock cycle to 'st':
// for each thread, what it is waiting on (JVM TI raw monitor, Java
// monitor, or ownable synchronizer) and which thread holds it, followed
// by the Java stack traces of all participating threads.
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != nullptr) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be null as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != nullptr) {
        if (owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
          st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != nullptr) {
      // Blocked on a Java monitor: print the monitor, its object, and
      // look up the owning thread (reassigned to currentThread so the
      // final print below names the owner).
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
      if (currentThread == nullptr) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      // Not blocked on a monitor: the thread is parked on an ownable
      // synchronizer (java.util.concurrent locks).
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != nullptr, "AbstractOwnableSynchronizer owning thread is unexpectedly null");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
  }

  st->cr();

  // Print stack traces
  // Temporarily force monitor info into the stack traces; restore the
  // previous flag value afterwards.
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}
1087

1088
// Build a snapshot array of handles to the java.lang.Thread objects of
// all live, externally-visible JavaThreads. The include_* flags widen
// the selection to JVM TI agent threads, JNI-attaching threads, and
// bound virtual threads respectively.
// NOTE(review): jt->threadObj() is deliberately re-fetched at each use
// rather than cached in a raw oop local.
ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads,
                                             bool include_bound_virtual_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  // Size the array from the current live count to avoid regrowth.
  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // skips JavaThreads in the process of exiting
    // and also skips VM internal JavaThreads
    // Threads in _thread_new or _thread_new_trans state are included.
    // i.e. threads have been started but not yet running.
    if (jt->threadObj() == nullptr   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    // skip instances of BoundVirtualThread
    if (!include_bound_virtual_threads && jt->threadObj()->is_a(vmClasses::BoundVirtualThread_klass())) {
      continue;
    }

    // Record a handle to the thread's java.lang.Thread object.
    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}
1128

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.