/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
#include "utilities/macros.hpp"

SingleWriterSynchronizer::SingleWriterSynchronizer() :
  _enter(0),
  _exit(),
  // The initial value of 1 for _waiting_for puts it on the inactive
  // track, so no thread exiting a critical section will match it.
  _waiting_for(1),
  _wakeup()
  DEBUG_ONLY(COMMA _writers(0))
{}
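// For orientation when reading synchronize() below: an illustrative
// sketch (not compiled here, and only an approximation) of how the
// reader side declared in utilities/singleWriterSynchronizer.hpp
// interacts with these counters. enter() advances _enter without
// changing its polarity bit; exit() advances the exit counter selected
// by the polarity seen at entry, and signals the writer if that
// completes a pending synchronize request.
//
//   uint SingleWriterSynchronizer::enter() {
//     return Atomic::add(&_enter, 2u);      // +2 preserves bit0 (polarity)
//   }
//
//   void SingleWriterSynchronizer::exit(uint enter_value) {
//     uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
//     if (exit_value == _waiting_for) {     // completed a pending request?
//       _wakeup.signal();                   // wake the synchronizing writer
//     }
//   }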
// Wait until all threads that entered a critical section before
// synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() {
  // Side-effect in assert balanced by debug-only dec at end.
  assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
  // We don't know anything about the muxing between this invocation
  // and invocations in other threads. We must start with the latest
  // _enter polarity, else we could clobber the wrong _exit value on
  // the first iteration. So fence to ensure everything here follows
  // whatever muxing was used.
  OrderAccess::fence();
  uint value = _enter;
  // (1) Determine the old and new exit counters, based on the
  // polarity (bit0 value) of the on-entry enter counter.
  volatile uint* new_ptr = &_exit[(value + 1) & 1];
  // (2) Change the in-use exit counter to the new counter, by adding
  // 1 to the enter counter (flipping the polarity), meanwhile
  // "simultaneously" initializing the new exit counter to that enter
  // value. Note: The new exit counter is not being used by read
  // operations until this change of _enter succeeds.
  uint old;
  do {
    old = value;
    *new_ptr = ++value;
    value = Atomic::cmpxchg(&_enter, old, value);
  } while (old != value);
  // Critical sections entered before we changed the polarity will use
  // the old exit counter. Critical sections entered after the change
  // will use the new exit counter.
  volatile uint* old_ptr = &_exit[old & 1];
  assert(old_ptr != new_ptr, "invariant");
  // (3) Inform threads in in-progress critical sections that there is
  // a pending synchronize waiting. The thread that completes the
  // request (_exit value == old) will signal the _wakeup semaphore to
  // allow us to proceed.
  _waiting_for = old;
  // Write of _waiting_for must precede read of _exit and associated
  // conditional semaphore wait. If they were re-ordered then a
  // critical section exit could miss the wakeup request, failing to
  // signal us while we're waiting.
  OrderAccess::fence();
  // (4) Wait for all the critical sections started before the change
  // to complete, e.g. for the value of old_ptr to catch up with old.
  // Loop because there could be pending wakeups unrelated to this
  // synchronize request.
  while (old != Atomic::load_acquire(old_ptr)) {
    _wakeup.wait();
  }
  // (5) Drain any pending wakeups. A critical section exit may have
  // completed our request and seen our _waiting_for before we checked
  // for completion. There are also possible (though rare) spurious
  // wakeup signals in the timing gap between changing the _enter
  // polarity and setting _waiting_for. Enough of any of those could
  // lead to semaphore overflow. This doesn't guarantee no unrelated
  // wakeups for the next wait, but prevents unbounded accumulation.
  while (_wakeup.trywait()) {}
  DEBUG_ONLY(Atomic::dec(&_writers);)
}
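
// A minimal usage sketch (illustrative only, not compiled here). The
// Node type, _head field, and method names are hypothetical; the
// pattern only assumes the enter()/exit()/synchronize() interface
// declared in utilities/singleWriterSynchronizer.hpp.
//
//   struct SharedList {
//     Node* volatile _head;                // state read by many threads
//     SingleWriterSynchronizer _synchronizer;
//
//     // Any thread: read-side critical section around use of _head.
//     void read_head() {
//       uint token = _synchronizer.enter();
//       Node* head = Atomic::load_acquire(&_head);
//       // ... dereference head only while inside the critical section ...
//       _synchronizer.exit(token);
//     }
//
//     // Single writer: detach the old node, wait out readers that may
//     // still reference it, then reclaim it.
//     void replace_head(Node* new_head) {
//       Node* old_head = Atomic::xchg(&_head, new_head);
//       _synchronizer.synchronize();
//       delete old_head;   // no reader that entered before synchronize() remains
//     }
//   };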