Export of internal Abseil changes

--
7b6a68aa92dcc7247236d1a1813914e035383bf8 by Abseil Team <absl-team@google.com>:

Use atomic exchange to mark completion in absl::once_flag

This prevents a potential missed wakeup when one thread marks
itself as a waiter while another thread is concurrently completing the invocation.

PiperOrigin-RevId: 344946791
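
The race the exchange closes, in a minimal C++ sketch (the state names below are simplified stand-ins, not the real absl internals; the actual implementation lives in absl/base/call_once.h and wakes waiters via base_internal::SpinLockWake):

#include <atomic>
#include <cstdint>

enum : uint32_t { kInit = 0, kRunning = 1, kWaiter = 2, kDone = 3 };

void MarkDone(std::atomic<uint32_t>& control) {
  // exchange() installs kDone and returns the value it replaced in one atomic
  // step, so a waiter's kRunning -> kWaiter transition can never be missed.
  uint32_t old = control.exchange(kDone, std::memory_order_release);
  if (old == kWaiter) {
    // Wake sleeping waiters here (absl calls base_internal::SpinLockWake).
  }
}

// The previous code read and wrote separately:
//   uint32_t old = control.load(std::memory_order_relaxed);
//   control.store(kDone, std::memory_order_release);
// A waiter's CAS from kRunning to kWaiter could land between the load and the
// store; the completer would then see kRunning, skip the wake, and the waiter
// would sleep until its short timeout expired.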

--
ddff21d1dde08d1368d8be5fca81b154e78be2fc by Abseil Team <absl-team@google.com>:

Add missing string_view include. string_view is currently pulled into this header only transitively, through the cord header.

PiperOrigin-RevId: 344845266
GitOrigin-RevId: 7b6a68aa92dcc7247236d1a1813914e035383bf8
Change-Id: Ia24e98a1df832fc4cb491d888fdf21182b5954f4
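
Why the direct include matters, sketched with a hypothetical consumer (the helper function below is illustrative, not part of Abseil):

// Relying on the transitive include: this compiled only because
// "absl/strings/cord.h" happened to pull in string_view.h.
#include "absl/strings/cord.h"

// The fix: state the dependency directly, so the code keeps compiling
// even if cord.h stops including string_view.h.
#include "absl/strings/string_view.h"

// Hypothetical consumer of the header's string_view-based API.
inline bool IsEmptyMessage(absl::string_view message) {
  return message.empty();
}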
Branch: pull/861/head
Authored by Abseil Team, committed by vslashg (4 years ago)
Parent: e80c0b3536  Commit: 592924480a
Files changed (4):
  absl/base/call_once.h (11)
  absl/base/internal/spinlock.h (5)
  absl/base/internal/spinlock_wait.h (12)
  absl/status/status.h (1)

absl/base/call_once.h
@@ -177,15 +177,8 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
                                   scheduling_mode) == kOnceInit) {
     base_internal::invoke(std::forward<Callable>(fn),
                           std::forward<Args>(args)...);
-    // The call to SpinLockWake below is an optimization, because the waiter
-    // in SpinLockWait is waiting with a short timeout. The atomic load/store
-    // sequence is slightly faster than an atomic exchange:
-    // old_control = control->exchange(base_internal::kOnceDone,
-    //                                 std::memory_order_release);
-    // We opt for a slightly faster case when there are no waiters, in spite
-    // of longer tail latency when there are waiters.
-    old_control = control->load(std::memory_order_relaxed);
-    control->store(base_internal::kOnceDone, std::memory_order_release);
+    old_control =
+        control->exchange(base_internal::kOnceDone, std::memory_order_release);
     if (old_control == base_internal::kOnceWaiter) {
       base_internal::SpinLockWake(control, true);
     }

absl/base/internal/spinlock.h
@@ -15,11 +15,8 @@
 //
 // Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in three situations:
+// SpinLock is provided for use in two situations:
 //  - for use in code that Mutex itself depends on
-//  - to get a faster fast-path release under low contention (without an
-//    atomic read-modify-write) In return, SpinLock has worse behaviour under
-//    contention, which is why Mutex is preferred in most situations.
 //  - for async signal safety (see below)
 
 // SpinLock is async signal safe. If a spinlock is used within a signal

absl/base/internal/spinlock_wait.h
@@ -43,18 +43,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
                       const SpinLockWaitTransition trans[],
                       SchedulingMode scheduling_mode);
 
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads. This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
 void SpinLockWake(std::atomic<uint32_t> *w, bool all);
 
 // Wait for an appropriate spin delay on iteration "loop" of a
 // spin loop on location *w, whose previously observed value was "value".
 // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
 void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
                    base_internal::SchedulingMode scheduling_mode);
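
For context, a minimal sketch of how these primitives compose into a lock; this is an illustration only, not Abseil's actual SpinLock, and it assumes the declarations above plus base_internal::SCHEDULE_KERNEL_ONLY from the same header:

#include <atomic>
#include <cstdint>
#include "absl/base/internal/spinlock_wait.h"

void LockSketch(std::atomic<uint32_t>* w) {
  for (int loop = 0;; ++loop) {
    uint32_t v = 0;
    // Try to take the lock: 0 (free) -> 1 (held).
    if (w->compare_exchange_weak(v, 1, std::memory_order_acquire)) return;
    // Back off: may spin, yield, sleep a tick, or wait for SpinLockWake(w).
    absl::base_internal::SpinLockDelay(
        w, v, loop, absl::base_internal::SCHEDULE_KERNEL_ONLY);
  }
}

void UnlockSketch(std::atomic<uint32_t>* w) {
  w->store(0, std::memory_order_release);       // release the lock
  absl::base_internal::SpinLockWake(w, false);  // wake one delayed waiter
}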

absl/status/status.h
@@ -57,6 +57,7 @@
 #include "absl/container/inlined_vector.h"
 #include "absl/status/internal/status_internal.h"
 #include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 
 namespace absl {
