Export of internal Abseil changes

--
3fad46c668edd864a62511f2a6875b8b79e38f34 by Evan Brown <ezb@google.com>:

Use switches instead of lookup tables for zap_desig_waker and ignore_waiting_writers so that we can avoid dTLB misses.

See, e.g., https://godbolt.org/z/a7Gb9vzzj.

Also, now that these are functions, follow function style in naming and comments.
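
A standalone sketch of the idea (the function and constant names here are
hypothetical; the actual change is in the mutex.cc hunks below): an array
lookup loads the mask from a table in the binary's data segment, so each call
touches a data page whose translation must be resident in the dTLB, while the
equivalent switch compiles to a branch or conditional move over immediate
constants and performs no data load at all.

    #include <cstdint>

    constexpr intptr_t kSomeBit = 0x08;  // hypothetical flag bit

    // Table version: kMaskTable lives in .rodata, so every lookup is a
    // memory load and can take a dTLB miss on the table's page.
    static const intptr_t kMaskTable[] = {
        ~static_cast<intptr_t>(0),        // not blocked
        ~static_cast<intptr_t>(kSomeBit)  // blocked
    };
    intptr_t LookupMask(int blocked) { return kMaskTable[blocked]; }

    // Switch version: both results are materialized as immediates and
    // selected with a branch or cmov; no data memory is touched.
    intptr_t SwitchMask(int blocked) {
      switch (blocked) {
        case 0:  // not blocked
          return ~static_cast<intptr_t>(0);
        default:  // blocked
          return ~static_cast<intptr_t>(kSomeBit);
      }
    }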

PiperOrigin-RevId: 418654693

--
ba5107744023a4e9163a44d706fbe8e4a1bc0fd9 by Abseil Team <absl-team@google.com>:

Check for Clang before attempting to expand __clang_major__.

This avoids a warning about an undefined macro on Windows when compiling with
MinGW-GCC.
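
A minimal sketch of the guard pattern (not taken from the patch; the actual
condition is in the attributes.h hunk below). The preprocessor replaces
undefined identifiers in an #if expression with 0, and MinGW-GCC warns about
this under -Wundef; because && and || short-circuit during #if evaluation,
testing defined(__clang__) first keeps __clang_major__ from ever being
evaluated on non-Clang compilers.

    // Warns under MinGW-GCC: __clang_major__ is undefined, so it is
    // replaced with 0 and -Wundef fires.
    #if !defined(_WIN32) || __clang_major__ < 9
    #endif

    // Quiet everywhere: defined(__clang__) is tested first, && short-
    // circuits, and __clang_major__ is expanded only when Clang defines it.
    #if !defined(_WIN32) || (defined(__clang__) && __clang_major__ < 9)
    #endif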

PiperOrigin-RevId: 418287329
GitOrigin-RevId: 3fad46c668edd864a62511f2a6875b8b79e38f34
Change-Id: I28104980c4d3b204537b248447a6bd1022c9ef5d
 absl/base/attributes.h        |  7
 absl/synchronization/mutex.cc | 74
 2 files changed

--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -136,9 +136,10 @@
 // for further information.
 // The MinGW compiler doesn't complain about the weak attribute until the link
 // step, presumably because Windows doesn't use ELF binaries.
 #if (ABSL_HAVE_ATTRIBUTE(weak) ||                                         \
      (defined(__GNUC__) && !defined(__clang__))) &&                       \
-    (!defined(_WIN32) || __clang_major__ < 9) && !defined(__MINGW32__)
+    (!defined(_WIN32) || (defined(__clang__) && __clang_major__ < 9)) &&  \
+    !defined(__MINGW32__)
 #undef ABSL_ATTRIBUTE_WEAK
 #define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
 #define ABSL_HAVE_ATTRIBUTE_WEAK 1

--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -1744,23 +1744,33 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
   ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
 }
 
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuDesig)  // blocked; turn off the designated waker bit
-};
+// Clears the designated waker flag in the mutex if this thread has blocked, and
+// therefore may be the designated waker.
+static intptr_t ClearDesignatedWakerMask(int flag) {
+  assert(flag >= 0);
+  assert(flag <= 1);
+  switch (flag) {
+    case 0:  // not blocked
+      return ~static_cast<intptr_t>(0);
+    case 1:  // blocked; turn off the designated waker bit
+      return ~static_cast<intptr_t>(kMuDesig);
+  }
+  ABSL_INTERNAL_UNREACHABLE;
+}
 
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuWrWait)  // blocked; pretend there are no waiting writers
-};
+// Conditionally ignores the existence of waiting writers if a reader that has
+// already blocked once wakes up.
+static intptr_t IgnoreWaitingWritersMask(int flag) {
+  assert(flag >= 0);
+  assert(flag <= 1);
+  switch (flag) {
+    case 0:  // not blocked
+      return ~static_cast<intptr_t>(0);
+    case 1:  // blocked; pretend there are no waiting writers
+      return ~static_cast<intptr_t>(kMuWrWait);
+  }
+  ABSL_INTERNAL_UNREACHABLE;
+}
 
 // Internal version of LockWhen().  See LockSlowWithDeadline()
 ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
@@ -1852,8 +1862,10 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
   bool unlock = false;
   if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
       mu_.compare_exchange_strong(
-          v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                 how->fast_add,
+          v,
+          (how->fast_or |
+           (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+              how->fast_add,
           std::memory_order_acquire, std::memory_order_relaxed)) {
     if (cond == nullptr ||
         EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
@@ -1927,9 +1939,10 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
     CheckForMutexCorruption(v, "Lock");
     if ((v & waitp->how->slow_need_zero) == 0) {
       if (mu_.compare_exchange_strong(
-              v, (waitp->how->fast_or |
-                  (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                     waitp->how->fast_add,
+              v,
+              (waitp->how->fast_or |
+               (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+                  waitp->how->fast_add,
              std::memory_order_acquire, std::memory_order_relaxed)) {
        if (waitp->cond == nullptr ||
            EvalConditionAnnotated(waitp->cond, this, true, false,
@@ -1946,8 +1959,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
       if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
         // This thread tries to become the one and only waiter.
         PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
-        intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
-                      kMuWait;
+        intptr_t nv =
+            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
+            kMuWait;
         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
           nv |= kMuWrWait;
@@ -1961,12 +1975,13 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
           waitp->thread->waitp = nullptr;
         }
       } else if ((v & waitp->how->slow_inc_need_zero &
-                  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+                  IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
         // This is a reader that needs to increment the reader count,
         // but the count is currently held in the last waiter.
         if (mu_.compare_exchange_strong(
-                v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                       kMuReader,
+                v,
+                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+                    kMuSpin | kMuReader,
                 std::memory_order_acquire, std::memory_order_relaxed)) {
           PerThreadSynch *h = GetPerThreadSynch(v);
           h->readers += kMuOne;  // inc reader count in waiter
@@ -1987,8 +2002,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
         }
       } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
                  mu_.compare_exchange_strong(
-                     v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                            kMuWait,
+                     v,
+                     (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+                         kMuSpin | kMuWait,
                      std::memory_order_acquire, std::memory_order_relaxed)) {
           PerThreadSynch *h = GetPerThreadSynch(v);
           PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
