Export of internal Abseil changes

--
f50d25c8f8491ef7031cbbcad78edd15f98c2bd1 by Abseil Team <absl-team@google.com>:

Add myriad2 to HAVE_MMAP
Remove mutex_nonprod and associated defines.

PiperOrigin-RevId: 333759830

--
25ef4c577ea983aa3fcd6cfe2af6cdc62a06f520 by Samuel Benzaquen <sbenza@google.com>:

Internal refactor.
Represent the data with a union to allow for better constexpr support in the future.

PiperOrigin-RevId: 333756733
GitOrigin-RevId: f50d25c8f8491ef7031cbbcad78edd15f98c2bd1
Change-Id: Ieecd2c47cb20de638726eb3f9fc2e5682d05dcca
Branch: pull/800/head
Author: Abseil Team; committed by Andy Getz
Parent: cad3f30b44
Commit: d1de75bf54
 absl/base/config.h                              |   2
 absl/strings/cord.cc                            |  85
 absl/strings/cord.h                             |  95
 absl/strings/internal/cord_internal.h           |  39
 absl/synchronization/BUILD.bazel                |   6
 absl/synchronization/CMakeLists.txt             |   1
 absl/synchronization/internal/mutex_nonprod.cc  | 325
 absl/synchronization/internal/mutex_nonprod.inc | 249
 absl/synchronization/mutex.h                    |  34
 absl/synchronization/mutex_test.cc              |   4
 10 files changed
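The union refactor called out in the second commit message is what makes future constexpr support possible: a constexpr constructor can initialize exactly one union member, whereas the old char-array representation needed a memcpy of the tree pointer, which cannot appear in a constant expression. A minimal sketch of the idea in isolation (hypothetical names, not the actual Abseil layout):

    #include <cstddef>

    struct Node { size_t length; };  // stand-in for cord_internal::CordRep

    union SmallOrTree {
      // A constexpr constructor may initialize one union member, so an
      // empty inline state can be built at compile time.
      constexpr SmallOrTree() : as_chars{} {}
      constexpr explicit SmallOrTree(Node* n) : as_tree(n) {}

      Node* as_tree;
      char as_chars[16];
    };

    // Compile-time construction now works; memcpy-based pointer storage
    // would not be usable here.
    constexpr SmallOrTree kEmpty{};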

--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -364,7 +364,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
defined(__ASYLO__)
defined(__ASYLO__) || defined(__myriad2__)
#define ABSL_HAVE_MMAP 1
#endif
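For context on the config.h change: ABSL_HAVE_MMAP is a feature-test macro, so adding `__myriad2__` to the platform list switches downstream code onto its mmap paths. A hedged sketch of the usual consumption pattern (AllocPages is a made-up helper, and the MAP_ANONYMOUS spelling varies by platform):

    #include <cstddef>
    #include <cstdlib>

    #include "absl/base/config.h"

    #ifdef ABSL_HAVE_MMAP
    #include <sys/mman.h>

    // Platforms in the list above can back allocations with mmap.
    void* AllocPages(size_t size) {
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? nullptr : p;
    }
    #else
    // Fallback where mmap is unavailable.
    void* AllocPages(size_t size) { return std::malloc(size); }
    #endif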

--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -50,16 +50,10 @@ using ::absl::cord_internal::CordRepConcat;
using ::absl::cord_internal::CordRepExternal;
using ::absl::cord_internal::CordRepSubstring;
// Various representations that we allow
enum CordRepKind {
CONCAT = 0,
EXTERNAL = 1,
SUBSTRING = 2,
// We have different tags for different sized flat arrays,
// starting with FLAT
FLAT = 3,
};
using ::absl::cord_internal::CONCAT;
using ::absl::cord_internal::EXTERNAL;
using ::absl::cord_internal::FLAT;
using ::absl::cord_internal::SUBSTRING;
namespace cord_internal {
@@ -447,48 +441,49 @@ inline void Cord::InlineRep::set_data(const char* data, size_t n,
bool nullify_tail) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
cord_internal::SmallMemmove(data_, data, n, nullify_tail);
data_[kMaxInline] = static_cast<char>(n);
cord_internal::SmallMemmove(data_.as_chars, data, n, nullify_tail);
set_tagged_size(static_cast<char>(n));
}
inline char* Cord::InlineRep::set_data(size_t n) {
assert(n <= kMaxInline);
memset(data_, 0, sizeof(data_));
data_[kMaxInline] = static_cast<char>(n);
return data_;
ResetToEmpty();
set_tagged_size(static_cast<char>(n));
return data_.as_chars;
}
inline CordRep* Cord::InlineRep::force_tree(size_t extra_hint) {
size_t len = data_[kMaxInline];
CordRep* result;
size_t len = tagged_size();
if (len > kMaxInline) {
memcpy(&result, data_, sizeof(result));
} else {
result = NewFlat(len + extra_hint);
result->length = len;
memcpy(result->data, data_, len);
set_tree(result);
return data_.as_tree.rep;
}
CordRep* result = NewFlat(len + extra_hint);
result->length = len;
static_assert(kMinFlatLength >= sizeof(data_.as_chars), "");
memcpy(result->data, data_.as_chars, sizeof(data_.as_chars));
set_tree(result);
return result;
}
inline void Cord::InlineRep::reduce_size(size_t n) {
size_t tag = data_[kMaxInline];
size_t tag = tagged_size();
assert(tag <= kMaxInline);
assert(tag >= n);
tag -= n;
memset(data_ + tag, 0, n);
data_[kMaxInline] = static_cast<char>(tag);
memset(data_.as_chars + tag, 0, n);
set_tagged_size(static_cast<char>(tag));
}
inline void Cord::InlineRep::remove_prefix(size_t n) {
cord_internal::SmallMemmove(data_, data_ + n, data_[kMaxInline] - n);
cord_internal::SmallMemmove(data_.as_chars, data_.as_chars + n,
tagged_size() - n);
reduce_size(n);
}
void Cord::InlineRep::AppendTree(CordRep* tree) {
if (tree == nullptr) return;
size_t len = data_[kMaxInline];
size_t len = tagged_size();
if (len == 0) {
set_tree(tree);
} else {
@@ -498,7 +493,7 @@ void Cord::InlineRep::AppendTree(CordRep* tree) {
void Cord::InlineRep::PrependTree(CordRep* tree) {
assert(tree != nullptr);
size_t len = data_[kMaxInline];
size_t len = tagged_size();
if (len == 0) {
set_tree(tree);
} else {
@@ -554,11 +549,11 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
}
// Try to fit in the inline buffer if possible.
size_t inline_length = data_[kMaxInline];
size_t inline_length = tagged_size();
if (inline_length < kMaxInline && max_length <= kMaxInline - inline_length) {
*region = data_ + inline_length;
*region = data_.as_chars + inline_length;
*size = max_length;
data_[kMaxInline] = static_cast<char>(inline_length + max_length);
set_tagged_size(static_cast<char>(inline_length + max_length));
return;
}
@@ -582,11 +577,11 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size) {
const size_t max_length = std::numeric_limits<size_t>::max();
// Try to fit in the inline buffer if possible.
size_t inline_length = data_[kMaxInline];
size_t inline_length = tagged_size();
if (inline_length < kMaxInline) {
*region = data_ + inline_length;
*region = data_.as_chars + inline_length;
*size = kMaxInline - inline_length;
data_[kMaxInline] = kMaxInline;
set_tagged_size(kMaxInline);
return;
}
@@ -621,7 +616,7 @@ static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
ClearSlow();
memcpy(data_, src.data_, sizeof(data_));
data_ = src.data_;
if (is_tree()) {
Ref(tree());
}
@@ -631,7 +626,7 @@ void Cord::InlineRep::ClearSlow() {
if (is_tree()) {
Unref(tree());
}
memset(data_, 0, sizeof(data_));
ResetToEmpty();
}
// --------------------------------------------------------------------
@@ -735,11 +730,11 @@ template Cord& Cord::operator=(std::string&& src);
void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
if (src_size == 0) return; // memcpy(_, nullptr, 0) is undefined.
// Try to fit in the inline buffer if possible.
size_t inline_length = data_[kMaxInline];
size_t inline_length = tagged_size();
if (inline_length < kMaxInline && src_size <= kMaxInline - inline_length) {
// Append new data to embedded array
data_[kMaxInline] = static_cast<char>(inline_length + src_size);
memcpy(data_ + inline_length, src_data, src_size);
set_tagged_size(static_cast<char>(inline_length + src_size));
memcpy(data_.as_chars + inline_length, src_data, src_size);
return;
}
@@ -762,7 +757,7 @@ void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
const size_t size2 = inline_length + src_size / 10;
root = NewFlat(std::max<size_t>(size1, size2));
appended = std::min(src_size, TagToLength(root->tag) - inline_length);
memcpy(root->data, data_, inline_length);
memcpy(root->data, data_.as_chars, inline_length);
memcpy(root->data + inline_length, src_data, appended);
root->length = inline_length + appended;
set_tree(root);
@@ -1071,7 +1066,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
} else if (new_size <= InlineRep::kMaxInline) {
Cord::ChunkIterator it = chunk_begin();
it.AdvanceBytes(pos);
char* dest = sub_cord.contents_.data_;
char* dest = sub_cord.contents_.data_.as_chars;
size_t remaining_size = new_size;
while (remaining_size > it->size()) {
cord_internal::SmallMemmove(dest, it->data(), it->size());
@@ -1080,7 +1075,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
++it;
}
cord_internal::SmallMemmove(dest, it->data(), remaining_size);
sub_cord.contents_.data_[InlineRep::kMaxInline] = new_size;
sub_cord.contents_.set_tagged_size(new_size);
} else {
sub_cord.contents_.set_tree(NewSubRange(tree, pos, new_size));
}
@@ -1269,9 +1264,9 @@ bool ComputeCompareResult<bool>(int memcmp_res) {
// Helper routine. Locates the first flat chunk of the Cord without
// initializing the iterator.
inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
size_t n = data_[kMaxInline];
size_t n = tagged_size();
if (n <= kMaxInline) {
return absl::string_view(data_, n);
return absl::string_view(data_.as_chars, n);
}
CordRep* node = tree();
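Throughout these cord.cc hunks the raw data_[kMaxInline] accesses become tagged_size()/set_tagged_size() calls, but the small-buffer logic is unchanged: appends first try to fit in the inline bytes and only then spill into a tree. A standalone sketch of that fast path, with simplified, hypothetical names:

    #include <cstddef>
    #include <cstring>

    constexpr unsigned char kMaxInline = 15;

    struct Inline {
      char chars[kMaxInline + 1];  // last byte doubles as the tagged size
      char tagged_size() const { return chars[kMaxInline]; }
      void set_tagged_size(char n) { chars[kMaxInline] = n; }
    };

    // Returns true if the bytes fit inline; otherwise the caller must
    // promote the representation to a tree (not shown).
    bool TryAppendInline(Inline* rep, const char* src, size_t n) {
      size_t len = static_cast<size_t>(rep->tagged_size());
      if (len > kMaxInline || n > kMaxInline - len) return false;
      std::memcpy(rep->chars + len, src, n);
      rep->set_tagged_size(static_cast<char>(len + n));
      return true;
    }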

--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -644,14 +644,12 @@ class Cord {
// InlineRep holds either a tree pointer, or an array of kMaxInline bytes.
class InlineRep {
public:
static constexpr unsigned char kMaxInline = 15;
static constexpr unsigned char kMaxInline = cord_internal::kMaxInline;
static_assert(kMaxInline >= sizeof(absl::cord_internal::CordRep*), "");
// Tag byte & kMaxInline means we are storing a pointer.
static constexpr unsigned char kTreeFlag = 1 << 4;
// Tag byte & kProfiledFlag means we are profiling the Cord.
static constexpr unsigned char kProfiledFlag = 1 << 5;
static constexpr unsigned char kTreeFlag = cord_internal::kTreeFlag;
static constexpr unsigned char kProfiledFlag = cord_internal::kProfiledFlag;
constexpr InlineRep() : data_{} {}
constexpr InlineRep() : data_() {}
InlineRep(const InlineRep& src);
InlineRep(InlineRep&& src);
InlineRep& operator=(const InlineRep& src);
@@ -684,16 +682,16 @@ class Cord {
void GetAppendRegion(char** region, size_t* size, size_t max_length);
void GetAppendRegion(char** region, size_t* size);
bool IsSame(const InlineRep& other) const {
return memcmp(data_, other.data_, sizeof(data_)) == 0;
return memcmp(&data_, &other.data_, sizeof(data_)) == 0;
}
int BitwiseCompare(const InlineRep& other) const {
uint64_t x, y;
// Use memcpy to avoid anti-aliasing issues.
memcpy(&x, data_, sizeof(x));
memcpy(&y, other.data_, sizeof(y));
// Use memcpy to avoid aliasing issues.
memcpy(&x, &data_, sizeof(x));
memcpy(&y, &other.data_, sizeof(y));
if (x == y) {
memcpy(&x, data_ + 8, sizeof(x));
memcpy(&y, other.data_ + 8, sizeof(y));
memcpy(&x, reinterpret_cast<const char*>(&data_) + 8, sizeof(x));
memcpy(&y, reinterpret_cast<const char*>(&other.data_) + 8, sizeof(y));
if (x == y) return 0;
}
return absl::big_endian::FromHost64(x) < absl::big_endian::FromHost64(y)
@@ -706,16 +704,16 @@ class Cord {
// to 15 bytes does not cause a memory allocation.
absl::strings_internal::STLStringResizeUninitialized(dst,
sizeof(data_) - 1);
memcpy(&(*dst)[0], data_, sizeof(data_) - 1);
memcpy(&(*dst)[0], &data_, sizeof(data_) - 1);
// erase is faster than resize because the logic for memory allocation is
// not needed.
dst->erase(data_[kMaxInline]);
dst->erase(tagged_size());
}
// Copies the inline contents into `dst`. Assumes the cord is not empty.
void CopyToArray(char* dst) const;
bool is_tree() const { return data_[kMaxInline] > kMaxInline; }
bool is_tree() const { return tagged_size() > kMaxInline; }
private:
friend class Cord;
@@ -724,10 +722,18 @@ class Cord {
// Unrefs the tree, stops profiling, and zeroes the contents
void ClearSlow();
// If the data has length <= kMaxInline, we store it in data_[0..len-1],
// and store the length in data_[kMaxInline]. Else we store it in a tree
// and store a pointer to that tree in data_[0..sizeof(CordRep*)-1].
alignas(absl::cord_internal::CordRep*) char data_[kMaxInline + 1];
void ResetToEmpty() { data_ = {}; }
// This uses reinterpret_cast instead of the union to avoid accessing the
// inactive union element. The tagged size is not a common prefix.
void set_tagged_size(char new_tag) {
reinterpret_cast<char*>(&data_)[kMaxInline] = new_tag;
}
char tagged_size() const {
return reinterpret_cast<const char*>(&data_)[kMaxInline];
}
cord_internal::InlineData data_;
};
InlineRep contents_;
@@ -879,12 +885,12 @@ Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
}
inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src) {
cord_internal::SmallMemmove(data_, src.data_, sizeof(data_));
data_ = src.data_;
}
inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) {
memcpy(data_, src.data_, sizeof(data_));
memset(src.data_, 0, sizeof(data_));
data_ = src.data_;
src.ResetToEmpty();
}
inline Cord::InlineRep& Cord::InlineRep::operator=(const Cord::InlineRep& src) {
@@ -892,7 +898,7 @@ inline Cord::InlineRep& Cord::InlineRep::operator=(const Cord::InlineRep& src) {
return *this;
}
if (!is_tree() && !src.is_tree()) {
cord_internal::SmallMemmove(data_, src.data_, sizeof(data_));
data_ = src.data_;
return *this;
}
AssignSlow(src);
@@ -904,8 +910,8 @@ inline Cord::InlineRep& Cord::InlineRep::operator=(
if (is_tree()) {
ClearSlow();
}
memcpy(data_, src.data_, sizeof(data_));
memset(src.data_, 0, sizeof(data_));
data_ = src.data_;
src.ResetToEmpty();
return *this;
}
@@ -914,43 +920,39 @@ inline void Cord::InlineRep::Swap(Cord::InlineRep* rhs) {
return;
}
Cord::InlineRep tmp;
cord_internal::SmallMemmove(tmp.data_, data_, sizeof(data_));
cord_internal::SmallMemmove(data_, rhs->data_, sizeof(data_));
cord_internal::SmallMemmove(rhs->data_, tmp.data_, sizeof(data_));
std::swap(data_, rhs->data_);
}
inline const char* Cord::InlineRep::data() const {
return is_tree() ? nullptr : data_;
return is_tree() ? nullptr : data_.as_chars;
}
inline absl::cord_internal::CordRep* Cord::InlineRep::tree() const {
if (is_tree()) {
absl::cord_internal::CordRep* rep;
memcpy(&rep, data_, sizeof(rep));
return rep;
return data_.as_tree.rep;
} else {
return nullptr;
}
}
inline bool Cord::InlineRep::empty() const { return data_[kMaxInline] == 0; }
inline bool Cord::InlineRep::empty() const { return tagged_size() == 0; }
inline size_t Cord::InlineRep::size() const {
const char tag = data_[kMaxInline];
const char tag = tagged_size();
if (tag <= kMaxInline) return tag;
return static_cast<size_t>(tree()->length);
}
inline void Cord::InlineRep::set_tree(absl::cord_internal::CordRep* rep) {
if (rep == nullptr) {
memset(data_, 0, sizeof(data_));
ResetToEmpty();
} else {
bool was_tree = is_tree();
memcpy(data_, &rep, sizeof(rep));
memset(data_ + sizeof(rep), 0, sizeof(data_) - sizeof(rep) - 1);
data_.as_tree = {rep, {}, tagged_size()};
if (!was_tree) {
data_[kMaxInline] = kTreeFlag;
// If we were not a tree already, set the tag.
// Otherwise, leave it alone because it might have the profile bit on.
set_tagged_size(kTreeFlag);
}
}
}
@@ -961,25 +963,20 @@ inline void Cord::InlineRep::replace_tree(absl::cord_internal::CordRep* rep) {
set_tree(rep);
return;
}
memcpy(data_, &rep, sizeof(rep));
memset(data_ + sizeof(rep), 0, sizeof(data_) - sizeof(rep) - 1);
data_.as_tree = {rep, {}, tagged_size()};
}
inline absl::cord_internal::CordRep* Cord::InlineRep::clear() {
const char tag = data_[kMaxInline];
absl::cord_internal::CordRep* result = nullptr;
if (tag > kMaxInline) {
memcpy(&result, data_, sizeof(result));
}
memset(data_, 0, sizeof(data_)); // Clear the cord
absl::cord_internal::CordRep* result = tree();
ResetToEmpty();
return result;
}
inline void Cord::InlineRep::CopyToArray(char* dst) const {
assert(!is_tree());
size_t n = data_[kMaxInline];
size_t n = tagged_size();
assert(n != 0);
cord_internal::SmallMemmove(dst, data_, n);
cord_internal::SmallMemmove(dst, data_.as_chars, n);
}
constexpr inline Cord::Cord() noexcept {}
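BitwiseCompare in the hunk above relies on a classic trick: once each 8-byte chunk is converted to big-endian, an unsigned integer comparison agrees with lexicographic (memcmp-style) ordering of the underlying bytes. A self-contained illustration, using a hand-rolled byte swap as a stand-in for absl::big_endian::FromHost64 on a little-endian host:

    #include <cstdint>
    #include <cstring>

    // Reverses byte order, so the first byte in memory becomes the most
    // significant byte of the integer (little-endian hosts only).
    uint64_t ToBigEndian(uint64_t v) {
      uint64_t r = 0;
      for (int i = 0; i < 8; ++i) r = (r << 8) | ((v >> (8 * i)) & 0xff);
      return r;
    }

    // Orders two 8-byte buffers like memcmp, with one integer compare.
    int Compare8(const char* a, const char* b) {
      uint64_t x, y;
      std::memcpy(&x, a, sizeof(x));  // memcpy sidesteps aliasing issues
      std::memcpy(&y, b, sizeof(y));
      x = ToBigEndian(x);
      y = ToBigEndian(y);
      return x == y ? 0 : (x < y ? -1 : 1);
    }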

--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -85,6 +85,17 @@ struct CordRepConcat;
struct CordRepSubstring;
struct CordRepExternal;
// Various representations that we allow
enum CordRepKind {
CONCAT = 0,
EXTERNAL = 1,
SUBSTRING = 2,
// We have different tags for different sized flat arrays,
// starting with FLAT
FLAT = 3,
};
struct CordRep {
// The following three fields have to be less than 32 bytes since
// that is the smallest supported flat node size.
@@ -167,6 +178,34 @@ struct CordRepExternalImpl
}
};
enum {
kMaxInline = 15,
// Tag byte & kMaxInline means we are storing a pointer.
kTreeFlag = 1 << 4,
// Tag byte & kProfiledFlag means we are profiling the Cord.
kProfiledFlag = 1 << 5
};
// If the data has length <= kMaxInline, we store it in `as_chars`, and
// store the size in `tagged_size`.
// Else we store it in a tree and store a pointer to that tree in
// `as_tree.rep` and store a tag in `tagged_size`.
struct AsTree {
absl::cord_internal::CordRep* rep;
char padding[kMaxInline + 1 - sizeof(absl::cord_internal::CordRep*) - 1];
char tagged_size;
};
union InlineData {
constexpr InlineData() : as_chars{} {}
explicit constexpr InlineData(AsTree tree) : as_tree(tree) {}
AsTree as_tree;
char as_chars[kMaxInline + 1];
};
static_assert(sizeof(InlineData) == kMaxInline + 1, "");
static_assert(sizeof(AsTree) == sizeof(InlineData), "");
static_assert(offsetof(AsTree, tagged_size) == kMaxInline, "");
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl
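The three static_asserts pin down the invariant the whole refactor leans on: byte kMaxInline is simultaneously the last element of as_chars and, thanks to the padding array, AsTree::tagged_size, so the tag is readable through either union view. A sketch of how that byte is interpreted, using the constants from the enum above (offsets assume a 64-bit pointer):

    #include <cstddef>

    // AsTree layout on a 64-bit platform:
    //   rep         : bytes 0..7
    //   padding     : bytes 8..14  (kMaxInline + 1 - sizeof(CordRep*) - 1)
    //   tagged_size : byte 15      (same offset as as_chars[kMaxInline])

    bool IsTree(unsigned char tagged_size) {
      return tagged_size > 15;  // > kMaxInline: kTreeFlag (1 << 4) is set
    }

    size_t InlineLength(unsigned char tagged_size) {
      return tagged_size;  // meaningful only when !IsTree(tagged_size)
    }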

--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -73,15 +73,13 @@ cc_library(
"internal/create_thread_identity.cc",
"internal/per_thread_sem.cc",
"internal/waiter.cc",
"mutex.cc",
"notification.cc",
] + select({
"//conditions:default": ["mutex.cc"],
}),
],
hdrs = [
"barrier.h",
"blocking_counter.h",
"internal/create_thread_identity.h",
"internal/mutex_nonprod.inc",
"internal/per_thread_sem.h",
"internal/waiter.h",
"mutex.h",

--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -52,7 +52,6 @@ absl_cc_library(
"barrier.h"
"blocking_counter.h"
"internal/create_thread_identity.h"
"internal/mutex_nonprod.inc"
"internal/per_thread_sem.h"
"internal/waiter.h"
"mutex.h"

--- a/absl/synchronization/internal/mutex_nonprod.cc
+++ /dev/null
@@ -1,325 +0,0 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Implementation of a small subset of Mutex and CondVar functionality
// for platforms where the production implementation hasn't been fully
// ported yet.
#include "absl/synchronization/mutex.h"
#if defined(_WIN32)
#include <chrono> // NOLINT(build/c++11)
#else
#include <sys/time.h>
#include <time.h>
#endif
#include <algorithm>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {}
void EnableMutexInvariantDebugging(bool) {}
namespace synchronization_internal {
namespace {
// Return the current time plus the timeout.
absl::Time DeadlineFromTimeout(absl::Duration timeout) {
return absl::Now() + timeout;
}
// Limit the deadline to a positive, 32-bit time_t value to accommodate
// implementation restrictions. This also deals with InfinitePast and
// InfiniteFuture.
absl::Time LimitedDeadline(absl::Time deadline) {
deadline = std::max(absl::FromTimeT(0), deadline);
deadline = std::min(deadline, absl::FromTimeT(0x7fffffff));
return deadline;
}
} // namespace
#if defined(_WIN32)
MutexImpl::MutexImpl() {}
MutexImpl::~MutexImpl() {
if (locked_) {
std_mutex_.unlock();
}
}
void MutexImpl::Lock() {
std_mutex_.lock();
locked_ = true;
}
bool MutexImpl::TryLock() {
bool locked = std_mutex_.try_lock();
if (locked) locked_ = true;
return locked;
}
void MutexImpl::Unlock() {
locked_ = false;
released_.SignalAll();
std_mutex_.unlock();
}
CondVarImpl::CondVarImpl() {}
CondVarImpl::~CondVarImpl() {}
void CondVarImpl::Signal() { std_cv_.notify_one(); }
void CondVarImpl::SignalAll() { std_cv_.notify_all(); }
void CondVarImpl::Wait(MutexImpl* mu) {
mu->released_.SignalAll();
std_cv_.wait(mu->std_mutex_);
}
bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
mu->released_.SignalAll();
time_t when = ToTimeT(deadline);
int64_t nanos = ToInt64Nanoseconds(deadline - absl::FromTimeT(when));
std::chrono::system_clock::time_point deadline_tp =
std::chrono::system_clock::from_time_t(when) +
std::chrono::duration_cast<std::chrono::system_clock::duration>(
std::chrono::nanoseconds(nanos));
auto deadline_since_epoch =
std::chrono::duration_cast<std::chrono::duration<double>>(
deadline_tp - std::chrono::system_clock::from_time_t(0));
return std_cv_.wait_until(mu->std_mutex_, deadline_tp) ==
std::cv_status::timeout;
}
#else // ! _WIN32
MutexImpl::MutexImpl() {
ABSL_RAW_CHECK(pthread_mutex_init(&pthread_mutex_, nullptr) == 0,
"pthread error");
}
MutexImpl::~MutexImpl() {
if (locked_) {
ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
}
ABSL_RAW_CHECK(pthread_mutex_destroy(&pthread_mutex_) == 0, "pthread error");
}
void MutexImpl::Lock() {
ABSL_RAW_CHECK(pthread_mutex_lock(&pthread_mutex_) == 0, "pthread error");
locked_ = true;
}
bool MutexImpl::TryLock() {
bool locked = (0 == pthread_mutex_trylock(&pthread_mutex_));
if (locked) locked_ = true;
return locked;
}
void MutexImpl::Unlock() {
locked_ = false;
released_.SignalAll();
ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
}
CondVarImpl::CondVarImpl() {
ABSL_RAW_CHECK(pthread_cond_init(&pthread_cv_, nullptr) == 0,
"pthread error");
}
CondVarImpl::~CondVarImpl() {
ABSL_RAW_CHECK(pthread_cond_destroy(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::Signal() {
ABSL_RAW_CHECK(pthread_cond_signal(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::SignalAll() {
ABSL_RAW_CHECK(pthread_cond_broadcast(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::Wait(MutexImpl* mu) {
mu->released_.SignalAll();
ABSL_RAW_CHECK(pthread_cond_wait(&pthread_cv_, &mu->pthread_mutex_) == 0,
"pthread error");
}
bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
mu->released_.SignalAll();
struct timespec ts = ToTimespec(deadline);
int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts);
if (rc == ETIMEDOUT) return true;
ABSL_RAW_CHECK(rc == 0, "pthread error");
return false;
}
#endif // ! _WIN32
void MutexImpl::Await(const Condition& cond) {
if (cond.Eval()) return;
released_.SignalAll();
do {
released_.Wait(this);
} while (!cond.Eval());
}
bool MutexImpl::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
if (cond.Eval()) return true;
released_.SignalAll();
while (true) {
if (released_.WaitWithDeadline(this, deadline)) return false;
if (cond.Eval()) return true;
}
}
} // namespace synchronization_internal
Mutex::Mutex() {}
Mutex::~Mutex() {}
void Mutex::Lock() { impl()->Lock(); }
void Mutex::Unlock() { impl()->Unlock(); }
bool Mutex::TryLock() { return impl()->TryLock(); }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
void Mutex::Await(const Condition& cond) { impl()->Await(cond); }
void Mutex::LockWhen(const Condition& cond) {
Lock();
Await(cond);
}
bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
return impl()->AwaitWithDeadline(
cond, synchronization_internal::LimitedDeadline(deadline));
}
bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
return AwaitWithDeadline(
cond, synchronization_internal::DeadlineFromTimeout(timeout));
}
bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
Lock();
return AwaitWithDeadline(cond, deadline);
}
bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
return LockWhenWithDeadline(
cond, synchronization_internal::DeadlineFromTimeout(timeout));
}
void Mutex::ReaderLockWhen(const Condition& cond) {
ReaderLock();
Await(cond);
}
bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) {
return LockWhenWithTimeout(cond, timeout);
}
bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
absl::Time deadline) {
return LockWhenWithDeadline(cond, deadline);
}
void Mutex::EnableDebugLog(const char*) {}
void Mutex::EnableInvariantDebugging(void (*)(void*), void*) {}
void Mutex::ForgetDeadlockInfo() {}
void Mutex::AssertHeld() const {}
void Mutex::AssertReaderHeld() const {}
void Mutex::AssertNotHeld() const {}
CondVar::CondVar() {}
CondVar::~CondVar() {}
void CondVar::Signal() { impl()->Signal(); }
void CondVar::SignalAll() { impl()->SignalAll(); }
void CondVar::Wait(Mutex* mu) { return impl()->Wait(mu->impl()); }
bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
return impl()->WaitWithDeadline(
mu->impl(), synchronization_internal::LimitedDeadline(deadline));
}
bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
return WaitWithDeadline(mu, absl::Now() + timeout);
}
void CondVar::EnableDebugLog(const char*) {}
#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
#endif
// A function that just returns its argument, dereferenced
static bool Dereference(void *arg) {
// ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg);
return *(static_cast<bool *>(arg));
}
Condition::Condition() {} // null constructor, used for kTrue only
const Condition Condition::kTrue;
Condition::Condition(bool (*func)(void *), void *arg)
: eval_(&CallVoidPtrFunction),
function_(func),
method_(nullptr),
arg_(arg) {}
bool Condition::CallVoidPtrFunction(const Condition *c) {
return (*c->function_)(c->arg_);
}
Condition::Condition(const bool *cond)
: eval_(CallVoidPtrFunction),
function_(Dereference),
method_(nullptr),
// const_cast is safe since Dereference does not modify arg
arg_(const_cast<bool *>(cond)) {}
bool Condition::Eval() const {
// eval_ == null for kTrue
return (this->eval_ == nullptr) || (*this->eval_)(this);
}
void RegisterSymbolizer(bool (*)(const void*, char*, int)) {}
ABSL_NAMESPACE_END
} // namespace absl
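The deleted MutexImpl::Await above is the textbook condition-wait loop: waiters are signaled on every unlock, and each wakeup re-checks the predicate. The same shape expressed with standard primitives (a sketch under the assumption that the caller already holds mu, as Mutex::Await requires; this is not the removed Abseil code):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    // Blocks until pred() is true. cv must be notified whenever the state
    // guarded by mu changes (the deleted code signals in Unlock()).
    void Await(std::mutex& mu, std::condition_variable& cv,
               const std::function<bool()>& pred) {
      std::unique_lock<std::mutex> lock(mu, std::adopt_lock);
      cv.wait(lock, pred);  // loops internally until pred() holds
      lock.release();       // leave mu held on return, as Await does
    }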

--- a/absl/synchronization/internal/mutex_nonprod.inc
+++ /dev/null
@@ -1,249 +0,0 @@
// Do not include. This is an implementation detail of base/mutex.h.
//
// Declares three classes:
//
// base::internal::MutexImpl - implementation helper for Mutex
// base::internal::CondVarImpl - implementation helper for CondVar
// base::internal::SynchronizationStorage<T> - implementation helper for
// Mutex, CondVar
#include <type_traits>
#if defined(_WIN32)
#include <condition_variable>
#include <mutex>
#else
#include <pthread.h>
#endif
#include "absl/base/call_once.h"
#include "absl/time/time.h"
// Declare that Mutex::ReaderLock is actually Lock(). Intended primarily
// for tests, and even then as a last resort.
#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set
#else
#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1
#endif
// Declare that Mutex::EnableInvariantDebugging is not implemented.
// Intended primarily for tests, and even then as a last resort.
#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED
#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set
#else
#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
class Condition;
namespace synchronization_internal {
class MutexImpl;
// Do not use this implementation detail of CondVar. Provides most of the
// implementation, but should not be placed directly in static storage
// because it will not linker initialize properly. See
// SynchronizationStorage<T> below for what we mean by linker
// initialization.
class CondVarImpl {
public:
CondVarImpl();
CondVarImpl(const CondVarImpl&) = delete;
CondVarImpl& operator=(const CondVarImpl&) = delete;
~CondVarImpl();
void Signal();
void SignalAll();
void Wait(MutexImpl* mutex);
bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline);
private:
#if defined(_WIN32)
std::condition_variable_any std_cv_;
#else
pthread_cond_t pthread_cv_;
#endif
};
// Do not use this implementation detail of Mutex. Provides most of the
// implementation, but should not be placed directly in static storage
// because it will not linker initialize properly. See
// SynchronizationStorage<T> below for what we mean by linker
// initialization.
class MutexImpl {
public:
MutexImpl();
MutexImpl(const MutexImpl&) = delete;
MutexImpl& operator=(const MutexImpl&) = delete;
~MutexImpl();
void Lock();
bool TryLock();
void Unlock();
void Await(const Condition& cond);
bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
private:
friend class CondVarImpl;
#if defined(_WIN32)
std::mutex std_mutex_;
#else
pthread_mutex_t pthread_mutex_;
#endif
// True if the underlying mutex is locked. If the destructor is entered
// while locked_, the underlying mutex is unlocked. Mutex supports
// destruction while locked, but the same is undefined behavior for both
// pthread_mutex_t and std::mutex.
bool locked_ = false;
// Signaled before releasing the lock, in support of Await.
CondVarImpl released_;
};
// Do not use this implementation detail of CondVar and Mutex. A storage
// space for T that supports a LinkerInitialized constructor. T must
// have a default constructor, which is called by the first call to
// get(). T's destructor is never called if the LinkerInitialized
// constructor is called.
//
// Objects constructed with the default constructor are constructed and
// destructed like any other object, and should never be allocated in
// static storage.
//
// Objects constructed with the LinkerInitialized constructor should
// always be in static storage. For such objects, calls to get() are always
// valid, except from signal handlers.
//
// Note that this implementation relies on undefined language behavior that
// are known to hold for the set of supported compilers. An analysis
// follows.
//
// From the C++11 standard:
//
// [basic.life] says an object has non-trivial initialization if it is of
// class type and it is initialized by a constructor other than a trivial
// default constructor. (the LinkerInitialized constructor is
// non-trivial)
//
// [basic.life] says the lifetime of an object with a non-trivial
// constructor begins when the call to the constructor is complete.
//
// [basic.life] says the lifetime of an object with non-trivial destructor
// ends when the call to the destructor begins.
//
// [basic.life] p5 specifies undefined behavior when accessing non-static
// members of an instance outside its
// lifetime. (SynchronizationStorage::get() access non-static members)
//
// So, LinkerInitialized object of SynchronizationStorage uses a
// non-trivial constructor, which is called at some point during dynamic
// initialization, and is therefore subject to order of dynamic
// initialization bugs, where get() is called before the object's
// constructor is, resulting in undefined behavior.
//
// Similarly, a LinkerInitialized SynchronizationStorage object has a
// non-trivial destructor, and so its lifetime ends at some point during
// destruction of objects with static storage duration [basic.start.term]
// p4. There is a window where other exit code could call get() after this
// occurs, resulting in undefined behavior.
//
// Combined, these statements imply that LinkerInitialized instances
// of SynchronizationStorage<T> rely on undefined behavior.
//
// However, in practice, the implementation works on all supported
// compilers. Specifically, we rely on:
//
// a) zero-initialization being sufficient to initialize
// LinkerInitialized instances for the purposes of calling
// get(), regardless of when the constructor is called. This is
// because the is_dynamic_ boolean is correctly zero-initialized to
// false.
//
// b) the LinkerInitialized constructor is a NOP, and immaterial to
// even to concurrent calls to get().
//
// c) the destructor being a NOP for LinkerInitialized objects
// (guaranteed by a check for !is_dynamic_), and so any concurrent and
// subsequent calls to get() functioning as if the destructor were not
// called, by virtue of the instances' storage remaining valid after the
// destructor runs.
//
// d) That a-c apply transitively when SynchronizationStorage<T> is the
// only member of a class allocated in static storage.
//
// Nothing in the language standard guarantees that a-d hold. In practice,
// these hold in all supported compilers.
//
// Future direction:
//
// Ideally, we would simply use std::mutex or a similar class, which when
// allocated statically would support use immediately after static
// initialization up until static storage is reclaimed (i.e. the properties
// we require of all "linker initialized" instances).
//
// Regarding construction in static storage, std::mutex is required to
// provide a constexpr default constructor [thread.mutex.class], which
// ensures the instance's lifetime begins with static initialization
// [basic.start.init], and so is immune to any problems caused by the order
// of dynamic initialization. However, as of this writing Microsoft's
// Visual Studio does not provide a constexpr constructor for std::mutex.
// See
// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/
//
// Regarding destruction of instances in static storage, [basic.life] does
// say an object ends when storage in which the occupies is released, in
// the case of non-trivial destructor. However, std::mutex is not specified
// to have a trivial destructor.
//
// So, we would need a class with a constexpr default constructor and a
// trivial destructor. Today, we can achieve neither desired property using
// std::mutex directly.
template <typename T>
class SynchronizationStorage {
public:
// Instances allocated on the heap or on the stack should use the default
// constructor.
SynchronizationStorage()
: destruct_(true), once_() {}
constexpr explicit SynchronizationStorage(absl::ConstInitType)
: destruct_(false), once_(), space_{{0}} {}
SynchronizationStorage(SynchronizationStorage&) = delete;
SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
~SynchronizationStorage() {
if (destruct_) {
get()->~T();
}
}
// Retrieve the object in storage. This is fast and thread safe, but does
// incur the cost of absl::call_once().
T* get() {
absl::call_once(once_, SynchronizationStorage::Construct, this);
return reinterpret_cast<T*>(&space_);
}
private:
static void Construct(SynchronizationStorage<T>* self) {
new (&self->space_) T();
}
// When true, T's destructor is run when this is destructed.
const bool destruct_;
absl::once_flag once_;
// An aligned space for the T.
alignas(T) unsigned char space_[sizeof(T)];
};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
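The heart of the deleted SynchronizationStorage<T> is lazy, thread-safe construction into raw aligned storage, gated by a once flag that is usable even from zero-initialized memory. A reduced sketch of that pattern using the std equivalents of absl::once_flag/absl::call_once (note that, like the linker-initialized case described above, the T is never destroyed):

    #include <mutex>  // std::once_flag, std::call_once
    #include <new>    // placement new

    template <typename T>
    class LazyStorage {
     public:
      // First call constructs the T in place; later calls are cheap.
      T* get() {
        std::call_once(once_, [this] { new (&space_) T(); });
        return reinterpret_cast<T*>(&space_);
      }

     private:
      std::once_flag once_;
      alignas(T) unsigned char space_[sizeof(T)];  // raw storage for the T
    };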

--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -72,15 +72,6 @@
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"
// Decide if we should use the non-production implementation because
// the production implementation hasn't been fully ported yet.
#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
#error ABSL_INTERNAL_USE_NONPROD_MUTEX cannot be directly set
#elif defined(ABSL_LOW_LEVEL_ALLOC_MISSING)
#define ABSL_INTERNAL_USE_NONPROD_MUTEX 1
#include "absl/synchronization/internal/mutex_nonprod.inc"
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -461,15 +452,6 @@ class ABSL_LOCKABLE Mutex {
static void InternalAttemptToUseMutexInFatalSignalHandler();
private:
#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
friend class CondVar;
synchronization_internal::MutexImpl *impl() { return impl_.get(); }
synchronization_internal::SynchronizationStorage<
synchronization_internal::MutexImpl>
impl_;
#else
std::atomic<intptr_t> mu_; // The Mutex state.
// Post()/Wait() versus associated PerThreadSem; in class for required
@@ -504,7 +486,6 @@ class ABSL_LOCKABLE Mutex {
void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer(
base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
#endif
// Catch the error of writing Mutex when intending MutexLock.
Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
@@ -838,17 +819,10 @@ class CondVar {
void EnableDebugLog(const char *name);
private:
#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
synchronization_internal::CondVarImpl *impl() { return impl_.get(); }
synchronization_internal::SynchronizationStorage<
synchronization_internal::CondVarImpl>
impl_;
#else
bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
void Remove(base_internal::PerThreadSynch *s);
void Wakeup(base_internal::PerThreadSynch *w);
std::atomic<intptr_t> cv_; // Condition variable state.
#endif
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
};
@@ -906,12 +880,6 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
};
#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
inline constexpr Mutex::Mutex(absl::ConstInitType) : impl_(absl::kConstInit) {}
#else
inline Mutex::Mutex() : mu_(0) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
@@ -920,8 +888,6 @@ inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
inline CondVar::CondVar() : cv_(0) {}
#endif // ABSL_INTERNAL_USE_NONPROD_MUTEX
// static
template <typename T>
bool Condition::CastAndCallMethod(const Condition *c) {
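With the nonprod branch gone, the constexpr kConstInit constructor shown above is the single remaining path for static mutexes. Typical usage of that documented Abseil API:

    #include "absl/base/attributes.h"
    #include "absl/synchronization/mutex.h"

    // Constant-initialized, so it is safe to use regardless of dynamic
    // initialization order across translation units.
    ABSL_CONST_INIT static absl::Mutex g_mu(absl::kConstInit);
    static int g_counter = 0;  // guarded by g_mu

    void Increment() {
      absl::MutexLock lock(&g_mu);
      ++g_counter;
    }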

--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1002,9 +1002,6 @@ TEST(Mutex, AcquireFromCondition) {
x.mu0.Unlock();
}
// The deadlock detector is not part of non-prod builds, so do not test it.
#if !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
TEST(Mutex, DeadlockDetector) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
@@ -1158,7 +1155,6 @@ TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
c.Lock();
c.Unlock();
}
#endif // !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
// --------------------------------------------------------
// Test for timeouts/deadlines on condition waits that are specified using
