Export of internal Abseil changes

--
c7ce91501834b225bc9cf2d3fa2a319dd0b7f864 by Martijn Vels <mvels@google.com>:

Implement global data for CordzHandle in an ODR-hardened way

This change puts the global data into a global delete queue structure and stores a reference to that global data in the handle itself. This hardens the implementation against ODR violations in which handles cross the boundaries of privately loaded dynamic libraries.
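The pattern can be illustrated outside of Abseil with a minimal sketch, assuming hypothetical names (`Registry`, `Handle`, `global_registry`) that are not part of this change: each object captures a pointer to the global structure visible where it was constructed, and a debug-only check verifies that it still matches the global visible where it is used, so a duplicated global caused by an ODR violation is detected instead of silently splitting the delete queue.

// Minimal sketch of the ODR-hardening idea, not the Abseil implementation.
// Registry, Handle, and global_registry are hypothetical names.
#include <cassert>

struct Registry {
  // In the real change this holds the delete-queue mutex and tail pointer.
};

inline Registry global_registry;  // Exactly one instance in an ODR-correct program.

class Handle {
 public:
  Handle() : registry_(&global_registry) {}  // Capture the global seen at construction.

  void Use() const {
    // If a privately loaded dynamic library carries its own copy of
    // global_registry, the captured pointer will not match the one seen here.
    assert(registry_ == &global_registry && "ODR violation");
  }

 private:
  Registry* const registry_;  // Plays the role of CordzHandle::queue_.
};

int main() {
  Handle h;
  h.Use();  // Passes when only one global_registry exists.
}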

PiperOrigin-RevId: 368885636
GitOrigin-RevId: c7ce91501834b225bc9cf2d3fa2a319dd0b7f864
Change-Id: I9775151a760b30989dec9517e4bcd2183e8c1651
Commit: 732c6540c1
Parent: e20fe888fa
Branch: pull/946/head
Author: Abseil Team (committed by Dino Radaković)

Changed files:
  1. absl/strings/CMakeLists.txt (1 changed line)
  2. absl/strings/internal/cordz_handle.cc (36 changed lines)
  3. absl/strings/internal/cordz_handle.h (48 changed lines)

@@ -655,6 +655,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::config
+    absl::raw_logging_internal
     absl::synchronization
 )

@@ -21,26 +21,26 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace cord_internal {
-ABSL_CONST_INIT absl::Mutex CordzHandle::mutex_(absl::kConstInit);
-ABSL_CONST_INIT std::atomic<CordzHandle*> CordzHandle::dq_tail_{nullptr};
+ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(absl::kConstInit);
 CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
   if (is_snapshot) {
-    MutexLock lock(&mutex_);
-    CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+    MutexLock lock(&queue_->mutex);
+    CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
     if (dq_tail != nullptr) {
       dq_prev_ = dq_tail;
       dq_tail->dq_next_ = this;
     }
-    dq_tail_.store(this, std::memory_order_release);
+    queue_->dq_tail.store(this, std::memory_order_release);
   }
 }
 CordzHandle::~CordzHandle() {
+  ODRCheck();
   if (is_snapshot_) {
     std::vector<CordzHandle*> to_delete;
     {
-      absl::MutexLock lock(&mutex_);
+      absl::MutexLock lock(&queue_->mutex);
       CordzHandle* next = dq_next_;
       if (dq_prev_ == nullptr) {
         // We were head of the queue, delete every CordzHandle until we reach
@@ -56,7 +56,7 @@ CordzHandle::~CordzHandle() {
       if (next) {
         next->dq_prev_ = dq_prev_;
       } else {
-        dq_tail_.store(dq_prev_, std::memory_order_release);
+        queue_->dq_tail.store(dq_prev_, std::memory_order_release);
       }
     }
     for (CordzHandle* handle : to_delete) {
@@ -67,13 +67,15 @@ CordzHandle::~CordzHandle() {
 void CordzHandle::Delete(CordzHandle* handle) {
   if (handle) {
-    if (!handle->is_snapshot_ && !UnsafeDeleteQueueEmpty()) {
-      MutexLock lock(&mutex_);
-      CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+    handle->ODRCheck();
+    Queue* const queue = handle->queue_;
+    if (!handle->is_snapshot_ && !queue->IsEmpty()) {
+      MutexLock lock(&queue->mutex);
+      CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
       if (dq_tail != nullptr) {
         handle->dq_prev_ = dq_tail;
         dq_tail->dq_next_ = handle;
-        dq_tail_.store(handle, std::memory_order_release);
+        queue->dq_tail.store(handle, std::memory_order_release);
         return;
       }
     }
@@ -83,8 +85,8 @@ void CordzHandle::Delete(CordzHandle* handle) {
 std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
   std::vector<const CordzHandle*> handles;
-  MutexLock lock(&mutex_);
-  CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+  MutexLock lock(&global_queue_.mutex);
+  CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
   for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
     handles.push_back(p);
   }
@@ -93,12 +95,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
 bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
     const CordzHandle* handle) const {
+  ODRCheck();
   if (!is_snapshot_) return false;
   if (handle == nullptr) return true;
   if (handle->is_snapshot_) return false;
   bool snapshot_found = false;
-  MutexLock lock(&mutex_);
-  for (const CordzHandle* p = dq_tail_; p; p = p->dq_prev_) {
+  MutexLock lock(&queue_->mutex);
+  for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
     if (p == handle) return !snapshot_found;
     if (p == this) snapshot_found = true;
   }
@@ -108,12 +111,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
 std::vector<const CordzHandle*>
 CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
+  ODRCheck();
   std::vector<const CordzHandle*> handles;
   if (!is_snapshot()) {
     return handles;
   }
-  MutexLock lock(&mutex_);
+  MutexLock lock(&queue_->mutex);
   for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
     if (!p->is_snapshot()) {
       handles.push_back(p);

@@ -19,6 +19,7 @@
 #include <vector>
 #include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/synchronization/mutex.h"
 namespace absl {
@@ -66,23 +67,42 @@ class CordzHandle {
   virtual ~CordzHandle();
  private:
-  // Returns true if the delete queue is empty. This method does not acquire the
-  // lock, but does a 'load acquire' observation on the delete queue tail. It
-  // is used inside Delete() to check for the presence of a delete queue without
-  // holding the lock. The assumption is that the caller is in the state of
-  // 'being deleted', and can not be newly discovered by a concurrent 'being
-  // constructed' snapshot instance. Practically, this means that any such
-  // discovery (`find`, 'first' or 'next', etc) must have proper 'happens before
-  // / after' semantics and atomic fences.
-  static bool UnsafeDeleteQueueEmpty() ABSL_NO_THREAD_SAFETY_ANALYSIS {
-    return dq_tail_.load(std::memory_order_acquire) == nullptr;
-  }
+  // Global queue data. CordzHandle stores a pointer to the global queue
+  // instance to harden against ODR violations.
+  struct Queue {
+    constexpr explicit Queue(absl::ConstInitType) : mutex(absl::kConstInit) {}
+    absl::Mutex mutex;
+    std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+    // Returns true if this delete queue is empty. This method does not acquire
+    // the lock, but does a 'load acquire' observation on the delete queue tail.
+    // It is used inside Delete() to check for the presence of a delete queue
+    // without holding the lock. The assumption is that the caller is in the
+    // state of 'being deleted', and can not be newly discovered by a concurrent
+    // 'being constructed' snapshot instance. Practically, this means that any
+    // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
+    // before / after' semantics and atomic fences.
+    bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+      return dq_tail.load(std::memory_order_acquire) == nullptr;
+    }
+  };
+  void ODRCheck() const {
+#ifndef NDEBUG
+    ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
+#endif
+  }
+  ABSL_CONST_INIT static Queue global_queue_;
+  Queue* const queue_ = &global_queue_;
   const bool is_snapshot_;
-  static absl::Mutex mutex_;
-  static std::atomic<CordzHandle*> dq_tail_ ABSL_GUARDED_BY(mutex_);
-  CordzHandle* dq_prev_ ABSL_GUARDED_BY(mutex_) = nullptr;
-  CordzHandle* dq_next_ ABSL_GUARDED_BY(mutex_) = nullptr;
+  // dq_prev_ and dq_next_ require the global queue mutex to be held.
+  // Unfortunately we can't use thread annotations such that the thread safety
+  // analysis understands that queue_ and global_queue_ are one and the same.
+  CordzHandle* dq_prev_ = nullptr;
+  CordzHandle* dq_next_ = nullptr;
 };
 class CordzSnapshot : public CordzHandle {
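The comment on IsEmpty() above relies on a lock-free emptiness probe: the tail pointer is published with a release store while the queue mutex is held, so a lone acquire load is enough to decide whether a delete queue exists. A minimal standalone sketch of that publish/probe pairing, with hypothetical names (`Node`, `tail`, `Publish`, `HasQueue`) that are not part of this change:

// Sketch only: illustrates the release-publish / acquire-probe pattern that
// the IsEmpty() comment describes; it is not the Abseil implementation.
#include <atomic>
#include <cassert>

struct Node {
  Node* prev = nullptr;
};

std::atomic<Node*> tail{nullptr};  // Analogous to Queue::dq_tail.

// Writers link the node first, then publish it with a release store (the
// real code additionally holds the queue mutex while doing this).
void Publish(Node* node) {
  node->prev = tail.load(std::memory_order_acquire);
  tail.store(node, std::memory_order_release);
}

// Probe without the lock: an acquire load that observes a published tail
// also observes the fully linked node; nullptr means "no queue yet".
bool HasQueue() {
  return tail.load(std::memory_order_acquire) != nullptr;
}

int main() {
  assert(!HasQueue());
  Node n;
  Publish(&n);
  assert(HasQueue());
}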
