From 1ccdb0ee265a02cda9751d43f74ee7285ecdae60 Mon Sep 17 00:00:00 2001
From: Soheil Hassas Yeganeh
Date: Wed, 20 Feb 2019 11:28:16 -0500
Subject: [PATCH] Alias std::memory_order as grpc_core::MemoryOrder.

---
 src/core/lib/gprpp/atomic.h         | 60 +++++++++++++++++++----------
 src/core/lib/gprpp/ref_counted.h    | 12 +++---
 src/core/lib/surface/lame_client.cc |  4 +-
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/src/core/lib/gprpp/atomic.h b/src/core/lib/gprpp/atomic.h
index 9ba4f85db89..e7c10f68763 100644
--- a/src/core/lib/gprpp/atomic.h
+++ b/src/core/lib/gprpp/atomic.h
@@ -28,53 +28,73 @@ namespace grpc_core {
 template <typename T>
 using Atomic = std::atomic<T>;
 
+enum class MemoryOrder {
+  RELAXED = std::memory_order_relaxed,
+  CONSUME = std::memory_order_consume,
+  ACQUIRE = std::memory_order_acquire,
+  RELEASE = std::memory_order_release,
+  ACQ_REL = std::memory_order_acq_rel,
+  SEQ_CST = std::memory_order_seq_cst
+};
+
 // Prefer the helper methods below over the same functions provided by
 // std::atomic, because they maintain stats over atomic opertions which are
 // useful for comparing benchmarks.
 template <typename T>
-bool AtomicCompareExchangeWeak(std::atomic<T>* storage, T* expected, T desired,
-                               std::memory_order success,
-                               std::memory_order failure) {
-  return GPR_ATM_INC_CAS_THEN(
-      storage->compare_exchange_weak(*expected, desired, success, failure));
+T AtomicLoad(const Atomic<T>* storage, MemoryOrder order) {
+  return storage->load(static_cast<std::memory_order>(order));
 }
 
 template <typename T>
-bool AtomicCompareExchangeStrong(std::atomic<T>* storage, T* expected,
-                                 T desired, std::memory_order success,
-                                 std::memory_order failure) {
+T AtomicStore(Atomic<T>* storage, T val, MemoryOrder order) {
+  return storage->store(val, static_cast<std::memory_order>(order));
+}
+template <typename T>
+bool AtomicCompareExchangeWeak(Atomic<T>* storage, T* expected, T desired,
+                               MemoryOrder success, MemoryOrder failure) {
   return GPR_ATM_INC_CAS_THEN(
       storage->compare_exchange_weak(*expected, desired, success, failure));
 }
 
+template <typename T>
+bool AtomicCompareExchangeStrong(Atomic<T>* storage, T* expected, T desired,
+                                 MemoryOrder success, MemoryOrder failure) {
+  return GPR_ATM_INC_CAS_THEN(storage->compare_exchange_weak(
+      *expected, desired, static_cast<std::memory_order>(success),
+      static_cast<std::memory_order>(failure)));
+}
+
 template <typename T, typename Arg>
-T AtomicFetchAdd(std::atomic<T>* storage, Arg arg,
-                 std::memory_order order = std::memory_order_seq_cst) {
-  return GPR_ATM_INC_ADD_THEN(storage->fetch_add(static_cast<Arg>(arg), order));
+T AtomicFetchAdd(Atomic<T>* storage, Arg arg,
+                 MemoryOrder order = MemoryOrder::SEQ_CST) {
+  return GPR_ATM_INC_ADD_THEN(storage->fetch_add(
+      static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
 }
 
 template <typename T, typename Arg>
-T AtomicFetchSub(std::atomic<T>* storage, Arg arg,
-                 std::memory_order order = std::memory_order_seq_cst) {
-  return GPR_ATM_INC_ADD_THEN(storage->fetch_sub(static_cast<Arg>(arg), order));
+T AtomicFetchSub(Atomic<T>* storage, Arg arg,
+                 MemoryOrder order = MemoryOrder::SEQ_CST) {
+  return GPR_ATM_INC_ADD_THEN(storage->fetch_sub(
+      static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
 }
 
 // Atomically increment a counter only if the counter value is not zero.
 // Returns true if increment took place; false if counter is zero.
 template <typename T>
-bool AtomicIncrementIfNonzero(
-    std::atomic<T>* counter,
-    std::memory_order load_order = std::memory_order_acquire) {
-  T count = counter->load(load_order);
+bool AtomicIncrementIfNonzero(Atomic<T>* counter,
+                              MemoryOrder load_order = MemoryOrder::ACQ_REL) {
+  T count = counter->load(static_cast<std::memory_order>(load_order));
   do {
     // If zero, we are done (without an increment).  If not, we must do a CAS to
    // maintain the contract: do not increment the counter if it is already zero
     if (count == 0) {
       return false;
     }
-  } while (!AtomicCompareExchangeWeak(counter, &count, count + 1,
-                                      std::memory_order_acq_rel, load_order));
+  } while (!AtomicCompareExchangeWeak(
+      counter, &count, count + 1,
+      static_cast<MemoryOrder>(MemoryOrder::ACQ_REL),
+      static_cast<MemoryOrder>(load_order)));
   return true;
 }
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index b0430b6b809..8148cfd35d2 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -89,9 +89,7 @@ class RefCount {
   }
 
   // Increases the ref-count by `n`.
-  void Ref(Value n = 1) {
-    AtomicFetchAdd(&value_, n, std::memory_order_relaxed);
-  }
+  void Ref(Value n = 1) { AtomicFetchAdd(&value_, n, MemoryOrder::RELAXED); }
   void Ref(const DebugLocation& location, const char* reason, Value n = 1) {
 #ifndef NDEBUG
     if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
@@ -107,7 +105,7 @@
   // Similar to Ref() with an assert on the ref-count being non-zero.
   void RefNonZero() {
 #ifndef NDEBUG
-    const Value prior = AtomicFetchAdd(&value_, 1, std::memory_order_relaxed);
+    const Value prior = AtomicFetchAdd(&value_, 1, MemoryOrder::RELAXED);
     assert(prior > 0);
 #else
     Ref();
@@ -127,7 +125,7 @@
   // Decrements the ref-count and returns true if the ref-count reaches 0.
   bool Unref() {
-    const Value prior = AtomicFetchSub(&value_, 1, std::memory_order_acq_rel);
+    const Value prior = AtomicFetchSub(&value_, 1, MemoryOrder::ACQ_REL);
     GPR_DEBUG_ASSERT(prior > 0);
     return prior == 1;
   }
 
@@ -144,12 +142,12 @@
   }
 
  private:
-  Value get() const { return value_.load(std::memory_order_relaxed); }
+  Value get() const { return AtomicLoad(&value_, MemoryOrder::RELAXED); }
 
 #ifndef NDEBUG
   TraceFlag* trace_flag_;
 #endif
-  std::atomic<Value> value_;
+  Atomic<Value> value_;
 };
 
 // A base class for reference-counted objects.
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index 0ff512f07e2..c2ee9d985e9 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -54,8 +54,8 @@ static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
   CallData* calld = static_cast<CallData*>(elem->call_data);
   bool expected = false;
   if (!AtomicCompareExchangeStrong(&calld->filled_metadata, &expected, true,
-                                   std::memory_order_relaxed,
-                                   std::memory_order_relaxed)) {
+                                   MemoryOrder::RELAXED,
+                                   MemoryOrder::RELAXED)) {
     return;
   }
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
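
The pattern the patch introduces is a scoped enum whose enumerators are defined from the corresponding std::memory_order constants, with a static_cast back to std::memory_order at the single point where std::atomic is actually called. The sketch below is not part of the patch; it is a minimal standalone illustration of that pattern. The demo namespace, the simplified FetchAdd signature, and main are illustrative only (the real helpers live in src/core/lib/gprpp/atomic.h and carry the GPR_ATM_INC_* stats macros), and the sketch assumes C++11 through C++17, where std::memory_order is an unscoped enum.

#include <atomic>
#include <cstdio>

namespace demo {  // hypothetical namespace, not part of gRPC

// Scoped alias over std::memory_order, mirroring the MemoryOrder enum added
// by the patch. Under C++20 std::memory_order is itself a scoped enum, so the
// initializers below would need explicit casts to int there.
enum class MemoryOrder {
  RELAXED = std::memory_order_relaxed,
  ACQUIRE = std::memory_order_acquire,
  RELEASE = std::memory_order_release,
  ACQ_REL = std::memory_order_acq_rel,
  SEQ_CST = std::memory_order_seq_cst
};

template <typename T>
using Atomic = std::atomic<T>;

// Same shape as the patched helpers: callers pass MemoryOrder and the helper
// casts it back to std::memory_order where the standard API requires it.
// (Simplified signature, illustration only.)
template <typename T>
T FetchAdd(Atomic<T>* storage, T arg,
           MemoryOrder order = MemoryOrder::SEQ_CST) {
  return storage->fetch_add(arg, static_cast<std::memory_order>(order));
}

}  // namespace demo

int main() {
  demo::Atomic<int> refs{1};
  // Relaxed increment, mirroring what RefCount::Ref() does after the patch.
  int prior = demo::FetchAdd(&refs, 1, demo::MemoryOrder::RELAXED);
  std::printf("previous count %d, current count %d\n", prior,
              refs.load(std::memory_order_acquire));
  return 0;
}

Because the enumerator values are identical to the std::memory_order values, the static_cast is a pure relabeling with no runtime cost; the scoped enum simply keeps callers such as RefCount and the lame_client filter from passing arbitrary integers where a memory order is expected.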