Export of internal Abseil changes

--
6e9f93888bbe6718997ed90bbd049f1f3572b066 by Abseil Team <absl-team@google.com>:

Fix Status to be safe for self-move assignment.

PiperOrigin-RevId: 346815392
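
For illustration, the hazard was that move assignment unconditionally installed the moved-from rep and unreffed the old one, so a self move released the only reference to the payload it was about to keep. A minimal sketch of the contract the fix restores (mirroring the status_test.cc hunk below; the wrapper function is a hypothetical demo):

  #include <cassert>
  #include "absl/status/status.h"

  void SelfMoveAssignIsSafe() {
    absl::Status s(absl::StatusCode::kInvalidArgument, "message");
    absl::Status copy(s);
    // Written as a cast, as in the test, to sidestep -Wself-move warnings.
    s = static_cast<absl::Status&&>(s);
    assert(s == copy);  // Holds now: assignment is a no-op when reps alias.
  }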

--
35cae74a977f258e81dfbe925fb5a34cb6632036 by Gennadiy Rozental <rogeeff@google.com>:

Eliminate unnecessary accesses to global variables.

PiperOrigin-RevId: 346777168

--
e7e786c243069060f5d6c1c945decb4b0b83f95b by Andy Getzendanner <durandal@google.com>:

Internal change.

PiperOrigin-RevId: 346685656

--
4ccd41c48f1a83cfa20b3ea534f743dd7d788376 by Abseil Team <absl-team@google.com>:

Move CordRep Ref() and Unref() logic into cord_internal.h

This change moves the Ref() and Unref() logic out of cord.cc and into cord_internal.h. The main purpose is to make the upcoming ring buffer changes easier to isolate from the existing cord.cc code. Note that this removes the nullptr check from Unref(), which now requires its argument to be non-null (a condition that already held at most call sites).

We may need to rethink whether the 'unref the likely-unshared rep' path is the common case: are CordReps usually shared? The answer may differ between root and non-root nodes, i.e., are leaf/flat nodes in large cords more likely to be shared than top-level CordReps, or vice versa? Benchmarks suggest we mostly shouldn't care, with the caveat that atomic operations appear more expensive on upcoming architectures (arcadia), so we should err on the side of an extra IsOne() branch that avoids atomic operations on singly-owned reps.

PiperOrigin-RevId: 346676121
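
Under the new contract, Ref() and Unref() are static members of CordRep that assert a non-null argument, so call sites null-check explicitly. A representative pattern, excerpted from the cord.cc hunks below (contents_ is Cord's InlineRep member):

  // Check for a tree first, since Ref()/Unref() now require non-null reps.
  if (CordRep* tree = contents_.tree()) {
    CordRep::Unref(tree);  // Runs the out-of-line Destroy() once the count hits zero.
  }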

--
f0babab103b9e60d61ba09482d468985e43eceb3 by Samuel Benzaquen <sbenza@google.com>:

Fix the iterator-based constructor and `.insert` members to require only
EmplaceConstructible, as the standard specifies.

PiperOrigin-RevId: 346616707
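
The distinction matters for element types that are only explicitly constructible from the iterator's value type: the old `insert(*first)` path required a conversion to the container's value type, while `emplace(*first)` direct-initializes, so only EmplaceConstructible is needed. A minimal sketch with a hypothetical Name type (in the spirit of the raw_hash_set_test.cc hunk below, but via the public flat_hash_set API, which is backed by raw_hash_set):

  #include <string>
  #include <vector>
  #include "absl/container/flat_hash_set.h"

  // Only *explicitly* constructible from std::string.
  struct Name {
    explicit Name(const std::string& s) : value(s) {}
    std::string value;
    bool operator==(const Name& o) const { return value == o.value; }
    template <typename H>
    friend H AbslHashValue(H h, const Name& n) {
      return H::combine(std::move(h), n.value);
    }
  };

  void RangeInsert() {
    std::vector<std::string> input = {"A", "B", "C"};
    absl::flat_hash_set<Name> set;
    // With the fix, each element is emplaced, so the explicit constructor
    // satisfies the EmplaceConstructible requirement and this compiles.
    set.insert(input.begin(), input.end());
  }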

--
8f48eedda02277f9c96a88ed7726e34b557cce20 by Evan Brown <ezb@google.com>:

Fix a bug in binary_search_impl when there's a transparent, three-way comparator that has different equivalence classes for different lookup types.

Add a new can_have_multiple_equivalent_keys method to share the common logic for these cases.

PiperOrigin-RevId: 346605948
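
Concretely, the int overloads of such a comparator compare only i1, so an int lookup key has coarser equivalence classes than the container's own key type: even a unique container can hold several keys equivalent to one lookup value, and binary_search_impl must keep searching in that case. A sketch of the behavior the new typed test locks in (MultiKey and MultiKeyThreeWayComp as defined in the btree_test.cc diff below; the wrapper function is a hypothetical demo):

  #include <cassert>
  #include "absl/container/btree_set.h"

  void IntLookupSeesBothKeys() {
    absl::btree_set<MultiKey, MultiKeyThreeWayComp> set = {
        {1, 1}, {2, 1}, {2, 2}, {3, 1}};
    // Lookup by int compares only i1, so {2, 1} and {2, 2} are both
    // equivalent to 2 even though the set's full keys are unique.
    assert(set.count(2) == 2);
    assert(set.erase(2) == 2);
  }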

--
649183cb3cc9383431de9c81fb1c0f885d4001ae by Abseil Team <absl-team@google.com>:

Add benchmark for accessing a Duration flag.

PiperOrigin-RevId: 346594843

--
fefdb046520871af63ce2229e2f7cccfc0483dea by Abseil Team <absl-team@google.com>:

Restructure CordReader for upcoming ring buffer changes.

PiperOrigin-RevId: 346410642

--
8b2f50e7da0ebab06ead5f94e366e984ca23cb6a by Abseil Team <absl-team@google.com>:

Wire in an internal-only flag to toggle upcoming ring buffer changes on/off for experimentation.

PiperOrigin-RevId: 346199111
GitOrigin-RevId: 6e9f93888bbe6718997ed90bbd049f1f3572b066
Change-Id: I8f34866b25a79209cb5448bbb28dd3044111d2e9
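
The ring buffer experiment flag lands as a process-wide atomic with a small accessor (see the cord_internal.h hunk below); a hypothetical call site would flip it at startup:

  #include "absl/strings/internal/cord_internal.h"

  int main() {
    // Internal-only switch; relaxed ordering suffices for a coarse
    // experiment toggle that is not used as a synchronization point.
    absl::cord_internal::enable_cord_ring_buffer(true);
    // ... exercise Cord code paths under the experiment ...
  }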

16 files changed:
  CMake/AbseilDll.cmake                         (1 changed line)
  absl/container/btree_test.cc                  (40 changed lines)
  absl/container/internal/btree.h               (42 changed lines)
  absl/container/internal/raw_hash_set.h        (2 changed lines)
  absl/container/internal/raw_hash_set_test.cc  (74 changed lines)
  absl/flags/internal/usage.cc                  (6 changed lines)
  absl/status/status.h                          (8 changed lines)
  absl/status/status_test.cc                    (6 changed lines)
  absl/strings/BUILD.bazel                      (10 changed lines)
  absl/strings/CMakeLists.txt                   (2 changed lines)
  absl/strings/cord.cc                          (223 changed lines)
  absl/strings/cord.h                           (1 changed line)
  absl/strings/internal/cord_internal.cc        (76 changed lines)
  absl/strings/internal/cord_internal.h         (107 changed lines)
  absl/time/BUILD.bazel                         (1 changed line)
  absl/time/duration_benchmark.cc               (16 changed lines)

CMake/AbseilDll.cmake:
@@ -192,6 +192,7 @@ set(ABSL_INTERNAL_DLL_FILES
"strings/cord.h"
"strings/escaping.cc"
"strings/escaping.h"
"strings/internal/cord_internal.cc"
"strings/internal/cord_internal.h"
"strings/internal/cord_rep_flat.h"
"strings/internal/charconv_bigint.cc"

absl/container/btree_test.cc:
@@ -2696,8 +2696,36 @@ struct MultiKeyComp {
bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
};
TEST(Btree, MultiKeyEqualRange) {
absl::btree_set<MultiKey, MultiKeyComp> set;
// A heterogeneous, three-way comparator that has different equivalence classes
// for different lookup types.
struct MultiKeyThreeWayComp {
using is_transparent = void;
absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const {
if (a.i1 < b.i1) return absl::weak_ordering::less;
if (a.i1 > b.i1) return absl::weak_ordering::greater;
if (a.i2 < b.i2) return absl::weak_ordering::less;
if (a.i2 > b.i2) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const int a, const MultiKey b) const {
if (a < b.i1) return absl::weak_ordering::less;
if (a > b.i1) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const MultiKey a, const int b) const {
if (a.i1 < b) return absl::weak_ordering::less;
if (a.i1 > b) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
};
template <typename Compare>
class BtreeMultiKeyTest : public ::testing::Test {};
using MultiKeyComps = ::testing::Types<MultiKeyComp, MultiKeyThreeWayComp>;
TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps);
TYPED_TEST(BtreeMultiKeyTest, EqualRange) {
absl::btree_set<MultiKey, TypeParam> set;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
@@ -2713,15 +2741,15 @@ TEST(Btree, MultiKeyEqualRange) {
}
}
TEST(Btree, MultiKeyErase) {
absl::btree_set<MultiKey, MultiKeyComp> set = {
TYPED_TEST(BtreeMultiKeyTest, Erase) {
absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.erase(2), 2);
EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
}
TEST(Btree, MultiKeyCount) {
const absl::btree_set<MultiKey, MultiKeyComp> set = {
TYPED_TEST(BtreeMultiKeyTest, Count) {
const absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.count(2), 2);
}

absl/container/internal/btree.h:
@@ -220,9 +220,6 @@ struct common_params {
// If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type;
// True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
using is_key_compare_adapted =
absl::negation<std::is_same<key_compare, Compare>>;
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
@@ -232,9 +229,6 @@ struct common_params {
using size_type = std::make_signed<size_t>::type;
using difference_type = ptrdiff_t;
// True if this is a multiset or multimap.
using is_multi_container = std::integral_constant<bool, Multi>;
using slot_policy = SlotPolicy;
using slot_type = typename slot_policy::slot_type;
using value_type = typename slot_policy::value_type;
@@ -244,6 +238,23 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
// For the given lookup key type, returns whether we can have multiple
// equivalent keys in the btree. If this is a multi-container, then we can.
// Otherwise, we can have multiple equivalent keys only if all of the
// following conditions are met:
// - The comparator is transparent.
// - The lookup key type is not the same as key_type.
// - The comparator is not a StringBtreeDefault{Less,Greater} comparator
// that we know has the same equivalence classes for all lookup types.
template <typename LookupKey>
constexpr static bool can_have_multiple_equivalent_keys() {
return Multi ||
(IsTransparent<key_compare>::value &&
!std::is_same<LookupKey, Key>::value &&
!std::is_same<key_compare, StringBtreeDefaultLess>::value &&
!std::is_same<key_compare, StringBtreeDefaultGreater>::value);
}
enum {
kTargetNodeSize = TargetNodeSize,
@@ -439,7 +450,6 @@ struct SearchResult<V, false> {
template <typename Params>
class btree_node {
using is_key_compare_to = typename Params::is_key_compare_to;
using is_multi_container = typename Params::is_multi_container;
using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type;
@@ -759,7 +769,7 @@ class btree_node {
SearchResult<int, true> binary_search_impl(
const K &k, int s, int e, const CompareTo &comp,
std::true_type /* IsCompareTo */) const {
if (is_multi_container::value) {
if (params_type::template can_have_multiple_equivalent_keys<K>()) {
MatchKind exact_match = MatchKind::kNe;
while (s != e) {
const int mid = (s + e) >> 1;
@@ -770,14 +780,14 @@ class btree_node {
e = mid;
if (c == 0) {
// Need to return the first value whose key is not less than k,
// which requires continuing the binary search if this is a
// multi-container.
// which requires continuing the binary search if there could be
// multiple equivalent keys.
exact_match = MatchKind::kEq;
}
}
}
return {s, exact_match};
} else { // Not a multi-container.
} else { // Can't have multiple equivalent keys.
while (s != e) {
const int mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
@@ -1054,8 +1064,6 @@ class btree {
using is_key_compare_to = typename Params::is_key_compare_to;
using init_type = typename Params::init_type;
using field_type = typename node_type::field_type;
using is_multi_container = typename Params::is_multi_container;
using is_key_compare_adapted = typename Params::is_key_compare_adapted;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
// in order to avoid branching in begin()/end().
@@ -1907,13 +1915,7 @@ auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
}
const iterator next = std::next(lower);
// When the comparator is heterogeneous, we can't assume that comparison with
// non-`key_type` will be equivalent to `key_type` comparisons so there
// could be multiple equivalent keys even in a unique-container. But for
// heterogeneous comparisons from the default string adapted comparators, we
// don't need to worry about this.
if (!is_multi_container::value &&
(std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
// The next iterator after lower must point to a key greater than `key`.
// Note: if this assert fails, then it may indicate that the comparator does
// not meet the equivalence requirements for Compare

absl/container/internal/raw_hash_set.h:
@@ -1085,7 +1085,7 @@ class raw_hash_set {
template <class InputIt>
void insert(InputIt first, InputIt last) {
for (; first != last; ++first) insert(*first);
for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>

absl/container/internal/raw_hash_set_test.cc:
@@ -250,25 +250,43 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
}
}
struct IntPolicy {
using slot_type = int64_t;
using key_type = int64_t;
using init_type = int64_t;
template <class T>
struct ValuePolicy {
using slot_type = T;
using key_type = T;
using init_type = T;
static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
static void destroy(void*, int64_t*) {}
static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
*new_slot = *old_slot;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
absl::allocator_traits<Allocator>::construct(*alloc, slot,
std::forward<Args>(args)...);
}
static int64_t& element(slot_type* slot) { return *slot; }
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
}
template <class F>
static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
return std::forward<F>(f)(x, x);
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}
static T& element(slot_type* slot) { return *slot; }
template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}
};
using IntPolicy = ValuePolicy<int64_t>;
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -1657,6 +1675,38 @@ TEST(Table, Merge) {
EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
TEST(Table, IteratorEmplaceConstructibleRequirement) {
struct Value {
explicit Value(absl::string_view view) : value(view) {}
std::string value;
bool operator==(const Value& other) const { return value == other.value; }
};
struct H {
size_t operator()(const Value& v) const {
return absl::Hash<std::string>{}(v.value);
}
};
struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
std::allocator<Value>> {
using Base = typename Table::raw_hash_set;
using Base::Base;
};
std::string input[3]{"A", "B", "C"};
Table t(std::begin(input), std::end(input));
EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
input[0] = "D";
input[1] = "E";
input[2] = "F";
t.insert(std::begin(input), std::end(input));
EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
Value{"D"}, Value{"E"}, Value{"F"}));
}
TEST(Nodes, EmptyNodeType) {
using node_type = StringTable::node_type;
node_type n;

absl/flags/internal/usage.cc:
@@ -437,12 +437,6 @@ void SetFlagsHelpMatchSubstr(absl::string_view substr) {
HelpMode GetFlagsHelpMode() {
absl::MutexLock l(&help_attributes_guard);
// Refer to dummy variales to prevent linker dropping them
if (FLAGS_help || FLAGS_helpfull || FLAGS_helpshort || FLAGS_helppackage ||
FLAGS_version || FLAGS_only_check_args || FLAGS_helpon ||
FLAGS_helpmatch) {
help_mode = HelpMode::kNone;
}
return help_mode;
}

absl/status/status.h:
@@ -705,9 +705,11 @@ inline Status::Status(Status&& x) noexcept : rep_(x.rep_) {
inline Status& Status::operator=(Status&& x) {
uintptr_t old_rep = rep_;
rep_ = x.rep_;
x.rep_ = MovedFromRep();
Unref(old_rep);
if (x.rep_ != old_rep) {
rep_ = x.rep_;
x.rep_ = MovedFromRep();
Unref(old_rep);
}
return *this;
}

absl/status/status_test.cc:
@@ -397,6 +397,12 @@ TEST(Status, MoveAssignment) {
assignee = std::move(status);
EXPECT_EQ(assignee, copy);
}
{
absl::Status status(absl::StatusCode::kInvalidArgument, "message");
absl::Status copy(status);
status = static_cast<absl::Status&&>(status);
EXPECT_EQ(status, copy);
}
}
TEST(Status, Update) {

absl/strings/BUILD.bazel:
@@ -267,16 +267,23 @@ cc_test(
cc_library(
name = "cord_internal",
srcs = [
"internal/cord_internal.cc",
],
hdrs = [
"internal/cord_internal.h",
"internal/cord_rep_flat.h",
],
copts = ABSL_DEFAULT_COPTS,
visibility = ["//visibility:private"],
visibility = [
"//visibility:private",
],
deps = [
":strings",
"//absl/base:base_internal",
"//absl/base:core_headers",
"//absl/container:compressed_tuple",
"//absl/container:inlined_vector",
"//absl/meta:type_traits",
],
)
@@ -304,6 +311,7 @@ cc_library(
"//absl/functional:function_ref",
"//absl/meta:type_traits",
"//absl/types:optional",
"//absl/types:variant",
],
)

absl/strings/CMakeLists.txt:
@@ -556,6 +556,7 @@ absl_cc_library(
"cord.h"
SRCS
"cord.cc"
"internal/cord_internal.cc"
"internal/cord_internal.h"
"internal/cord_rep_flat.h"
COPTS
@@ -570,6 +571,7 @@ absl_cc_library(
absl::function_ref
absl::inlined_vector
absl::optional
absl::variant
absl::raw_logging_internal
absl::strings
absl::strings_internal

absl/strings/cord.cc:
@@ -60,51 +60,8 @@ using ::absl::cord_internal::EXTERNAL;
using ::absl::cord_internal::FLAT;
using ::absl::cord_internal::SUBSTRING;
namespace cord_internal {
inline CordRepConcat* CordRep::concat() {
assert(tag == CONCAT);
return static_cast<CordRepConcat*>(this);
}
inline const CordRepConcat* CordRep::concat() const {
assert(tag == CONCAT);
return static_cast<const CordRepConcat*>(this);
}
inline CordRepSubstring* CordRep::substring() {
assert(tag == SUBSTRING);
return static_cast<CordRepSubstring*>(this);
}
inline const CordRepSubstring* CordRep::substring() const {
assert(tag == SUBSTRING);
return static_cast<const CordRepSubstring*>(this);
}
inline CordRepExternal* CordRep::external() {
assert(tag == EXTERNAL);
return static_cast<CordRepExternal*>(this);
}
inline const CordRepExternal* CordRep::external() const {
assert(tag == EXTERNAL);
return static_cast<const CordRepExternal*>(this);
}
inline CordRepFlat* CordRep::flat() {
assert(tag >= FLAT);
return static_cast<CordRepFlat*>(this);
}
inline const CordRepFlat* CordRep::flat() const {
assert(tag >= FLAT);
return static_cast<const CordRepFlat*>(this);
}
} // namespace cord_internal
// Prefer copying blocks of at most this size, otherwise reference count.
static const size_t kMaxBytesToCopy = 511;
using ::absl::cord_internal::kInlinedVectorSize;
using ::absl::cord_internal::kMaxBytesToCopy;
constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
return n == 0 ? a : Fibonacci(n - 1, b, a + b);
@@ -136,17 +93,6 @@ static constexpr uint64_t min_length[] = {
static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
// The inlined size to use with absl::InlinedVector.
//
// Note: The InlinedVectors in this file (and in cord.h) do not need to use
// the same value for their inlined size. The fact that they do is historical.
// It may be desirable for each to use a different inlined size optimized for
// that InlinedVector's usage.
//
// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
// the inlined vector size (47 exists for backward compatibility).
static const int kInlinedVectorSize = 47;
static inline bool IsRootBalanced(CordRep* node) {
if (node->tag != CONCAT) {
return true;
@@ -182,86 +128,6 @@ static inline CordRep* VerifyTree(CordRep* node) {
return node;
}
// --------------------------------------------------------------------
// Memory management
inline CordRep* Ref(CordRep* rep) {
if (rep != nullptr) {
rep->refcount.Increment();
}
return rep;
}
// This internal routine is called from the cold path of Unref below. Keeping it
// in a separate routine allows good inlining of Unref into many profitable call
// sites. However, the call to this function can be highly disruptive to the
// register pressure in those callers. To minimize the cost to callers, we use
// a special LLVM calling convention that preserves most registers. This allows
// the call to this routine in cold paths to not disrupt the caller's register
// pressure. This calling convention is not available on all platforms; we
// intentionally allow LLVM to ignore the attribute rather than attempting to
// hardcode the list of supported platforms.
#if defined(__clang__) && !defined(__i386__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wattributes"
__attribute__((preserve_most))
#pragma clang diagnostic pop
#endif
static void UnrefInternal(CordRep* rep) {
assert(rep != nullptr);
absl::InlinedVector<CordRep*, kInlinedVectorSize> pending;
while (true) {
assert(!rep->refcount.IsImmortal());
if (rep->tag == CONCAT) {
CordRepConcat* rep_concat = rep->concat();
CordRep* right = rep_concat->right;
if (!right->refcount.Decrement()) {
pending.push_back(right);
}
CordRep* left = rep_concat->left;
delete rep_concat;
rep = nullptr;
if (!left->refcount.Decrement()) {
rep = left;
continue;
}
} else if (rep->tag == EXTERNAL) {
CordRepExternal::Delete(rep);
rep = nullptr;
} else if (rep->tag == SUBSTRING) {
CordRepSubstring* rep_substring = rep->substring();
CordRep* child = rep_substring->child;
delete rep_substring;
rep = nullptr;
if (!child->refcount.Decrement()) {
rep = child;
continue;
}
} else {
CordRepFlat::Delete(rep);
rep = nullptr;
}
if (!pending.empty()) {
rep = pending.back();
pending.pop_back();
} else {
break;
}
}
}
inline void Unref(CordRep* rep) {
// Fast-path for two common, hot cases: a null rep and a shared root.
if (ABSL_PREDICT_TRUE(rep == nullptr ||
rep->refcount.DecrementExpectHighRefcount())) {
return;
}
UnrefInternal(rep);
}
// Return the depth of a node
static int Depth(const CordRep* rep) {
if (rep->tag == CONCAT) {
@@ -285,12 +151,14 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
// The returned node has a refcount of 1.
static CordRep* RawConcat(CordRep* left, CordRep* right) {
// Avoid making degenerate concat nodes (one child is empty)
if (left == nullptr || left->length == 0) {
Unref(left);
if (left == nullptr) return right;
if (right == nullptr) return left;
if (left->length == 0) {
CordRep::Unref(left);
return right;
}
if (right == nullptr || right->length == 0) {
Unref(right);
if (right->length == 0) {
CordRep::Unref(right);
return left;
}
@@ -364,7 +232,7 @@ void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep) {
static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
// Never create empty substring nodes
if (length == 0) {
Unref(child);
CordRep::Unref(child);
return nullptr;
} else {
CordRepSubstring* rep = new CordRepSubstring();
@@ -562,13 +430,13 @@ void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
data_ = src.data_;
if (is_tree()) {
Ref(tree());
CordRep::Ref(tree());
}
}
void Cord::InlineRep::ClearSlow() {
if (is_tree()) {
Unref(tree());
CordRep::Unref(tree());
}
ResetToEmpty();
}
@@ -577,7 +445,9 @@ void Cord::InlineRep::ClearSlow() {
// Constructors and destructors
Cord::Cord(const Cord& src) : contents_(src.contents_) {
Ref(contents_.tree()); // Does nothing if contents_ has embedded data
if (CordRep* tree = contents_.tree()) {
CordRep::Ref(tree);
}
}
Cord::Cord(absl::string_view src) {
@@ -623,14 +493,18 @@ template Cord::Cord(std::string&& src);
// The destruction code is separate so that the compiler can determine
// that it does not need to call the destructor on a moved-from Cord.
void Cord::DestroyCordSlow() {
Unref(VerifyTree(contents_.tree()));
if (CordRep* tree = contents_.tree()) {
CordRep::Unref(VerifyTree(tree));
}
}
// --------------------------------------------------------------------
// Mutators
void Cord::Clear() {
Unref(contents_.clear());
if (CordRep* tree = contents_.clear()) {
CordRep::Unref(tree);
}
}
Cord& Cord::operator=(absl::string_view src) {
@@ -641,7 +515,7 @@ Cord& Cord::operator=(absl::string_view src) {
if (length <= InlineRep::kMaxInline) {
// Embed into this->contents_
contents_.set_data(data, length, true);
Unref(tree);
if (tree) CordRep::Unref(tree);
return *this;
}
if (tree != nullptr && tree->tag >= FLAT &&
@@ -654,7 +528,7 @@ Cord& Cord::operator=(absl::string_view src) {
return *this;
}
contents_.set_tree(NewTree(data, length, 0));
Unref(tree);
if (tree) CordRep::Unref(tree);
return *this;
}
@@ -731,7 +605,7 @@ void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
}
inline CordRep* Cord::TakeRep() const& {
return Ref(contents_.tree());
return CordRep::Ref(contents_.tree());
}
inline CordRep* Cord::TakeRep() && {
@@ -775,6 +649,7 @@ inline void Cord::AppendImpl(C&& src) {
return;
}
// Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
contents_.AppendTree(std::forward<C>(src).TakeRep());
}
@@ -796,7 +671,7 @@ template void Cord::Append(std::string&& src);
void Cord::Prepend(const Cord& src) {
CordRep* src_tree = src.contents_.tree();
if (src_tree != nullptr) {
Ref(src_tree);
CordRep::Ref(src_tree);
contents_.PrependTree(src_tree);
return;
}
@@ -835,7 +710,7 @@ template void Cord::Prepend(std::string&& src);
static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
if (n >= node->length) return nullptr;
if (n == 0) return Ref(node);
if (n == 0) return CordRep::Ref(node);
absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
while (node->tag == CONCAT) {
@@ -853,7 +728,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
assert(n <= node->length);
if (n == 0) {
Ref(node);
CordRep::Ref(node);
} else {
size_t start = n;
size_t len = node->length - n;
@@ -862,10 +737,10 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
start += node->substring()->start;
node = node->substring()->child;
}
node = NewSubstring(Ref(node), start, len);
node = NewSubstring(CordRep::Ref(node), start, len);
}
while (!rhs_stack.empty()) {
node = Concat(node, Ref(rhs_stack.back()));
node = Concat(node, CordRep::Ref(rhs_stack.back()));
rhs_stack.pop_back();
}
return node;
@@ -876,7 +751,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
// edited in place iff that node and all its ancestors have a refcount of 1.
static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
if (n >= node->length) return nullptr;
if (n == 0) return Ref(node);
if (n == 0) return CordRep::Ref(node);
absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
bool inplace_ok = node->refcount.IsOne();
@@ -896,11 +771,11 @@ static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
assert(n <= node->length);
if (n == 0) {
Ref(node);
CordRep::Ref(node);
} else if (inplace_ok && node->tag != EXTERNAL) {
// Consider making a new buffer if the current node capacity is much
// larger than the new length.
Ref(node);
CordRep::Ref(node);
node->length -= n;
} else {
size_t start = 0;
@@ -909,10 +784,10 @@ static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
start = node->substring()->start;
node = node->substring()->child;
}
node = NewSubstring(Ref(node), start, len);
node = NewSubstring(CordRep::Ref(node), start, len);
}
while (!lhs_stack.empty()) {
node = Concat(Ref(lhs_stack.back()), node);
node = Concat(CordRep::Ref(lhs_stack.back()), node);
lhs_stack.pop_back();
}
return node;
@@ -927,7 +802,7 @@ void Cord::RemovePrefix(size_t n) {
contents_.remove_prefix(n);
} else {
CordRep* newrep = RemovePrefixFrom(tree, n);
Unref(tree);
CordRep::Unref(tree);
contents_.replace_tree(VerifyTree(newrep));
}
}
@@ -941,7 +816,7 @@ void Cord::RemoveSuffix(size_t n) {
contents_.reduce_size(n);
} else {
CordRep* newrep = RemoveSuffixFrom(tree, n);
Unref(tree);
CordRep::Unref(tree);
contents_.replace_tree(VerifyTree(newrep));
}
}
@@ -974,13 +849,13 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
results.pop_back();
results.push_back(Concat(left, right));
} else if (pos == 0 && n == node->length) {
results.push_back(Ref(node));
results.push_back(CordRep::Ref(node));
} else if (node->tag != CONCAT) {
if (node->tag == SUBSTRING) {
pos += node->substring()->start;
node = node->substring()->child;
}
results.push_back(NewSubstring(Ref(node), pos, n));
results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
} else if (pos + n <= node->concat()->left->length) {
todo.push_back(SubRange(node->concat()->left, pos, n));
} else if (pos >= node->concat()->left->length) {
@@ -1058,9 +933,9 @@ class CordForest {
concat_node->left = concat_freelist_;
concat_freelist_ = concat_node;
} else {
Ref(concat_node->right);
Ref(concat_node->left);
Unref(concat_node);
CordRep::Ref(concat_node->right);
CordRep::Ref(concat_node->left);
CordRep::Unref(concat_node);
}
} else {
AddNode(node);
@@ -1488,7 +1363,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
if (n < current_chunk_.size()) {
// Range to read is a proper subrange of the current chunk.
assert(current_leaf_ != nullptr);
CordRep* subnode = Ref(current_leaf_);
CordRep* subnode = CordRep::Ref(current_leaf_);
const char* data =
subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
@@ -1500,7 +1375,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// Range to read begins with a proper subrange of the current chunk.
assert(!current_chunk_.empty());
assert(current_leaf_ != nullptr);
CordRep* subnode = Ref(current_leaf_);
CordRep* subnode = CordRep::Ref(current_leaf_);
if (current_chunk_.size() < subnode->length) {
const char* data =
subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
@@ -1526,7 +1401,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// children starting from current_subtree_ if this loop exits while staying
// below current_subtree_; etc.; alternatively, push parents instead of
// right children on the stack).
subnode = Concat(subnode, Ref(node));
subnode = Concat(subnode, CordRep::Ref(node));
n -= node->length;
bytes_remaining_ -= node->length;
node = nullptr;
@@ -1548,7 +1423,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
node = node->concat()->left;
} else {
// Read left, descend right.
subnode = Concat(subnode, Ref(node->concat()->left));
subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
n -= node->concat()->left->length;
bytes_remaining_ -= node->concat()->left->length;
node = node->concat()->right;
@@ -1567,7 +1442,9 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// chunk.
assert(node->tag == EXTERNAL || node->tag >= FLAT);
assert(length > n);
if (n > 0) subnode = Concat(subnode, NewSubstring(Ref(node), offset, n));
if (n > 0) {
subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
}
const char* data =
node->tag == EXTERNAL ? node->external()->base : node->data;
current_chunk_ = absl::string_view(data + offset + n, length - n);
@@ -1691,7 +1568,9 @@ absl::string_view Cord::FlattenSlowPath() {
s.size());
});
}
Unref(contents_.tree());
if (CordRep* tree = contents_.tree()) {
CordRep::Unref(tree);
}
contents_.set_tree(new_rep);
return absl::string_view(new_buffer, total_size);
}

absl/strings/cord.h:
@@ -82,6 +82,7 @@
#include "absl/strings/internal/string_constant.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
namespace absl {
ABSL_NAMESPACE_BEGIN

absl/strings/internal/cord_internal.cc (new file):
@@ -0,0 +1,76 @@
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/strings/internal/cord_internal.h"
#include <atomic>
#include <cassert>
#include <memory>
#include "absl/container/inlined_vector.h"
#include "absl/strings/internal/cord_rep_flat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(false);
void CordRep::Destroy(CordRep* rep) {
assert(rep != nullptr);
absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
while (true) {
assert(!rep->refcount.IsImmortal());
if (rep->tag == CONCAT) {
CordRepConcat* rep_concat = rep->concat();
CordRep* right = rep_concat->right;
if (!right->refcount.Decrement()) {
pending.push_back(right);
}
CordRep* left = rep_concat->left;
delete rep_concat;
rep = nullptr;
if (!left->refcount.Decrement()) {
rep = left;
continue;
}
} else if (rep->tag == EXTERNAL) {
CordRepExternal::Delete(rep);
rep = nullptr;
} else if (rep->tag == SUBSTRING) {
CordRepSubstring* rep_substring = rep->substring();
CordRep* child = rep_substring->child;
delete rep_substring;
rep = nullptr;
if (!child->refcount.Decrement()) {
rep = child;
continue;
}
} else {
CordRepFlat::Delete(rep);
rep = nullptr;
}
if (!pending.empty()) {
rep = pending.back();
pending.pop_back();
} else {
break;
}
}
}
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl

absl/strings/internal/cord_internal.h:
@@ -22,6 +22,7 @@
#include <type_traits>
#include "absl/base/internal/invoke.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
@@ -30,6 +31,28 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
extern std::atomic<bool> cord_ring_buffer_enabled;
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
enum Constants {
// The inlined size to use with absl::InlinedVector.
//
// Note: The InlinedVectors in this file (and in cord.h) do not need to use
// the same value for their inlined size. The fact that they do is historical.
// It may be desirable for each to use a different inlined size optimized for
// that InlinedVector's usage.
//
// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
// the inlined vector size (47 exists for backward compatibility).
kInlinedVectorSize = 47,
// Prefer copying blocks of at most this size, otherwise reference count.
kMaxBytesToCopy = 511
};
// Wraps std::atomic for reference counting.
class Refcount {
public:
@@ -151,6 +174,34 @@ struct CordRep {
inline const CordRepExternal* external() const;
inline CordRepFlat* flat();
inline const CordRepFlat* flat() const;
// --------------------------------------------------------------------
// Memory management
// This internal routine is called from the cold path of Unref below. Keeping
// it in a separate routine allows good inlining of Unref into many profitable
// call sites. However, the call to this function can be highly disruptive to
// the register pressure in those callers. To minimize the cost to callers, we
// use a special LLVM calling convention that preserves most registers. This
// allows the call to this routine in cold paths to not disrupt the caller's
// register pressure. This calling convention is not available on all
// platforms; we intentionally allow LLVM to ignore the attribute rather than
// attempting to hardcode the list of supported platforms.
#if defined(__clang__) && !defined(__i386__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wattributes"
__attribute__((preserve_most))
#pragma clang diagnostic pop
#endif
static void Destroy(CordRep* rep);
// Increments the reference count of `rep`.
// Requires `rep` to be a non-null pointer value.
static inline CordRep* Ref(CordRep* rep);
// Decrements the reference count of `rep`. Destroys rep if count reaches
// zero. Requires `rep` to be a non-null pointer value.
static inline void Unref(CordRep* rep);
};
struct CordRepConcat : public CordRep {
@@ -284,7 +335,63 @@ static_assert(sizeof(InlineData) == kMaxInline + 1, "");
static_assert(sizeof(AsTree) == sizeof(InlineData), "");
static_assert(offsetof(AsTree, tagged_size) == kMaxInline, "");
inline CordRepConcat* CordRep::concat() {
assert(tag == CONCAT);
return static_cast<CordRepConcat*>(this);
}
inline const CordRepConcat* CordRep::concat() const {
assert(tag == CONCAT);
return static_cast<const CordRepConcat*>(this);
}
inline CordRepSubstring* CordRep::substring() {
assert(tag == SUBSTRING);
return static_cast<CordRepSubstring*>(this);
}
inline const CordRepSubstring* CordRep::substring() const {
assert(tag == SUBSTRING);
return static_cast<const CordRepSubstring*>(this);
}
inline CordRepExternal* CordRep::external() {
assert(tag == EXTERNAL);
return static_cast<CordRepExternal*>(this);
}
inline const CordRepExternal* CordRep::external() const {
assert(tag == EXTERNAL);
return static_cast<const CordRepExternal*>(this);
}
inline CordRepFlat* CordRep::flat() {
assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
return reinterpret_cast<CordRepFlat*>(this);
}
inline const CordRepFlat* CordRep::flat() const {
assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
return reinterpret_cast<const CordRepFlat*>(this);
}
inline CordRep* CordRep::Ref(CordRep* rep) {
assert(rep != nullptr);
rep->refcount.Increment();
return rep;
}
inline void CordRep::Unref(CordRep* rep) {
assert(rep != nullptr);
// Expect refcount to be 0. Avoiding the cost of an atomic decrement should
// typically outweigh the cost of an extra branch checking for ref == 1.
if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
Destroy(rep);
}
}
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_

absl/time/BUILD.bazel:
@@ -119,6 +119,7 @@ cc_test(
":time",
"//absl/base",
"//absl/base:core_headers",
"//absl/flags:flag",
"//absl/hash",
"@com_github_google_benchmark//:benchmark_main",
],

absl/time/duration_benchmark.cc:
@@ -18,9 +18,14 @@
#include <string>
#include "absl/base/attributes.h"
#include "absl/flags/flag.h"
#include "absl/time/time.h"
#include "benchmark/benchmark.h"
ABSL_FLAG(absl::Duration, absl_duration_flag_for_benchmark,
absl::Milliseconds(1),
"Flag to use for benchmarking duration flag access speed.");
namespace {
//
@@ -425,4 +430,15 @@ void BM_Duration_ParseDuration(benchmark::State& state) {
}
BENCHMARK(BM_Duration_ParseDuration)->DenseRange(0, kNumDurations - 1);
//
// Flag access
//
void BM_Duration_GetFlag(benchmark::State& state) {
while (state.KeepRunning()) {
benchmark::DoNotOptimize(
absl::GetFlag(FLAGS_absl_duration_flag_for_benchmark));
}
}
BENCHMARK(BM_Duration_GetFlag);
} // namespace
