Export of internal Abseil changes

--
c68f1886f5e8fd90eb0c2d2e68feaf00a7cdacda by CJ Johnson <johnsoncj@google.com>:

Introduce absl::Cleanup to the OSS repo

PiperOrigin-RevId: 354583156

--
17030cf388e10f7eb959e3e566326d1072ce392e by Abseil Team <absl-team@google.com>:

Internal change only

PiperOrigin-RevId: 354574953

--
e979d7236d4f3252e79ddda6739b67a9a326bf6d by CJ Johnson <johnsoncj@google.com>:

Internal change

PiperOrigin-RevId: 354545297

--
7ea02b3783f7f49ef97d86a8f6580a19cc57df14 by Abseil Team <absl-team@google.com>:

Pre-allocate memory for vectors where the size is known.

PiperOrigin-RevId: 354344576

--
9246c7cb11f1d6444f79ebe25acc69a8a9b870e0 by Matt Kulukundis <kfm@google.com>:

Add support for Elbrus 2000 (e2k)

Import of https://github.com/abseil/abseil-cpp/pull/889

PiperOrigin-RevId: 354344013

--
0fc93d359cc1fb307552e917b37b7b2e7eed822f by Abseil Team <absl-team@google.com>:

Integrate CordRepRing logic into cord (but do not enable it)

PiperOrigin-RevId: 354312238

--
eda05622f7da71466723acb33403f783529df24b by Abseil Team <absl-team@google.com>:

Protect ignore diagnostic with "__has_warning".

PiperOrigin-RevId: 354112334

--
47716c5d8fb10efa4fdd801d28bac414c6f8ec32 by Abseil Team <absl-team@google.com>:

Rearrange InlinedVector copy constructor and destructor to treat
a few special cases inline and then tail-call a non-inlined routine
for the rest.  In particular, we optimize for empty vectors in both
cases.

Added a couple of benchmarks that copy either an InlVec<int64> or
an InlVec<InlVec<int64>>.

Speed difference:
```
BM_CopyTrivial/0                                    0.92ns +- 0%   0.47ns +- 0%  -48.91%  (p=0.000 n=11+12)
BM_CopyTrivial/1                                    0.92ns +- 0%   1.15ns +- 0%  +25.00%  (p=0.000 n=10+9)
BM_CopyTrivial/8                                    8.57ns +- 0%  10.72ns +- 1%  +25.16%  (p=0.000 n=10+12)
BM_CopyNonTrivial/0                                 3.21ns +- 0%   0.70ns +- 0%  -78.23%  (p=0.000 n=12+10)
BM_CopyNonTrivial/1                                 5.88ns +- 1%   5.51ns +- 0%   -6.28%  (p=0.000 n=10+8)
BM_CopyNonTrivial/8                                 21.5ns +- 1%   15.2ns +- 2%  -29.23%  (p=0.000 n=12+12)
```

Note: the slowdowns are a few cycles which is expected given the procedure
call added in that case. We decided this is a good tradeoff given the code
size reductions and the more significant speedups for empty vectors.

Size difference (as measured by nm):
```
BM_CopyTrivial     from 1048 bytes to 326 bytes.
BM_CopyNonTrivial  from  749 bytes to 470 bytes.
```

Code size for a large binary drops by ~500KB (from 349415719 to 348906015).

All of the benchmarks that showed a significant difference:

Ones that improve with this CL:
```
BM_CopyNonTrivial/0                                 3.21ns +- 0%   0.70ns +- 0%  -78.23%  (p=0.000 n=12+10)
BM_InlinedVectorFillString/0                        0.93ns +- 0%   0.24ns +- 0%  -74.19%  (p=0.000 n=12+10)
BM_InlinedVectorAssignments/1                       10.5ns +- 0%    4.1ns +- 0%  -60.64%  (p=0.000 n=11+10)
BM_InlinedVectorAssignments/2                       10.7ns +- 0%    4.4ns +- 0%  -59.08%  (p=0.000 n=11+11)
BM_CopyTrivial/0                                    0.92ns +- 0%   0.47ns +- 0%  -48.91%  (p=0.000 n=11+12)
BM_CopyNonTrivial/8                                 21.5ns +- 1%   15.2ns +- 2%  -29.23%  (p=0.000 n=12+12)
BM_StdVectorEmpty                                   0.47ns +- 1%   0.35ns +- 0%  -24.73%  (p=0.000 n=12+12)
BM_StdVectorSize                                    0.46ns +- 2%   0.35ns +- 0%  -24.32%  (p=0.000 n=12+12)
BM_SwapElements<LargeCopyableOnly>/0                3.44ns +- 0%   2.76ns +- 1%  -19.83%  (p=0.000 n=11+11)
BM_InlinedVectorFillRange/256                       20.7ns +- 1%   17.8ns +- 0%  -14.08%  (p=0.000 n=12+9)
BM_CopyNonTrivial/1                                 5.88ns +- 1%   5.51ns +- 0%   -6.28%  (p=0.000 n=10+8)
BM_SwapElements<LargeCopyableMovable>/1             4.19ns +- 0%   3.95ns +- 1%   -5.63%  (p=0.000 n=11+12)
BM_SwapElements<LargeCopyableMovableSwappable>/1    4.18ns +- 0%   3.99ns +- 0%   -4.70%  (p=0.000 n=9+11)
BM_SwapElements<LargeCopyableMovable>/0             2.41ns +- 0%   2.31ns +- 0%   -4.45%  (p=0.000 n=12+12)
BM_InlinedVectorFillRange/64                        8.25ns +- 0%   8.04ns +- 0%   -2.51%  (p=0.000 n=12+11)
BM_SwapElements<LargeCopyableOnly>/1                82.4ns +- 0%   81.5ns +- 0%   -1.06%  (p=0.000 n=12+12)
```

Ones that get worse with this CL:
```
BM_CopyTrivial/1                                    0.92ns +- 0%   1.15ns +- 0%  +25.00%  (p=0.000 n=10+9)
BM_CopyTrivial/8                                    8.57ns +- 0%  10.72ns +- 1%  +25.16%  (p=0.000 n=10+12)
BM_SwapElements<LargeCopyableMovableSwappable>/512  1.48ns +- 1%   1.66ns +- 1%  +11.88%  (p=0.000 n=12+12)
BM_InlinedVectorFillString/1                        11.5ns +- 0%   12.8ns +- 1%  +11.62%  (p=0.000 n=12+11)
BM_SwapElements<LargeCopyableMovableSwappable>/64   1.48ns +- 2%   1.66ns +- 1%  +11.66%  (p=0.000 n=12+11)
BM_SwapElements<LargeCopyableMovableSwappable>/1k   1.48ns +- 1%   1.65ns +- 2%  +11.32%  (p=0.000 n=12+12)
BM_SwapElements<LargeCopyableMovable>/512           1.48ns +- 2%   1.58ns +- 4%   +6.62%  (p=0.000 n=11+12)
BM_SwapElements<LargeCopyableMovable>/1k            1.49ns +- 2%   1.58ns +- 3%   +6.05%  (p=0.000 n=12+12)
BM_SwapElements<LargeCopyableMovable>/64            1.48ns +- 2%   1.57ns +- 4%   +6.04%  (p=0.000 n=11+12)
BM_InlinedVectorFillRange/1                         4.81ns +- 0%   5.05ns +- 0%   +4.83%  (p=0.000 n=11+11)
BM_InlinedVectorFillString/8                        79.4ns +- 1%   83.1ns +- 1%   +4.64%  (p=0.000 n=10+12)
BM_StdVectorFillString/1                            16.3ns +- 0%   16.6ns +- 0%   +2.13%  (p=0.000 n=11+8)
```

PiperOrigin-RevId: 353906786

--
8e26518b3cec9c598e5e9573c46c3bd1b03a67ef by Abseil Team <absl-team@google.com>:

Internal change

PiperOrigin-RevId: 353737330

--
f206ae0983e58c9904ed8b8f05f9caf564a446be by Matt Kulukundis <kfm@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 353682256
GitOrigin-RevId: c68f1886f5e8fd90eb0c2d2e68feaf00a7cdacda
Change-Id: I5790c1036c4f543c701d1039848fabf7ae881ad8
pull/898/head
Abseil Team 4 years ago committed by CJ Johnson
parent af39e13305
commit a9a4956020
  1. 2
      CMake/AbseilDll.cmake
  2. 3
      README.md
  3. 9
      absl/base/config.h
  4. 1
      absl/base/spinlock_test_common.cc
  5. 66
      absl/cleanup/BUILD.bazel
  6. 55
      absl/cleanup/CMakeLists.txt
  7. 129
      absl/cleanup/cleanup.h
  8. 240
      absl/cleanup/cleanup_test.cc
  9. 77
      absl/cleanup/internal/cleanup.h
  10. 8
      absl/container/inlined_vector.h
  11. 22
      absl/container/inlined_vector_benchmark.cc
  12. 66
      absl/container/internal/inlined_vector.h
  13. 16
      absl/status/status.cc
  14. 9
      absl/status/status.h
  15. 141
      absl/strings/cord.cc
  16. 43
      absl/strings/cord.h
  17. 6
      absl/strings/cord_test.cc
  18. 7
      absl/strings/internal/cord_rep_flat.h
  19. 2
      absl/strings/internal/cord_rep_ring.cc
  20. 4
      absl/strings/internal/cord_rep_ring.h
  21. 2
      absl/time/internal/cctz/testdata/version
  22. BIN
      absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba

@ -60,6 +60,8 @@ set(ABSL_INTERNAL_DLL_FILES
"base/policy_checks.h"
"base/port.h"
"base/thread_annotations.h"
"cleanup/cleanup.h"
"cleanup/internal/cleanup.h"
"container/btree_map.h"
"container/btree_set.h"
"container/fixed_array.h"

@ -72,6 +72,9 @@ Abseil contains the following C++ library components:
* [`algorithm`](absl/algorithm/)
<br /> The `algorithm` library contains additions to the C++ `<algorithm>`
library and container-based versions of such algorithms.
* [`cleanup`](absl/cleanup/)
<br /> The `cleanup` library contains the control-flow-construct-like type
`absl::Cleanup` which is used for executing a callback on scope exit.
* [`container`](absl/container/)
<br /> The `container` library contains additional STL-style containers,
including Abseil's unordered "Swiss table" containers.

@ -722,4 +722,13 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
//
// Class template argument deduction is a language feature added in C++17.
#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
#elif defined(__cpp_deduction_guides)
#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
#endif
#endif // ABSL_BASE_CONFIG_H_

@ -92,6 +92,7 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) {
static void ThreadedTest(SpinLock* spinlock) {
std::vector<std::thread> threads;
threads.reserve(kNumThreads);
for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(std::thread(TestFunction, i, spinlock));
}

@ -0,0 +1,66 @@
# Copyright 2021 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_DEFAULT_LINKOPTS",
"ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
# Implementation details of absl::Cleanup (tag type, deduction checks, and
# the engaged-flag storage). Not part of the public API surface.
cc_library(
name = "cleanup_internal",
hdrs = ["internal/cleanup.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:base_internal",
"//absl/base:core_headers",
"//absl/utility",
],
)
# Public target: `absl::Cleanup` scope-guard type and `absl::MakeCleanup`.
cc_library(
name = "cleanup",
hdrs = [
"cleanup.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":cleanup_internal",
"//absl/base:config",
"//absl/base:core_headers",
],
)
# Typed unit tests covering factory/CTAD deduction, Cancel, and Invoke.
cc_test(
name = "cleanup_test",
size = "small",
srcs = [
"cleanup_test.cc",
],
copts = ABSL_TEST_COPTS,
deps = [
":cleanup",
"//absl/base:config",
"//absl/utility",
"@com_google_googletest//:gtest_main",
],
)

@ -0,0 +1,55 @@
# Copyright 2021 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Implementation details of absl::Cleanup; header-only internal library.
absl_cc_library(
NAME
cleanup_internal
HDRS
"internal/cleanup.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base_internal
absl::core_headers
absl::utility
PUBLIC
)
# Public target exposing absl::Cleanup and absl::MakeCleanup.
absl_cc_library(
NAME
cleanup
HDRS
"cleanup.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::cleanup_internal
absl::config
absl::core_headers
PUBLIC
)
# Unit tests for the cleanup library (typed over several callback flavors).
absl_cc_test(
NAME
cleanup_test
SRCS
"cleanup_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::cleanup
absl::config
absl::utility
gmock_main
)

@ -0,0 +1,129 @@
// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: cleanup.h
// -----------------------------------------------------------------------------
//
// `absl::Cleanup` implements the scope guard idiom, invoking `operator()() &&`
// on the callback it was constructed with, on scope exit.
//
// Example:
//
// ```
// void CopyGoodData(const char* input_path, const char* output_path) {
// FILE* in_file = fopen(input_path, "r");
// FILE* out_file = fopen(output_path, "w");
// if (in_file == nullptr || out_file == nullptr) return;
//
// // C++17 style using class template argument deduction
// absl::Cleanup in_closer = [&in_file] { fclose(in_file); };
//
// // C++11 style using the factory function
// auto out_closer = absl::MakeCleanup([&out_file] { fclose(out_file); });
//
// // `fclose` will be called on all exit paths by the cleanup instances
//
// Data data;
// while (ReadData(in_file, &data)) {
// if (data.IsBad()) {
// LOG(ERROR) << "Found bad data.";
// return; // `in_closer` and `out_closer` will call their callbacks
// }
// SaveData(out_file, &data);
// }
// return; // `in_closer` and `out_closer` will call their callbacks
// }
// ```
//
// `std::move(cleanup).Invoke()` will execute the callback early, before
// destruction, and prevent the callback from executing in the destructor.
//
// Alternatively, `std::move(cleanup).Cancel()` will prevent the callback from
// ever executing at all.
//
// Once a cleanup object has been `std::move(...)`-ed, it may not be used again.
#ifndef ABSL_CLEANUP_CLEANUP_H_
#define ABSL_CLEANUP_CLEANUP_H_
#include <utility>
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/cleanup/internal/cleanup.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Scope guard: stores `callback` and runs it from the destructor unless the
// object was released via `Cancel()` or run early via `Invoke()`. `Arg` must
// be the internal deduction tag (enforced below), so instances can only be
// created via CTAD or `MakeCleanup` — never with explicit template arguments.
template <typename Arg, typename Callback = void()>
class ABSL_MUST_USE_RESULT Cleanup {
static_assert(cleanup_internal::WasDeduced<Arg>(),
"Explicit template parameters are not supported.");
static_assert(cleanup_internal::ReturnsVoid<Callback>(),
"Callbacks that return values are not supported.");
public:
// Implicit on purpose so `absl::Cleanup c = [] {...};` works.
Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT
// Move transfers ownership; the source's storage is disengaged by
// `Storage`'s move constructor, so the callback cannot fire twice.
Cleanup(Cleanup&& other) : storage_(std::move(other.storage_)) {}
// Prevents the callback from ever running. Rvalue-qualified: usable only as
// `std::move(c).Cancel()`, and `c` must not be used afterwards.
void Cancel() && {
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.DisengageCallback();
}
// Runs the callback now instead of at destruction (exactly once).
void Invoke() && {
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
// Disengage first so the destructor cannot invoke the callback again.
storage_.DisengageCallback();
storage_.InvokeCallback();
}
~Cleanup() {
if (storage_.IsCallbackEngaged()) {
storage_.InvokeCallback();
}
}
private:
cleanup_internal::Storage<Callback> storage_;
};
// `auto c = absl::MakeCleanup(/* callback */);`
//
// Pre-C++17 factory for `absl::Cleanup`: deduces `Callback` from the
// argument and tags the result type as deduced, so explicit template
// parameters remain unsupported.
template <typename... Args, typename Callback>
absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
  using CleanupType = absl::Cleanup<cleanup_internal::Tag, Callback>;

  static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
                "Explicit template parameters are not supported.");
  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
                "Callbacks that return values are not supported.");

  return CleanupType(std::move(callback));
}
// `absl::Cleanup c = /* callback */;`
//
// C++17 type deduction API for creating an instance of `absl::Cleanup`.
#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
// Deduces the first parameter as the internal tag, matching what
// `MakeCleanup` produces, so both creation paths yield the same type.
template <typename Callback>
Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_CLEANUP_CLEANUP_H_

@ -0,0 +1,240 @@
// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/cleanup/cleanup.h"
#include <functional>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/utility/utility.h"
namespace {
using Tag = absl::cleanup_internal::Tag;
// Compile-time check that two types are identical; used below to pin down
// exactly what `MakeCleanup` and CTAD deduce.
template <typename Type1, typename Type2>
void AssertSameType() {
static_assert(std::is_same<Type1, Type2>::value, "");
}
// Passes the callback through unchanged, so tests exercise `absl::Cleanup`
// with the callback's own type (lambda literal, function pointer, ...).
struct IdentityFactory {
  template <typename Callback>
  static Callback AsCallback(Callback callback) {
    return std::move(callback);
  }
};
// `FunctorClass` is a type used for testing `absl::Cleanup`. It is intended to
// represent users that make their own move-only callback types outside of
// `std::function` and lambda literals.
class FunctorClass {
using Callback = std::function<void()>;
public:
explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {}
// Move-only: the moved-from object's callback is emptied so it cannot run.
FunctorClass(FunctorClass&& other)
: callback_(absl::exchange(other.callback_, Callback())) {}
FunctorClass(const FunctorClass&) = delete;
FunctorClass& operator=(const FunctorClass&) = delete;
FunctorClass& operator=(FunctorClass&&) = delete;
// Only rvalue invocation is permitted, mirroring how `Cleanup` calls it.
void operator()() const& = delete;
void operator()() && {
ASSERT_TRUE(callback_);
callback_();
// Clear so a (buggy) second invocation would trip the assert above.
callback_ = nullptr;
}
private:
Callback callback_;
};
// Adapts any callback into the move-only `FunctorClass` test type.
struct FunctorClassFactory {
template <typename Callback>
static FunctorClass AsCallback(Callback callback) {
return FunctorClass(std::move(callback));
}
};
// Wraps any callback in a type-erased `std::function<void()>`.
struct StdFunctionFactory {
  template <typename Callback>
  static std::function<void()> AsCallback(Callback callback) {
    std::function<void()> wrapped(std::move(callback));
    return wrapped;
  }
};
// The three callback flavors every typed test is instantiated with: the raw
// callback type, a user-defined move-only functor, and `std::function`.
using CleanupTestParams =
::testing::Types<IdentityFactory, FunctorClassFactory, StdFunctionFactory>;
template <typename>
struct CleanupTest : public ::testing::Test {};
TYPED_TEST_SUITE(CleanupTest, CleanupTestParams);
// Global flag set by `FunctionPointerFunction` so tests can observe that a
// plain function-pointer callback actually ran.
bool function_pointer_called = false;
void FunctionPointerFunction() { function_pointer_called = true; }
// `MakeCleanup` must yield `Cleanup<Tag, Callback>` with `Callback` deduced
// exactly (a function name decays to a function pointer).
TYPED_TEST(CleanupTest, FactoryProducesCorrectType) {
{
auto callback = TypeParam::AsCallback([] {});
auto cleanup = absl::MakeCleanup(std::move(callback));
AssertSameType<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>();
}
{
auto cleanup = absl::MakeCleanup(&FunctionPointerFunction);
AssertSameType<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>();
}
{
auto cleanup = absl::MakeCleanup(FunctionPointerFunction);
AssertSameType<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>();
}
}
#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
// Class template argument deduction must deduce the same types as above.
TYPED_TEST(CleanupTest, CTADProducesCorrectType) {
{
auto callback = TypeParam::AsCallback([] {});
absl::Cleanup cleanup = std::move(callback);
AssertSameType<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>();
}
{
absl::Cleanup cleanup = &FunctionPointerFunction;
AssertSameType<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>();
}
{
absl::Cleanup cleanup = FunctionPointerFunction;
AssertSameType<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>();
}
}
// The factory and CTAD creation paths must agree on the resulting type.
TYPED_TEST(CleanupTest, FactoryAndCTADProduceSameType) {
{
auto callback = IdentityFactory::AsCallback([] {});
auto factory_cleanup = absl::MakeCleanup(callback);
absl::Cleanup deduction_cleanup = callback;
AssertSameType<decltype(factory_cleanup), decltype(deduction_cleanup)>();
}
{
auto factory_cleanup =
absl::MakeCleanup(FunctorClassFactory::AsCallback([] {}));
absl::Cleanup deduction_cleanup = FunctorClassFactory::AsCallback([] {});
AssertSameType<decltype(factory_cleanup), decltype(deduction_cleanup)>();
}
{
auto factory_cleanup =
absl::MakeCleanup(StdFunctionFactory::AsCallback([] {}));
absl::Cleanup deduction_cleanup = StdFunctionFactory::AsCallback([] {});
AssertSameType<decltype(factory_cleanup), decltype(deduction_cleanup)>();
}
{
auto factory_cleanup = absl::MakeCleanup(&FunctionPointerFunction);
absl::Cleanup deduction_cleanup = &FunctionPointerFunction;
AssertSameType<decltype(factory_cleanup), decltype(deduction_cleanup)>();
}
{
auto factory_cleanup = absl::MakeCleanup(FunctionPointerFunction);
absl::Cleanup deduction_cleanup = FunctionPointerFunction;
AssertSameType<decltype(factory_cleanup), decltype(deduction_cleanup)>();
}
}
#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
// The callback fires exactly when the cleanup object leaves scope.
TYPED_TEST(CleanupTest, BasicUsage) {
bool called = false;
{
EXPECT_FALSE(called);
auto cleanup =
absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
EXPECT_FALSE(called);
}
EXPECT_TRUE(called);
}
// Same scope-exit contract for a plain function-pointer callback.
TYPED_TEST(CleanupTest, BasicUsageWithFunctionPointer) {
function_pointer_called = false;
{
EXPECT_FALSE(function_pointer_called);
auto cleanup =
absl::MakeCleanup(TypeParam::AsCallback(&FunctionPointerFunction));
EXPECT_FALSE(function_pointer_called);
}
EXPECT_TRUE(function_pointer_called);
}
// `Cancel()` suppresses the callback entirely, including at destruction.
TYPED_TEST(CleanupTest, Cancel) {
bool called = false;
{
EXPECT_FALSE(called);
auto cleanup =
absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
std::move(cleanup).Cancel();
EXPECT_FALSE(called);
}
EXPECT_FALSE(called);
}
// `Invoke()` runs the callback immediately and not again at scope exit.
TYPED_TEST(CleanupTest, Invoke) {
bool called = false;
{
EXPECT_FALSE(called);
auto cleanup =
absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
std::move(cleanup).Invoke();
EXPECT_TRUE(called);
}
EXPECT_TRUE(called);
}
} // namespace

@ -0,0 +1,77 @@
// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
#include <type_traits>
#include <utility>
#include "absl/base/internal/invoke.h"
#include "absl/base/thread_annotations.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cleanup_internal {
// Marker type: `Cleanup`'s first template parameter is this tag whenever the
// instance was created through deduction (CTAD or `MakeCleanup`).
struct Tag {};

// True when `Arg` is the deduction tag and the caller supplied no additional
// explicit template arguments.
template <typename Arg, typename... Args>
constexpr bool WasDeduced() {
  return std::is_same<Tag, Arg>::value && sizeof...(Args) == 0;
}
// True iff invoking `Callback` with no arguments yields `void`. A cleanup has
// nowhere to deliver a result, so other return types are rejected at compile
// time by the static_asserts in `Cleanup` and `MakeCleanup`.
template <typename Callback>
constexpr bool ReturnsVoid() {
return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
}
// Holds the callback together with an `engaged_` flag saying whether it
// should still run; the pair moves as a unit so `Cleanup`'s move constructor
// stays trivial.
template <typename Callback>
class Storage {
public:
explicit Storage(Callback callback)
: engaged_(true), callback_(std::move(callback)) {}
// Transfers the callback and disengages the source, so the moved-from
// owner's destructor will not invoke the callback a second time.
Storage(Storage&& other)
: engaged_(absl::exchange(other.engaged_, false)),
callback_(std::move(other.callback_)) {}
Storage(const Storage& other) = delete;
Storage& operator=(Storage&& other) = delete;
Storage& operator=(const Storage& other) = delete;
bool IsCallbackEngaged() const { return engaged_; }
void DisengageCallback() { engaged_ = false; }
// Invokes the callback as an rvalue, matching types that only define
// `operator()() &&`.
void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
std::move(callback_)();
}
private:
bool engaged_;
Callback callback_;
};
} // namespace cleanup_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_

@ -167,11 +167,13 @@ class InlinedVector {
// Creates an inlined vector by copying the contents of `other` using `alloc`.
InlinedVector(const InlinedVector& other, const allocator_type& alloc)
: storage_(alloc) {
if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
if (other.empty()) {
// Empty; nothing to do.
} else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
// Memcpy-able and do not need allocation.
storage_.MemcpyFrom(other.storage_);
} else {
storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
other.size());
storage_.InitFrom(other.storage_);
}
}

@ -534,6 +534,28 @@ void BM_ConstructFromMove(benchmark::State& state) {
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
// Measure cost of copy-constructor+destructor.
// Copy-constructs (and destroys) an InlinedVector of trivially copyable
// elements; Arg(0)/Arg(1)/Arg(kLargeSize) cover empty, inline, and
// heap-allocated sources.
void BM_CopyTrivial(benchmark::State& state) {
const int n = state.range(0);
InlVec<int64_t> src(n);
for (auto s : state) {
InlVec<int64_t> copy(src);
benchmark::DoNotOptimize(copy);
}
}
BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
// Measure cost of copy-constructor+destructor.
// Same shape as above but with non-trivially-copyable elements (nested
// vectors), so per-element construction/destruction is exercised.
void BM_CopyNonTrivial(benchmark::State& state) {
const int n = state.range(0);
InlVec<InlVec<int64_t>> src(n);
for (auto s : state) {
InlVec<InlVec<int64_t>> copy(src);
benchmark::DoNotOptimize(copy);
}
}
BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) {
auto size = ToSize;

@ -81,6 +81,23 @@ void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
}
}
// If kUseMemcpy is true, memcpy(dst, src, n); else do nothing.
// Useful to avoid compiler warnings when memcpy() is used for T values
// that are not trivially copyable in non-reachable code.
// Primary template is declared but never defined: only the two explicit
// specializations below exist, selected at compile time by `kUseMemcpy`.
template <bool kUseMemcpy>
inline void MemcpyIfAllowed(void* dst, const void* src, size_t n);
// memcpy when allowed.
template <>
inline void MemcpyIfAllowed<true>(void* dst, const void* src, size_t n) {
memcpy(dst, src, n);
}
// Do nothing for types that are not memcpy-able. This function is only
// called from non-reachable branches.
template <>
inline void MemcpyIfAllowed<false>(void*, const void*, size_t) {}
template <typename AllocatorType, typename Pointer, typename ValueAdapter,
typename SizeType>
void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
@ -310,9 +327,14 @@ class Storage {
: metadata_(alloc, /* size and is_allocated */ 0) {}
~Storage() {
pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
DeallocateIfAllocated();
if (GetSizeAndIsAllocated() == 0) {
// Empty and not allocated; nothing to do.
} else if (IsMemcpyOk::value) {
// No destructors need to be run; just deallocate if necessary.
DeallocateIfAllocated();
} else {
DestroyContents();
}
}
// ---------------------------------------------------------------------------
@ -370,6 +392,8 @@ class Storage {
// Storage Member Mutators
// ---------------------------------------------------------------------------
ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
template <typename ValueAdapter>
void Initialize(ValueAdapter values, size_type new_size);
@ -452,6 +476,8 @@ class Storage {
}
private:
ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
using Metadata =
container_internal::CompressedTuple<allocator_type, size_type>;
@ -476,6 +502,40 @@ class Storage {
Data data_;
};
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
// Element storage may be inline or heap-allocated; destroy elements in
// place, then free the heap block if one was used.
pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
DeallocateIfAllocated();
}
// Copy-initializes this storage from `other` (assumed non-empty, freshly
// constructed `*this`): picks inline vs. allocated destination, then copies
// elements either via memcpy or element-wise construction.
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
const auto n = other.GetSize();
assert(n > 0); // Empty sources are handled in the caller.
const_pointer src;
pointer dst;
if (!other.GetIsAllocated()) {
dst = GetInlinedData();
src = other.GetInlinedData();
} else {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), n);
dst = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
SetAllocatedData(dst, new_capacity);
src = other.GetAllocatedData();
}
if (IsMemcpyOk::value) {
// Trivially copyable elements: bulk-copy the raw bytes.
MemcpyIfAllowed<IsMemcpyOk::value>(dst, src, sizeof(dst[0]) * n);
} else {
auto values = IteratorValueAdapter<const_pointer>(src);
inlined_vector_internal::ConstructElements(GetAllocPtr(), dst, &values, n);
}
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)

@ -207,10 +207,12 @@ void Status::UnrefNonInlined(uintptr_t rep) {
}
}
uintptr_t Status::NewRep(absl::StatusCode code, absl::string_view msg,
std::unique_ptr<status_internal::Payloads> payloads) {
uintptr_t Status::NewRep(
absl::StatusCode code, absl::string_view msg,
std::unique_ptr<status_internal::Payloads> payloads) {
status_internal::StatusRep* rep = new status_internal::StatusRep(
code, std::string(msg.data(), msg.size()), std::move(payloads));
code, std::string(msg.data(), msg.size()),
std::move(payloads));
return PointerToRep(rep);
}
@ -236,8 +238,9 @@ absl::StatusCode Status::code() const {
void Status::PrepareToModify() {
ABSL_RAW_CHECK(!ok(), "PrepareToModify shouldn't be called on OK status.");
if (IsInlined(rep_)) {
rep_ = NewRep(static_cast<absl::StatusCode>(raw_code()),
absl::string_view(), nullptr);
rep_ =
NewRep(static_cast<absl::StatusCode>(raw_code()), absl::string_view(),
nullptr);
return;
}
@ -248,7 +251,8 @@ void Status::PrepareToModify() {
if (rep->payloads) {
payloads = absl::make_unique<status_internal::Payloads>(*rep->payloads);
}
rep_ = NewRep(rep->code, message(), std::move(payloads));
rep_ = NewRep(rep->code, message(),
std::move(payloads));
UnrefNonInlined(rep_i);
}
}

@ -371,10 +371,10 @@ class ABSL_MUST_USE_RESULT Status final {
Status();
// Creates a status in the canonical error space with the specified
// `absl::StatusCode` and error message. If `code == absl::StatusCode::kOk`,
// `absl::StatusCode` and error message. If `code == absl::StatusCode::kOk`, // NOLINT
// `msg` is ignored and an object identical to an OK status is constructed.
//
// The `msg` string must be in UTF-8. The implementation may complain (e.g.,
// The `msg` string must be in UTF-8. The implementation may complain (e.g., // NOLINT
// by printing a warning) if it is not.
Status(absl::StatusCode code, absl::string_view msg);
@ -551,8 +551,9 @@ class ABSL_MUST_USE_RESULT Status final {
status_internal::Payloads* GetPayloads();
// Takes ownership of payload.
static uintptr_t NewRep(absl::StatusCode code, absl::string_view msg,
std::unique_ptr<status_internal::Payloads> payload);
static uintptr_t NewRep(
absl::StatusCode code, absl::string_view msg,
std::unique_ptr<status_internal::Payloads> payload);
static bool EqualsSlow(const absl::Status& a, const absl::Status& b);
// MSVC 14.0 limitation requires the const.

@ -37,6 +37,7 @@
#include "absl/strings/escaping.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
@ -50,8 +51,8 @@ using ::absl::cord_internal::CordRep;
using ::absl::cord_internal::CordRepConcat;
using ::absl::cord_internal::CordRepExternal;
using ::absl::cord_internal::CordRepFlat;
using ::absl::cord_internal::CordRepRing;
using ::absl::cord_internal::CordRepSubstring;
using ::absl::cord_internal::kMinFlatLength;
using ::absl::cord_internal::kMaxFlatLength;
@ -94,6 +95,11 @@ static constexpr uint64_t min_length[] = {
static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
// Whether new cord trees should be built as CordRepRing buffers; reads the
// process-wide flag with relaxed ordering (no synchronization implied).
static inline bool cord_ring_enabled() {
return cord_internal::cord_ring_buffer_enabled.load(
std::memory_order_relaxed);
}
static inline bool IsRootBalanced(CordRep* node) {
if (node->tag != CONCAT) {
return true;
@ -109,7 +115,8 @@ static inline bool IsRootBalanced(CordRep* node) {
}
static CordRep* Rebalance(CordRep* node);
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os);
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
int indent = 0);
static bool VerifyNode(CordRep* root, CordRep* start_node,
bool full_validation);
@ -198,12 +205,38 @@ static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
return reps[0];
}
// Allocates a single flat node with room for `length + alloc_hint` bytes and
// copies `length` bytes from `data` into it. The returned node has a
// refcount of 1.
static CordRepFlat* CreateFlat(const char* data, size_t length,
                               size_t alloc_hint) {
  // Over-allocate by `alloc_hint` so subsequent appends can go in place.
  CordRepFlat* const rep = CordRepFlat::New(length + alloc_hint);
  rep->length = length;
  memcpy(rep->Data(), data, length);
  return rep;
}
// Creates a new flat or ringbuffer out of the specified array.
// The returned node has a refcount of 1.
static CordRep* RingNewTree(const char* data, size_t length,
                            size_t alloc_hint) {
  // Small inputs fit in a single flat node; no ring buffer is needed.
  if (length <= kMaxFlatLength) {
    return CreateFlat(data, length, alloc_hint);
  }
  // Fill one maximally-sized flat node first, then hand the remainder to a
  // ring buffer seeded with that node.
  CordRepFlat* const first = CreateFlat(data, kMaxFlatLength, 0);
  const char* remaining_data = data + kMaxFlatLength;
  const size_t remaining_length = length - kMaxFlatLength;
  // Ceiling division: worst-case number of additional flat entries the ring
  // must hold for the remaining bytes.
  const size_t extra = (remaining_length - 1) / kMaxFlatLength + 1;
  CordRepRing* const root = CordRepRing::Create(first, extra);
  return CordRepRing::Append(root, {remaining_data, remaining_length},
                             alloc_hint);
}
// Create a new tree out of the specified array.
// The returned node has a refcount of 1.
static CordRep* NewTree(const char* data,
size_t length,
size_t alloc_hint) {
if (length == 0) return nullptr;
if (cord_ring_enabled()) {
return RingNewTree(data, length, alloc_hint);
}
absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
size_t n = 0;
do {
@ -295,10 +328,18 @@ inline void Cord::InlineRep::remove_prefix(size_t n) {
reduce_size(n);
}
// Returns `rep` converted into a CordRepRing.
// Directly returns `rep` if `rep` is already a CordRepRing.
static CordRepRing* ForceRing(CordRep* rep, size_t extra) {
  if (rep->tag == RING) {
    return rep->ring();
  }
  // Wrap the existing tree in a new ring with room for `extra` more entries.
  return CordRepRing::Create(rep, extra);
}
void Cord::InlineRep::AppendTree(CordRep* tree) {
if (tree == nullptr) return;
if (data_.is_empty()) {
set_tree(tree);
} else if (cord_ring_enabled()) {
set_tree(CordRepRing::Append(ForceRing(force_tree(0), 1), tree));
} else {
set_tree(Concat(force_tree(0), tree));
}
@ -308,6 +349,8 @@ void Cord::InlineRep::PrependTree(CordRep* tree) {
assert(tree != nullptr);
if (data_.is_empty()) {
set_tree(tree);
} else if (cord_ring_enabled()) {
set_tree(CordRepRing::Prepend(ForceRing(force_tree(0), 1), tree));
} else {
set_tree(Concat(tree, force_tree(0)));
}
@ -319,6 +362,15 @@ void Cord::InlineRep::PrependTree(CordRep* tree) {
// written to region and the actual size increase will be written to size.
static inline bool PrepareAppendRegion(CordRep* root, char** region,
size_t* size, size_t max_length) {
if (root->tag == RING && root->refcount.IsOne()) {
Span<char> span = root->ring()->GetAppendBuffer(max_length);
if (!span.empty()) {
*region = span.data();
*size = span.size();
return true;
}
}
// Search down the right-hand path for a non-full FLAT node.
CordRep* dst = root;
while (dst->tag == CONCAT && dst->refcount.IsOne()) {
@ -383,6 +435,11 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
new_node->length = std::min(new_node->Capacity(), max_length);
*region = new_node->Data();
*size = new_node->length;
if (cord_ring_enabled()) {
replace_tree(CordRepRing::Append(ForceRing(root, 1), new_node));
return;
}
replace_tree(Concat(root, new_node));
}
@ -411,6 +468,11 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size) {
new_node->length = new_node->Capacity();
*region = new_node->Data();
*size = new_node->length;
if (cord_ring_enabled()) {
replace_tree(CordRepRing::Append(ForceRing(root, 1), new_node));
return;
}
replace_tree(Concat(root, new_node));
}
@ -593,6 +655,13 @@ void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
return;
}
if (cord_ring_enabled()) {
absl::string_view data(src_data, src_size);
root = ForceRing(root, (data.size() - 1) / kMaxFlatLength + 1);
replace_tree(CordRepRing::Append(root->ring(), data));
return;
}
// Use new block(s) for any remaining bytes that were not handled above.
// Alloc extra memory only if the right child of the root of the new tree is
// going to be a FLAT node, which will permit further inplace appends.
@ -805,6 +874,8 @@ void Cord::RemovePrefix(size_t n) {
CordRep* tree = contents_.tree();
if (tree == nullptr) {
contents_.remove_prefix(n);
} else if (tree->tag == RING) {
contents_.replace_tree(CordRepRing::RemovePrefix(tree->ring(), n));
} else {
CordRep* newrep = RemovePrefixFrom(tree, n);
CordRep::Unref(tree);
@ -819,6 +890,8 @@ void Cord::RemoveSuffix(size_t n) {
CordRep* tree = contents_.tree();
if (tree == nullptr) {
contents_.reduce_size(n);
} else if (tree->tag == RING) {
contents_.replace_tree(CordRepRing::RemoveSuffix(tree->ring(), n));
} else {
CordRep* newrep = RemoveSuffixFrom(tree, n);
CordRep::Unref(tree);
@ -902,6 +975,9 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
}
cord_internal::SmallMemmove(dest, it->data(), remaining_size);
sub_cord.contents_.set_inline_size(new_size);
} else if (tree->tag == RING) {
tree = CordRepRing::SubRing(CordRep::Ref(tree)->ring(), pos, new_size);
sub_cord.contents_.set_tree(tree);
} else {
sub_cord.contents_.set_tree(NewSubRange(tree, pos, new_size));
}
@ -1103,6 +1179,10 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return absl::string_view(node->external()->base, node->length);
}
if (node->tag == RING) {
return node->ring()->entry_data(node->ring()->head());
}
// Walk down the left branches until we hit a non-CONCAT node.
while (node->tag == CONCAT) {
node = node->concat()->left;
@ -1360,6 +1440,25 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
}
return subcord;
}
if (ring_reader_) {
size_t chunk_size = current_chunk_.size();
if (n <= chunk_size && n <= kMaxBytesToCopy) {
subcord = Cord(current_chunk_.substr(0, n));
} else {
auto* ring = CordRep::Ref(ring_reader_.ring())->ring();
size_t offset = ring_reader_.length() - bytes_remaining_;
subcord.contents_.set_tree(CordRepRing::SubRing(ring, offset, n));
}
if (n < chunk_size) {
bytes_remaining_ -= n;
current_chunk_.remove_prefix(n);
} else {
AdvanceBytesRing(n);
}
return subcord;
}
auto& stack_of_right_children = stack_of_right_children_;
if (n < current_chunk_.size()) {
// Range to read is a proper subrange of the current chunk.
@ -1533,6 +1632,8 @@ char Cord::operator[](size_t i) const {
if (rep->tag >= FLAT) {
// Get the "i"th character directly from the flat array.
return rep->flat()->Data()[offset];
} else if (rep->tag == RING) {
return rep->ring()->GetCharacter(offset);
} else if (rep->tag == EXTERNAL) {
// Get the "i"th character from the external array.
return rep->external()->base[offset];
@ -1609,6 +1710,15 @@ absl::string_view Cord::FlattenSlowPath() {
/* static */ void Cord::ForEachChunkAux(
absl::cord_internal::CordRep* rep,
absl::FunctionRef<void(absl::string_view)> callback) {
if (rep->tag == RING) {
ChunkIterator it(rep), end;
while (it != end) {
callback(*it);
++it;
}
return;
}
assert(rep != nullptr);
int stack_pos = 0;
constexpr int stack_max = 128;
@ -1650,9 +1760,9 @@ absl::string_view Cord::FlattenSlowPath() {
}
}
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) {
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
int indent) {
const int kIndentStep = 1;
int indent = 0;
absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
absl::InlinedVector<int, kInlinedVectorSize> indents;
for (;;) {
@ -1673,18 +1783,28 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) {
*os << "SUBSTRING @ " << rep->substring()->start << "\n";
indent += kIndentStep;
rep = rep->substring()->child;
} else { // Leaf
} else { // Leaf or ring
if (rep->tag == EXTERNAL) {
*os << "EXTERNAL [";
if (include_data)
*os << absl::CEscape(std::string(rep->external()->base, rep->length));
*os << "]\n";
} else {
} else if (rep->tag >= FLAT) {
*os << "FLAT cap=" << rep->flat()->Capacity()
<< " [";
if (include_data)
*os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
*os << "]\n";
} else {
assert(rep->tag == RING);
auto* ring = rep->ring();
*os << "RING, entries = " << ring->entries() << "\n";
CordRepRing::index_type head = ring->head();
do {
DumpNode(ring->entry_child(head), include_data, os,
indent + kIndentStep);
head = ring->advance(head);;
} while (head != ring->tail());
}
if (stack.empty()) break;
rep = stack.back();
@ -1778,6 +1898,15 @@ static bool VerifyNode(CordRep* root, CordRep* start_node,
}
next_node = right;
}
} else if (cur_node->tag == RING) {
total_mem_usage += CordRepRing::AllocSize(cur_node->ring()->capacity());
const CordRepRing* ring = cur_node->ring();
CordRepRing::index_type pos = ring->head(), tail = ring->tail();
do {
CordRep* node = ring->entry_child(pos);
assert(node->tag >= FLAT || node->tag == EXTERNAL);
RepMemoryUsageLeaf(node, &total_mem_usage);
} while ((pos = ring->advance(pos)) != tail);
} else {
// Since cur_node is not a leaf or a concat node it must be a substring.
assert(cur_node->tag == SUBSTRING);

@ -78,6 +78,8 @@
#include "absl/functional/function_ref.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/cord_rep_ring_reader.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/internal/string_constant.h"
#include "absl/strings/string_view.h"
@ -361,6 +363,10 @@ class Cord {
friend class CharIterator;
private:
using CordRep = absl::cord_internal::CordRep;
using CordRepRing = absl::cord_internal::CordRepRing;
using CordRepRingReader = absl::cord_internal::CordRepRingReader;
// Stack of right children of concat nodes that we have to visit.
// Keep this at the end of the structure to avoid cache-thrashing.
// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
@ -385,6 +391,10 @@ class Cord {
// Stack specific operator++
ChunkIterator& AdvanceStack();
// Ring buffer specific operator++
ChunkIterator& AdvanceRing();
void AdvanceBytesRing(size_t n);
// Iterates `n` bytes, where `n` is expected to be greater than or equal to
// `current_chunk_.size()`.
void AdvanceBytesSlowPath(size_t n);
@ -398,6 +408,10 @@ class Cord {
absl::cord_internal::CordRep* current_leaf_ = nullptr;
// The number of bytes left in the `Cord` over which we are iterating.
size_t bytes_remaining_ = 0;
// Cord reader for ring buffers. Empty if not traversing a ring buffer.
CordRepRingReader ring_reader_;
// See 'Stack' alias definition.
Stack stack_of_right_children_;
};
@ -1107,6 +1121,11 @@ inline bool Cord::StartsWith(absl::string_view rhs) const {
}
// Starts iteration over `tree`. Ring buffers are traversed through the
// dedicated ring reader; every other tree shape goes through the stack of
// right children and the regular operator++ machinery.
inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
  if (tree->tag != cord_internal::RING) {
    stack_of_right_children_.push_back(tree);
    operator++();
    return;
  }
  // Reset() positions the reader at the first entry and returns its data.
  current_chunk_ = ring_reader_.Reset(tree->ring());
}
@ -1126,13 +1145,33 @@ inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
}
}
// Ring buffer specific operator++: advances the ring reader to the next
// entry and exposes it as the current chunk.
// NOTE(review): assumes the iterator is traversing a ring buffer (callers
// dispatch on `ring_reader_` before calling this).
inline Cord::ChunkIterator& Cord::ChunkIterator::AdvanceRing() {
current_chunk_ = ring_reader_.Next();
return *this;
}
// Advances the iterator by `n` bytes within a ring buffer, where `n` must be
// at least the size of the current chunk. Lands either exactly on an entry
// boundary (Next) or in the middle of a later entry (Seek), or clears the
// chunk when the end of the cord is reached.
inline void Cord::ChunkIterator::AdvanceBytesRing(size_t n) {
  assert(n >= current_chunk_.size());
  bytes_remaining_ -= n;
  if (bytes_remaining_ == 0) {
    // Consumed everything: become equal to end().
    current_chunk_ = {};
    return;
  }
  if (n == current_chunk_.size()) {
    // Exactly finished the current entry; step to the next one.
    current_chunk_ = ring_reader_.Next();
  } else {
    // Skipped past one or more entries; seek to the absolute offset.
    const size_t offset = ring_reader_.length() - bytes_remaining_;
    current_chunk_ = ring_reader_.Seek(offset);
  }
}
inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
"Attempted to iterate past `end()`");
assert(bytes_remaining_ >= current_chunk_.size());
bytes_remaining_ -= current_chunk_.size();
if (bytes_remaining_ > 0) {
return AdvanceStack();
return ring_reader_ ? AdvanceRing() : AdvanceStack();
} else {
current_chunk_ = {};
}
@ -1174,7 +1213,7 @@ inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
RemoveChunkPrefix(n);
} else if (n != 0) {
AdvanceBytesSlowPath(n);
ring_reader_ ? AdvanceBytesRing(n) : AdvanceBytesSlowPath(n);
}
}

@ -367,7 +367,7 @@ TEST(Cord, Subcord) {
for (size_t end_pos : positions) {
if (end_pos < pos || end_pos > a.size()) continue;
absl::Cord sa = a.Subcord(pos, end_pos - pos);
EXPECT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
ASSERT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
std::string(sa))
<< a;
}
@ -379,7 +379,7 @@ TEST(Cord, Subcord) {
for (size_t pos = 0; pos <= sh.size(); ++pos) {
for (size_t n = 0; n <= sh.size() - pos; ++n) {
absl::Cord sc = c.Subcord(pos, n);
EXPECT_EQ(sh.substr(pos, n), std::string(sc)) << c;
ASSERT_EQ(sh.substr(pos, n), std::string(sc)) << c;
}
}
@ -389,7 +389,7 @@ TEST(Cord, Subcord) {
while (sa.size() > 1) {
sa = sa.Subcord(1, sa.size() - 2);
ss = ss.substr(1, ss.size() - 2);
EXPECT_EQ(ss, std::string(sa)) << a;
ASSERT_EQ(ss, std::string(sa)) << a;
if (HasFailure()) break; // halt cascade
}

@ -43,8 +43,9 @@ static constexpr size_t kMaxFlatSize = 4096;
static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
constexpr size_t AllocatedSizeToTagUnchecked(size_t size) {
return (size <= 1024) ? size / 8 : 128 + size / 32 - 1024 / 32;
constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
return static_cast<uint8_t>((size <= 1024) ? size / 8
: 128 + size / 32 - 1024 / 32);
}
static_assert(kMinFlatSize / 8 >= FLAT, "");
@ -65,7 +66,7 @@ inline size_t RoundUpForTag(size_t size) {
// undefined if the size exceeds the maximum size that can be encoded in
// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
inline uint8_t AllocatedSizeToTag(size_t size) {
const size_t tag = AllocatedSizeToTagUnchecked(size);
const uint8_t tag = AllocatedSizeToTagUnchecked(size);
assert(tag <= MAX_FLAT_TAG);
return tag;
}

@ -36,8 +36,10 @@ namespace cord_internal {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"
#if __has_warning("-Wshadow-field")
#pragma clang diagnostic ignored "-Wshadow-field"
#endif
#endif
namespace {

@ -34,8 +34,10 @@ namespace cord_internal {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"
#if __has_warning("-Wshadow-field")
#pragma clang diagnostic ignored "-Wshadow-field"
#endif
#endif
// All operations modifying a ring buffer are implemented as static methods
// requiring a CordRepRing instance with a reference adopted by the method.
@ -81,7 +83,7 @@ class CordRepRing : public CordRep {
// `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus
// this node's length. The purpose is to allow for a binary search on this
// position, while allowing O(1) prepend and append operations.
using pos_type = uint64_t;
using pos_type = size_t;
// `index_type` is the type for the `head`, `tail` and `capacity` indexes.
// Ring buffers are limited to having no more than four billion entries.

Loading…
Cancel
Save