Export of internal Abseil changes

--
0bfa836596a9c787a2f0bdc283011dd1f6810c6e by Benjamin Barenblat <bbaren@google.com>:

Ignore missing CPU frequency on more architectures

Linux on MIPS, PA-RISC, RISC-V, and SystemZ doesn’t expose the nominal
CPU frequency via /sys, so don’t worry if `NominalCPUFrequency` returns
1.0 on those platforms.

Some POWER machines expose the CPU frequency; others do not. Since we
can’t predict which type of machine the tests will run on, simply
disable testing for `NominalCPUFrequency` on POWER.
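As a rough sketch of the resulting behavior (a hypothetical test, not part of this change, mirroring the guard added to sysinfo_test.cc below): on an architecture without a sysfs frequency the call falls back to 1.0, and elsewhere it should report something plausible.

#include "absl/base/internal/sysinfo.h"
#include "gtest/gtest.h"

// Hypothetical check: tolerate the 1.0 fallback on architectures whose Linux
// kernels do not expose the nominal CPU frequency via /sys (subset shown).
TEST(NominalCPUFrequencySketch, FallsBackToOne) {
  const double freq = absl::base_internal::NominalCPUFrequency();
#if defined(__linux__) && (defined(__mips__) || defined(__hppa__) || \
                           defined(__riscv) || defined(__s390x__))
  EXPECT_EQ(freq, 1.0);     // No sysfs frequency; the fallback value is 1.0.
#else
  EXPECT_GE(freq, 1000.0);  // A real frequency, in Hz, should be large.
#endif
}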

PiperOrigin-RevId: 347079873

--
492b6834ed4a07cbc3abccd846f7e37d8c556ee5 by Benjamin Barenblat <bbaren@google.com>:

Use ABSL_HAVE_THREAD_LOCAL macro instead of copying code

Reduce code duplication by checking the ABSL_HAVE_THREAD_LOCAL macro
instead of copying code from base/config.h.
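A minimal sketch of the pattern (illustrative only; the variable name is made up): consumers test the macro from base/config.h rather than re-deriving compiler support themselves.

#include "absl/base/config.h"

#ifdef ABSL_HAVE_THREAD_LOCAL
// config.h has already verified that `thread_local` works on this toolchain.
thread_local int per_thread_counter = 0;
#else
// Fallback path for toolchains without working thread_local support.
int per_thread_counter = 0;
#endif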

PiperOrigin-RevId: 347079561

--
8d656efce4da9cb032094377e58493d98427a536 by Abseil Team <absl-team@google.com>:

Rollback

PiperOrigin-RevId: 347078779

--
221bc69ec6dd7e2777ffcff6942584f979ef6382 by Abseil Team <absl-team@google.com>:

Add flag for 'shallow subcord' feature for experimental ring buffer rollout

There is a potential trade-off between CPU cost and over-sharing of cord data when taking a subcord of a large cord. This flag makes subcords of ring buffers shallow (at the cost of potentially keeping a larger portion of the referenced source cord alive), which makes subcord fast for apps that do not persist (unmodified / plainly copied) subcords.

This change also introduces constants for the default settings, intended to keep the internal cord settings consistent with the external flags.
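For illustration, the feature is toggled through the internal setter added to cord_internal.h in this change; a hedged sketch of opting an experiment arm in (the wrapper function is hypothetical, and this is an internal API, not a public one):

#include "absl/strings/internal/cord_internal.h"

// Enable shallow subcords of ring buffers for an experiment arm. The default
// remains kCordShallowSubcordsDefault (false) unless explicitly enabled.
void EnableShallowSubcordExperiment() {
  absl::cord_internal::enable_shallow_subcords(true);
}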

PiperOrigin-RevId: 347053271

--
00a56c24293566734009f6bf2169a83fb37a35ba by Abseil Team <absl-team@google.com>:

Revert the usage of variant<> in Cord iterator and reader.

The introduction of the variant may lead to some missed compiler optimizations.

PiperOrigin-RevId: 347053041

--
c7b7b5ed7e3ab46b1e75b80f1a7de0bda26c8f70 by Chris Kennelly <ckennelly@google.com>:

Release library for integer power-of-2 functions and bit counting.
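A short usage sketch of the new absl/numeric/bits.h API (function names and values taken from the header and tests added in this change):

#include <cstdint>
#include <iostream>

#include "absl/numeric/bits.h"

int main() {
  const uint32_t x = 1337;
  std::cout << absl::popcount(x) << "\n";        // 6 bits set
  std::cout << absl::countl_zero(x) << "\n";     // 21 leading zeroes
  std::cout << absl::bit_width(x) << "\n";       // 11
  std::cout << absl::bit_floor(x) << "\n";       // 1024
  std::cout << absl::bit_ceil(x) << "\n";        // 2048
  std::cout << absl::has_single_bit(x) << "\n";  // 0 (not a power of two)
  // Rotations work on any unsigned type; cast so the byte prints as a number.
  std::cout << static_cast<int>(absl::rotl(uint8_t{0x12}, 4)) << "\n";  // 33 (0x21)
  return 0;
}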

PiperOrigin-RevId: 347035065

--
5a035c0d9840b251967f9e7039fc6a4e01dd52f3 by Abseil Team <absl-team@google.com>:

Restructure Cord::ChunkIterator for future ring buffer support.

PiperOrigin-RevId: 346890054
GitOrigin-RevId: 0bfa836596a9c787a2f0bdc283011dd1f6810c6e
Change-Id: I3a58e2a44cb4c6f2116c43e2a4ccbc319d3ccecf
Branch: pull/861/head
Commit: 1918ad2ae3 (parent 938fd0f4e6)
Author: Abseil Team, 4 years ago; committed by Andy Getz
17 changed files (changed line counts in parentheses):

  1. CMake/AbseilDll.cmake (2)
  2. absl/base/CMakeLists.txt (6)
  3. absl/base/internal/sysinfo_test.cc (23)
  4. absl/container/CMakeLists.txt (2)
  5. absl/container/internal/inlined_vector.h (11)
  6. absl/debugging/internal/stacktrace_config.h (15)
  7. absl/numeric/BUILD.bazel (29)
  8. absl/numeric/CMakeLists.txt (29)
  9. absl/numeric/bits.h (177)
  10. absl/numeric/bits_test.cc (565)
  11. absl/numeric/internal/bits.h (350)
  12. absl/random/CMakeLists.txt (10)
  13. absl/strings/CMakeLists.txt (4)
  14. absl/strings/cord.cc (45)
  15. absl/strings/cord.h (30)
  16. absl/strings/internal/cord_internal.cc (5)
  17. absl/strings/internal/cord_internal.h (11)

@ -126,8 +126,10 @@ set(ABSL_INTERNAL_DLL_FILES
"hash/internal/wyhash.cc"
"memory/memory.h"
"meta/type_traits.h"
"numeric/bits.h"
"numeric/int128.cc"
"numeric/int128.h"
"numeric/internal/bits.h"
"random/bernoulli_distribution.h"
"random/beta_distribution.h"
"random/bit_gen_ref.h"

@ -520,7 +520,7 @@ absl_cc_test(
absl_cc_library(
NAME
bits
internal_bits
HDRS
"internal/bits.h"
COPTS
@ -532,13 +532,13 @@ absl_cc_library(
absl_cc_test(
NAME
bits_test
internal_bits_test
SRCS
"internal/bits_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::bits
absl::internal_bits
gtest_main
)

@ -37,17 +37,28 @@ TEST(SysinfoTest, NumCPUs) {
<< "NumCPUs() should not have the default value of 0";
}
// Ensure that NominalCPUFrequency returns a reasonable value, or 1.00 on
// platforms where the CPU frequency is not available through sysfs.
//
// POWER is particularly problematic here; some Linux kernels expose the CPU
// frequency, while others do not. Since we can't predict a priori what a given
// machine is going to do, just disable this test on POWER on Linux.
#if !(defined(__linux) && (defined(__ppc64__) || defined(__PPC64__)))
TEST(SysinfoTest, NominalCPUFrequency) {
#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
EXPECT_GE(NominalCPUFrequency(), 1000.0)
<< "NominalCPUFrequency() did not return a reasonable value";
#else
// Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
// Emscripten does not have a sysfs to read from at all.
// Linux only exposes the CPU frequency on certain architectures, and
// Emscripten doesn't expose it at all.
#if defined(__linux__) && \
(defined(__aarch64__) || defined(__hppa__) || defined(__mips__) || \
defined(__riscv) || defined(__s390x__)) || \
defined(__EMSCRIPTEN__)
EXPECT_EQ(NominalCPUFrequency(), 1.0)
<< "CPU frequency detection was fixed! Please update unittest.";
#else
EXPECT_GE(NominalCPUFrequency(), 1000.0)
<< "NominalCPUFrequency() did not return a reasonable value";
#endif
}
#endif
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test.

@ -665,7 +665,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bits
absl::internal_bits
absl::compressed_tuple
absl::config
absl::container_common

@ -33,6 +33,12 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {
// GCC does not deal very well with the below code
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
@ -889,6 +895,11 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
}
// End ignore "maybe-uninitialized"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
} // namespace inlined_vector_internal
ABSL_NAMESPACE_END
} // namespace absl

@ -21,6 +21,8 @@
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
#include "absl/base/config.h"
#if defined(ABSL_STACKTRACE_INL_HEADER)
#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
@ -29,19 +31,8 @@
"absl/debugging/internal/stacktrace_win32-inl.inc"
#elif defined(__APPLE__)
#ifdef ABSL_HAVE_THREAD_LOCAL
// Thread local support required for UnwindImpl.
// Notes:
// * Xcode's clang did not support `thread_local` until version 8, and
// even then not for all iOS < 9.0.
// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
// targeting iOS 9.x.
// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
// making __has_feature unreliable there.
//
// Otherwise, `__has_feature` is only supported by Clang so it has to be inside
// `defined(__APPLE__)` check.
#if __has_feature(cxx_thread_local) && \
!(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_generic-inl.inc"
#endif

@ -24,6 +24,35 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
cc_library(
name = "bits",
hdrs = [
"bits.h",
"internal/bits.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
"//absl/base:core_headers",
],
)
cc_test(
name = "bits_test",
size = "small",
srcs = [
"bits_test.cc",
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":bits",
"//absl/random",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "int128",
srcs = [

@ -14,6 +14,33 @@
# limitations under the License.
#
absl_cc_library(
NAME
bits
HDRS
"bits.h"
"internal/bits.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::core_headers
PUBLIC
)
absl_cc_test(
NAME
bits_test
SRCS
"bits_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::bits
absl::core_headers
absl::random_random
gmock_main
)
absl_cc_library(
NAME
int128
@ -26,9 +53,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bits
absl::config
absl::core_headers
absl::internal_bits
PUBLIC
)

@ -0,0 +1,177 @@
// Copyright 2020 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: bits.h
// -----------------------------------------------------------------------------
//
// This file contains implementations of C++20's bitwise math functions, as
// defined by:
//
// P0553R4:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html
// P0556R3:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html
// P1355R2:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
// P1956R1:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
//
// When using a standard library that implements these functions, we use the
// standard library's implementation.
#ifndef ABSL_NUMERIC_BITS_H_
#define ABSL_NUMERIC_BITS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
#include <bit>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/numeric/internal/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
// rotating
template <class T>
ABSL_MUST_USE_RESULT constexpr
typename std::enable_if<std::is_unsigned<T>::value, T>::type
rotl(T x, int s) noexcept {
return numeric_internal::RotateLeft(x, s);
}
template <class T>
ABSL_MUST_USE_RESULT constexpr
typename std::enable_if<std::is_unsigned<T>::value, T>::type
rotr(T x, int s) noexcept {
return numeric_internal::RotateRight(x, s);
}
// Counting functions
//
// While these functions are typically constexpr, on some platforms, they may
// not be marked as constexpr due to constraints of the compiler/available
// intrinsics.
template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, int>::type
countl_zero(T x) noexcept {
return numeric_internal::CountLeadingZeroes(x);
}
template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, int>::type
countl_one(T x) noexcept {
// Avoid integer promotion to a wider type
return countl_zero(static_cast<T>(~x));
}
template <class T>
ABSL_INTERNAL_CONSTEXPR_CTZ inline
typename std::enable_if<std::is_unsigned<T>::value, int>::type
countr_zero(T x) noexcept {
return numeric_internal::CountTrailingZeroes(x);
}
template <class T>
ABSL_INTERNAL_CONSTEXPR_CTZ inline
typename std::enable_if<std::is_unsigned<T>::value, int>::type
countr_one(T x) noexcept {
// Avoid integer promotion to a wider type
return countr_zero(static_cast<T>(~x));
}
template <class T>
ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline
typename std::enable_if<std::is_unsigned<T>::value, int>::type
popcount(T x) noexcept {
return numeric_internal::Popcount(x);
}
#else // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
using std::countl_one;
using std::countl_zero;
using std::countr_one;
using std::countr_zero;
using std::popcount;
using std::rotl;
using std::rotr;
#endif
#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L)
// Returns: true if x is an integral power of two; false otherwise.
template <class T>
constexpr inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
has_single_bit(T x) noexcept {
return x != 0 && (x & (x - 1)) == 0;
}
// Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any
// fractional part discarded.
template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
bit_width(T x) noexcept {
return std::numeric_limits<T>::digits - countl_zero(x);
}
// Returns: If x == 0, 0; otherwise the maximal value y such that
// has_single_bit(y) is true and y <= x.
template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
bit_floor(T x) noexcept {
return x == 0 ? 0 : T{1} << (bit_width(x) - 1);
}
// Returns: N, where N is the smallest power of 2 greater than or equal to x.
//
// Preconditions: N is representable as a value of type T.
template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
bit_ceil(T x) {
// If T is narrower than unsigned, T{1} << bit_width will be promoted. We
// want to force it to wrap around so that bit_ceil of an invalid value is not
// a core constant expression.
//
// BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would
// undergo promotion to unsigned but not fit the result into T without
// truncation.
return has_single_bit(x) ? T{1} << (bit_width(x) - 1)
: numeric_internal::BitCeilNonPowerOf2(x);
}
#else // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L
using std::bit_ceil;
using std::bit_floor;
using std::bit_width;
using std::has_single_bit;
#endif
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_NUMERIC_BITS_H_

@ -0,0 +1,565 @@
// Copyright 2020 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/numeric/bits.h"
#include <limits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/random.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
TEST(Rotate, Left) {
static_assert(rotl(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotl(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
static_assert(rotl(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
static_assert(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
uint64_t{0x12345678ABCDEF01ULL},
"");
EXPECT_EQ(rotl(uint8_t{0x12}, 0), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, 0), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, 8), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, 16), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, -8), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, -16), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, 4), uint8_t{0x21});
EXPECT_EQ(rotl(uint16_t{0x1234}, 4), uint16_t{0x2341});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 4), uint32_t{0x23456781UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 4),
uint64_t{0x2345678ABCDEF011ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, -4), uint8_t{0x21});
EXPECT_EQ(rotl(uint16_t{0x1234}, -4), uint16_t{0x4123});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -4), uint32_t{0x81234567UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -4),
uint64_t{0x112345678ABCDEF0ULL});
}
TEST(Rotate, Right) {
static_assert(rotr(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotr(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
static_assert(rotr(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
static_assert(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
uint64_t{0x12345678ABCDEF01ULL},
"");
EXPECT_EQ(rotr(uint8_t{0x12}, 0), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, 0), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, 8), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, 16), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, -8), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, -16), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, 4), uint8_t{0x21});
EXPECT_EQ(rotr(uint16_t{0x1234}, 4), uint16_t{0x4123});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 4), uint32_t{0x81234567UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 4),
uint64_t{0x112345678ABCDEF0ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, -4), uint8_t{0x21});
EXPECT_EQ(rotr(uint16_t{0x1234}, -4), uint16_t{0x2341});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -4), uint32_t{0x23456781UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -4),
uint64_t{0x2345678ABCDEF011ULL});
}
TEST(Rotate, Symmetry) {
// rotr(x, s) is equivalent to rotl(x, -s)
absl::BitGen rng;
constexpr int kTrials = 100;
for (int i = 0; i < kTrials; ++i) {
uint8_t value = absl::Uniform(rng, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint8_t>::digits,
2 * std::numeric_limits<uint8_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint16_t value = absl::Uniform(rng, std::numeric_limits<uint16_t>::min(),
std::numeric_limits<uint16_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint16_t>::digits,
2 * std::numeric_limits<uint16_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint32_t value = absl::Uniform(rng, std::numeric_limits<uint32_t>::min(),
std::numeric_limits<uint32_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint32_t>::digits,
2 * std::numeric_limits<uint32_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint64_t value = absl::Uniform(rng, std::numeric_limits<uint64_t>::min(),
std::numeric_limits<uint64_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint64_t>::digits,
2 * std::numeric_limits<uint64_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
}
TEST(Counting, LeadingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_zero(uint8_t{}) == 8, "");
static_assert(countl_zero(static_cast<uint8_t>(-1)) == 0, "");
static_assert(countl_zero(uint16_t{}) == 16, "");
static_assert(countl_zero(static_cast<uint16_t>(-1)) == 0, "");
static_assert(countl_zero(uint32_t{}) == 32, "");
static_assert(countl_zero(~uint32_t{}) == 0, "");
static_assert(countl_zero(uint64_t{}) == 64, "");
static_assert(countl_zero(~uint64_t{}) == 0, "");
#endif
EXPECT_EQ(countl_zero(uint8_t{}), 8);
EXPECT_EQ(countl_zero(static_cast<uint8_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint16_t{}), 16);
EXPECT_EQ(countl_zero(static_cast<uint16_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint32_t{}), 32);
EXPECT_EQ(countl_zero(~uint32_t{}), 0);
EXPECT_EQ(countl_zero(uint64_t{}), 64);
EXPECT_EQ(countl_zero(~uint64_t{}), 0);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(countl_zero(static_cast<uint8_t>(1u << i)), 7 - i);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(countl_zero(static_cast<uint16_t>(1u << i)), 15 - i);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(countl_zero(uint32_t{1} << i), 31 - i);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(countl_zero(uint64_t{1} << i), 63 - i);
}
}
TEST(Counting, LeadingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_one(uint8_t{}) == 0, "");
static_assert(countl_one(static_cast<uint8_t>(-1)) == 8, "");
static_assert(countl_one(uint16_t{}) == 0, "");
static_assert(countl_one(static_cast<uint16_t>(-1)) == 16, "");
static_assert(countl_one(uint32_t{}) == 0, "");
static_assert(countl_one(~uint32_t{}) == 32, "");
static_assert(countl_one(uint64_t{}) == 0, "");
static_assert(countl_one(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(countl_one(uint8_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(countl_one(uint16_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(countl_one(uint32_t{}), 0);
EXPECT_EQ(countl_one(~uint32_t{}), 32);
EXPECT_EQ(countl_one(uint64_t{}), 0);
EXPECT_EQ(countl_one(~uint64_t{}), 64);
}
TEST(Counting, TrailingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
static_assert(countr_zero(uint8_t{}) == 8, "");
static_assert(countr_zero(static_cast<uint8_t>(-1)) == 0, "");
static_assert(countr_zero(uint16_t{}) == 16, "");
static_assert(countr_zero(static_cast<uint16_t>(-1)) == 0, "");
static_assert(countr_zero(uint32_t{}) == 32, "");
static_assert(countr_zero(~uint32_t{}) == 0, "");
static_assert(countr_zero(uint64_t{}) == 64, "");
static_assert(countr_zero(~uint64_t{}) == 0, "");
#endif
EXPECT_EQ(countr_zero(uint8_t{}), 8);
EXPECT_EQ(countr_zero(static_cast<uint8_t>(-1)), 0);
EXPECT_EQ(countr_zero(uint16_t{}), 16);
EXPECT_EQ(countr_zero(static_cast<uint16_t>(-1)), 0);
EXPECT_EQ(countr_zero(uint32_t{}), 32);
EXPECT_EQ(countr_zero(~uint32_t{}), 0);
EXPECT_EQ(countr_zero(uint64_t{}), 64);
EXPECT_EQ(countr_zero(~uint64_t{}), 0);
}
TEST(Counting, TrailingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
static_assert(countr_one(uint8_t{}) == 0, "");
static_assert(countr_one(static_cast<uint8_t>(-1)) == 8, "");
static_assert(countr_one(uint16_t{}) == 0, "");
static_assert(countr_one(static_cast<uint16_t>(-1)) == 16, "");
static_assert(countr_one(uint32_t{}) == 0, "");
static_assert(countr_one(~uint32_t{}) == 32, "");
static_assert(countr_one(uint64_t{}) == 0, "");
static_assert(countr_one(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(countr_one(uint8_t{}), 0);
EXPECT_EQ(countr_one(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(countr_one(uint16_t{}), 0);
EXPECT_EQ(countr_one(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(countr_one(uint32_t{}), 0);
EXPECT_EQ(countr_one(~uint32_t{}), 32);
EXPECT_EQ(countr_one(uint64_t{}), 0);
EXPECT_EQ(countr_one(~uint64_t{}), 64);
}
TEST(Counting, Popcount) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
static_assert(popcount(uint8_t{}) == 0, "");
static_assert(popcount(uint8_t{1}) == 1, "");
static_assert(popcount(static_cast<uint8_t>(-1)) == 8, "");
static_assert(popcount(uint16_t{}) == 0, "");
static_assert(popcount(uint16_t{1}) == 1, "");
static_assert(popcount(static_cast<uint16_t>(-1)) == 16, "");
static_assert(popcount(uint32_t{}) == 0, "");
static_assert(popcount(uint32_t{1}) == 1, "");
static_assert(popcount(~uint32_t{}) == 32, "");
static_assert(popcount(uint64_t{}) == 0, "");
static_assert(popcount(uint64_t{1}) == 1, "");
static_assert(popcount(~uint64_t{}) == 64, "");
#endif // ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
EXPECT_EQ(popcount(uint8_t{}), 0);
EXPECT_EQ(popcount(uint8_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(popcount(uint16_t{}), 0);
EXPECT_EQ(popcount(uint16_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(popcount(uint32_t{}), 0);
EXPECT_EQ(popcount(uint32_t{1}), 1);
EXPECT_EQ(popcount(~uint32_t{}), 32);
EXPECT_EQ(popcount(uint64_t{}), 0);
EXPECT_EQ(popcount(uint64_t{1}), 1);
EXPECT_EQ(popcount(~uint64_t{}), 64);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(popcount(static_cast<uint8_t>(uint8_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(static_cast<uint8_t>(-1) ^
(uint8_t{1} << i))),
7);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(popcount(static_cast<uint16_t>(uint16_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(static_cast<uint16_t>(-1) ^
(uint16_t{1} << i))),
15);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(popcount(uint32_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint32_t>(-1) ^ (uint32_t{1} << i)), 31);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(popcount(uint64_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint64_t>(-1) ^ (uint64_t{1} << i)), 63);
}
}
template <typename T>
struct PopcountInput {
T value = 0;
int expected = 0;
};
template <typename T>
PopcountInput<T> GeneratePopcountInput(absl::BitGen& gen) {
PopcountInput<T> ret;
for (int i = 0; i < std::numeric_limits<T>::digits; i++) {
bool coin = absl::Bernoulli(gen, 0.2);
if (coin) {
ret.value |= T{1} << i;
ret.expected++;
}
}
return ret;
}
TEST(Counting, PopcountFuzz) {
absl::BitGen rng;
constexpr int kTrials = 100;
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint8_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint16_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint32_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint64_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
}
TEST(IntegralPowersOfTwo, SingleBit) {
EXPECT_FALSE(has_single_bit(uint8_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint8_t>(-1)));
EXPECT_FALSE(has_single_bit(uint16_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint16_t>(-1)));
EXPECT_FALSE(has_single_bit(uint32_t{}));
EXPECT_FALSE(has_single_bit(~uint32_t{}));
EXPECT_FALSE(has_single_bit(uint64_t{}));
EXPECT_FALSE(has_single_bit(~uint64_t{}));
static_assert(!has_single_bit(0u), "");
static_assert(has_single_bit(1u), "");
static_assert(has_single_bit(2u), "");
static_assert(!has_single_bit(3u), "");
static_assert(has_single_bit(4u), "");
static_assert(!has_single_bit(1337u), "");
static_assert(has_single_bit(65536u), "");
static_assert(has_single_bit(uint32_t{1} << 30), "");
static_assert(has_single_bit(uint64_t{1} << 42), "");
EXPECT_FALSE(has_single_bit(0u));
EXPECT_TRUE(has_single_bit(1u));
EXPECT_TRUE(has_single_bit(2u));
EXPECT_FALSE(has_single_bit(3u));
EXPECT_TRUE(has_single_bit(4u));
EXPECT_FALSE(has_single_bit(1337u));
EXPECT_TRUE(has_single_bit(65536u));
EXPECT_TRUE(has_single_bit(uint32_t{1} << 30));
EXPECT_TRUE(has_single_bit(uint64_t{1} << 42));
EXPECT_TRUE(has_single_bit(
static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint16_t>(std::numeric_limits<uint16_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint32_t>(std::numeric_limits<uint32_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint64_t>(std::numeric_limits<uint64_t>::max() / 2 + 1)));
}
template <typename T, T arg, T = bit_ceil(arg)>
bool IsBitCeilConstantExpression(int) {
return true;
}
template <typename T, T arg>
bool IsBitCeilConstantExpression(char) {
return false;
}
TEST(IntegralPowersOfTwo, Ceiling) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_ceil(0u) == 1, "");
static_assert(bit_ceil(1u) == 1, "");
static_assert(bit_ceil(2u) == 2, "");
static_assert(bit_ceil(3u) == 4, "");
static_assert(bit_ceil(4u) == 4, "");
static_assert(bit_ceil(1337u) == 2048, "");
static_assert(bit_ceil(65536u) == 65536, "");
static_assert(bit_ceil(65536u - 1337u) == 65536, "");
static_assert(bit_ceil(uint32_t{0x80000000}) == uint32_t{0x80000000}, "");
static_assert(bit_ceil(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
static_assert(
bit_ceil(uint64_t{0x8000000000000000}) == uint64_t{0x8000000000000000},
"");
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x80}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x81}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0xff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8000}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8001}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0xffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0xffffffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint64_t, uint64_t{0x0}>(0)));
EXPECT_TRUE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0xffffffffffffffff}>(0)));
#endif
EXPECT_EQ(bit_ceil(0u), 1);
EXPECT_EQ(bit_ceil(1u), 1);
EXPECT_EQ(bit_ceil(2u), 2);
EXPECT_EQ(bit_ceil(3u), 4);
EXPECT_EQ(bit_ceil(4u), 4);
EXPECT_EQ(bit_ceil(1337u), 2048);
EXPECT_EQ(bit_ceil(65536u), 65536);
EXPECT_EQ(bit_ceil(65536u - 1337u), 65536);
EXPECT_EQ(bit_ceil(uint64_t{0x40000000000}), uint64_t{0x40000000000});
}
TEST(IntegralPowersOfTwo, Floor) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_floor(0u) == 0, "");
static_assert(bit_floor(1u) == 1, "");
static_assert(bit_floor(2u) == 2, "");
static_assert(bit_floor(3u) == 2, "");
static_assert(bit_floor(4u) == 4, "");
static_assert(bit_floor(1337u) == 1024, "");
static_assert(bit_floor(65536u) == 65536, "");
static_assert(bit_floor(65536u - 1337u) == 32768, "");
static_assert(bit_floor(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
#endif
EXPECT_EQ(bit_floor(0u), 0);
EXPECT_EQ(bit_floor(1u), 1);
EXPECT_EQ(bit_floor(2u), 2);
EXPECT_EQ(bit_floor(3u), 2);
EXPECT_EQ(bit_floor(4u), 4);
EXPECT_EQ(bit_floor(1337u), 1024);
EXPECT_EQ(bit_floor(65536u), 65536);
EXPECT_EQ(bit_floor(65536u - 1337u), 32768);
EXPECT_EQ(bit_floor(uint64_t{0x40000000000}), uint64_t{0x40000000000});
for (int i = 0; i < 8; i++) {
uint8_t input = uint8_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint8_t>(input + 1)), input);
}
}
for (int i = 0; i < 16; i++) {
uint16_t input = uint16_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint16_t>(input + 1)), input);
}
}
for (int i = 0; i < 32; i++) {
uint32_t input = uint32_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
for (int i = 0; i < 64; i++) {
uint64_t input = uint64_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
}
TEST(IntegralPowersOfTwo, Width) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_width(uint8_t{}) == 0, "");
static_assert(bit_width(uint8_t{1}) == 1, "");
static_assert(bit_width(uint8_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint8_t>(-1)) == 8, "");
static_assert(bit_width(uint16_t{}) == 0, "");
static_assert(bit_width(uint16_t{1}) == 1, "");
static_assert(bit_width(uint16_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint16_t>(-1)) == 16, "");
static_assert(bit_width(uint32_t{}) == 0, "");
static_assert(bit_width(uint32_t{1}) == 1, "");
static_assert(bit_width(uint32_t{3}) == 2, "");
static_assert(bit_width(~uint32_t{}) == 32, "");
static_assert(bit_width(uint64_t{}) == 0, "");
static_assert(bit_width(uint64_t{1}) == 1, "");
static_assert(bit_width(uint64_t{3}) == 2, "");
static_assert(bit_width(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(bit_width(uint8_t{}), 0);
EXPECT_EQ(bit_width(uint8_t{1}), 1);
EXPECT_EQ(bit_width(uint8_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(bit_width(uint16_t{}), 0);
EXPECT_EQ(bit_width(uint16_t{1}), 1);
EXPECT_EQ(bit_width(uint16_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(bit_width(uint32_t{}), 0);
EXPECT_EQ(bit_width(uint32_t{1}), 1);
EXPECT_EQ(bit_width(uint32_t{3}), 2);
EXPECT_EQ(bit_width(~uint32_t{}), 32);
EXPECT_EQ(bit_width(uint64_t{}), 0);
EXPECT_EQ(bit_width(uint64_t{1}), 1);
EXPECT_EQ(bit_width(uint64_t{3}), 2);
EXPECT_EQ(bit_width(~uint64_t{}), 64);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(bit_width(static_cast<uint8_t>(uint8_t{1} << i)), i + 1);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(bit_width(static_cast<uint16_t>(uint16_t{1} << i)), i + 1);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(bit_width(uint32_t{1} << i), i + 1);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(bit_width(uint64_t{1} << i), i + 1);
}
}
} // namespace
ABSL_NAMESPACE_END
} // namespace absl

@ -0,0 +1,350 @@
// Copyright 2020 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
#define ABSL_NUMERIC_INTERNAL_BITS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
// Clang on Windows has __builtin_clzll; otherwise we need to use the
// windows intrinsic functions.
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#if defined(_M_X64)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
#endif
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if ABSL_HAVE_BUILTIN(__builtin_popcountl) && \
ABSL_HAVE_BUILTIN(__builtin_popcountll)
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
#else
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
#endif
#if ABSL_HAVE_BUILTIN(__builtin_clz) && ABSL_HAVE_BUILTIN(__builtin_clzll)
#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CLZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
#endif
#if ABSL_HAVE_BUILTIN(__builtin_ctz) && ABSL_HAVE_BUILTIN(__builtin_ctzll)
#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CTZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace numeric_internal {
constexpr bool IsPowerOf2(unsigned int x) noexcept {
return x != 0 && (x & (x - 1)) == 0;
}
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
}
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
}
ABSL_INTERNAL_CONSTEXPR_POPCOUNT int Popcount32(uint32_t x) noexcept {
#if ABSL_HAVE_BUILTIN(__builtin_popcount)
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_popcount does not take 32-bit arg");
return __builtin_popcount(x);
#else
x -= ((x >> 1) & 0x55555555);
x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
#endif
}
ABSL_INTERNAL_CONSTEXPR_POPCOUNT int Popcount64(uint64_t x) noexcept {
#if ABSL_HAVE_BUILTIN(__builtin_popcountll)
static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
"__builtin_popcount does not take 64-bit arg");
return __builtin_popcountll(x);
#else
x -= (x >> 1) & 0x5555555555555555ULL;
x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
return static_cast<int>(
(((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
#endif
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount(T x) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes32(uint32_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clz)
// Use __builtin_clz, which uses the following instructions:
// x86: bsr, lzcnt
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_clz does not take 32-bit arg");
// Handle 0 as a special case because __builtin_clz(0) is undefined.
return x == 0 ? 32 : __builtin_clz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse(&result, x)) {
return 31 - result;
}
return 32;
#else
int zeroes = 28;
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clzs)
static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
"__builtin_clzs does not take 16-bit arg");
return x == 0 ? 16 : __builtin_clzs(x);
#else
return CountLeadingZeroes32(x) - 16;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes64(uint64_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clzll)
// Use __builtin_clzll, which uses the following instructions:
// x86: bsr, lzcnt
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
"__builtin_clzll does not take 64-bit arg");
// Handle 0 as a special case because __builtin_clzll(0) is undefined.
return x == 0 ? 64 : __builtin_clzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, x)) {
return 63 - result;
}
return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
if ((x >> 32) &&
_BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
return 31 - result;
}
if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
return 63 - result;
}
return 64;
#else
int zeroes = 60;
if (x >> 32) {
zeroes -= 32;
x >>= 32;
}
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
template <typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes(T x) {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
return sizeof(T) <= sizeof(uint16_t)
? CountLeadingZeroes16(x) -
(std::numeric_limits<uint16_t>::digits -
std::numeric_limits<T>::digits)
: (sizeof(T) <= sizeof(uint32_t)
? CountLeadingZeroes32(x) -
(std::numeric_limits<uint32_t>::digits -
std::numeric_limits<T>::digits)
: CountLeadingZeroes64(x));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero32(uint32_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_ctz)
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_ctz does not take 32-bit arg");
return __builtin_ctz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward(&result, x);
return result;
#else
int c = 31;
x &= ~x + 1;
if (x & 0x0000FFFF) c -= 16;
if (x & 0x00FF00FF) c -= 8;
if (x & 0x0F0F0F0F) c -= 4;
if (x & 0x33333333) c -= 2;
if (x & 0x55555555) c -= 1;
return c;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero64(uint64_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_ctzll)
static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
"__builtin_ctzll does not take 64-bit arg");
return __builtin_ctzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward64(&result, x);
return result;
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (static_cast<uint32_t>(x) == 0) {
_BitScanForward(&result, static_cast<unsigned long>(x >> 32));
return result + 32;
}
_BitScanForward(&result, static_cast<unsigned long>(x));
return result;
#else
int c = 63;
x &= ~x + 1;
if (x & 0x00000000FFFFFFFF) c -= 32;
if (x & 0x0000FFFF0000FFFF) c -= 16;
if (x & 0x00FF00FF00FF00FF) c -= 8;
if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
if (x & 0x3333333333333333) c -= 2;
if (x & 0x5555555555555555) c -= 1;
return c;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
"__builtin_ctzs does not take 16-bit arg");
return __builtin_ctzs(x);
#else
return CountTrailingZeroesNonzero32(x);
#endif
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroes(T x) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
return x == 0 ? std::numeric_limits<T>::digits
: (sizeof(T) <= sizeof(uint16_t)
? CountTrailingZeroesNonzero16(x)
: (sizeof(T) <= sizeof(uint32_t)
? CountTrailingZeroesNonzero32(x)
: CountTrailingZeroesNonzero64(x)));
}
// If T is narrower than unsigned, T{1} << bit_width will be promoted. We
// want to force it to wrap around so that bit_ceil of an invalid value is not
// a core constant expression.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
BitCeilPromotionHelper(T x, T promotion) {
return (T{1} << (x + promotion)) >> promotion;
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
BitCeilNonPowerOf2(T x) {
// If T is narrower than unsigned, it undergoes promotion to unsigned when we
// shift. We calculate the number of bits added by the wider type.
return BitCeilPromotionHelper(
static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
T{sizeof(T) >= sizeof(unsigned) ? 0
: std::numeric_limits<unsigned>::digits -
std::numeric_limits<T>::digits});
}
} // namespace numeric_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_NUMERIC_INTERNAL_BITS_H_

@ -673,7 +673,7 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::bits
absl::internal_bits
absl::random_internal_fastmath
absl::random_internal_traits
absl::type_traits
@ -690,7 +690,7 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::bits
absl::internal_bits
absl::config
absl::int128
)
@ -706,7 +706,7 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::bits
absl::internal_bits
)
# Internal-only target, do not depend on directly.
@ -902,7 +902,7 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::bits
absl::internal_bits
absl::flags
absl::random_internal_generate_real
gtest_main
@ -1201,7 +1201,7 @@ absl_cc_test(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::random_internal_wide_multiply
absl::bits
absl::internal_bits
absl::int128
gtest_main
)

@ -56,7 +56,7 @@ absl_cc_library(
DEPS
absl::strings_internal
absl::base
absl::bits
absl::internal_bits
absl::config
absl::core_headers
absl::endian
@ -406,7 +406,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bits
absl::internal_bits
absl::strings
absl::config
absl::core_headers

@ -1298,26 +1298,23 @@ void Cord::CopyToArraySlowPath(char* dst) const {
}
}
Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
"Attempted to iterate past `end()`");
assert(bytes_remaining_ >= current_chunk_.size());
bytes_remaining_ -= current_chunk_.size();
if (stack_of_right_children_.empty()) {
Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
assert(absl::holds_alternative<Stack>(context_));
auto& stack_of_right_children = absl::get<Stack>(context_);
if (stack_of_right_children.empty()) {
assert(!current_chunk_.empty()); // Called on invalid iterator.
// We have reached the end of the Cord.
return *this;
}
// Process the next node on the stack.
CordRep* node = stack_of_right_children_.back();
stack_of_right_children_.pop_back();
CordRep* node = stack_of_right_children.back();
stack_of_right_children.pop_back();
// Walk down the left branches until we hit a non-CONCAT node. Save the
// right children to the stack for subsequent traversal.
while (node->tag == CONCAT) {
stack_of_right_children_.push_back(node->concat()->right);
stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
}
@ -1360,6 +1357,8 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
}
return subcord;
}
assert(absl::holds_alternative<Stack>(context_));
auto& stack_of_right_children = absl::get<Stack>(context_);
if (n < current_chunk_.size()) {
// Range to read is a proper subrange of the current chunk.
assert(current_leaf_ != nullptr);
@ -1388,13 +1387,13 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// Process the next node(s) on the stack, reading whole subtrees depending on
// their length and how many bytes we are advancing.
CordRep* node = nullptr;
while (!stack_of_right_children_.empty()) {
node = stack_of_right_children_.back();
stack_of_right_children_.pop_back();
while (!stack_of_right_children.empty()) {
node = stack_of_right_children.back();
stack_of_right_children.pop_back();
if (node->length > n) break;
// TODO(qrczak): This might unnecessarily recreate existing concat nodes.
// Avoiding that would need pretty complicated logic (instead of
// current_leaf_, keep current_subtree_ which points to the highest node
// current_leaf, keep current_subtree_ which points to the highest node
// such that the current leaf can be found on the path of left children
// starting from current_subtree_; delay creating subnode while node is
// below current_subtree_; find the proper node along the path of left
@ -1419,7 +1418,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
while (node->tag == CONCAT) {
if (node->concat()->left->length > n) {
// Push right, descend left.
stack_of_right_children_.push_back(node->concat()->right);
stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
} else {
// Read left, descend right.
@ -1462,12 +1461,20 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
n -= current_chunk_.size();
bytes_remaining_ -= current_chunk_.size();
if (!absl::holds_alternative<Stack>(context_)) {
// We have reached the end of the Cord.
assert(bytes_remaining_ == 0);
return;
}
// Process the next node(s) on the stack, skipping whole subtrees depending on
// their length and how many bytes we are advancing.
CordRep* node = nullptr;
while (!stack_of_right_children_.empty()) {
node = stack_of_right_children_.back();
stack_of_right_children_.pop_back();
assert(absl::holds_alternative<Stack>(context_));
auto& stack_of_right_children = absl::get<Stack>(context_);
while (!stack_of_right_children.empty()) {
node = stack_of_right_children.back();
stack_of_right_children.pop_back();
if (node->length > n) break;
n -= node->length;
bytes_remaining_ -= node->length;
@ -1485,7 +1492,7 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
while (node->tag == CONCAT) {
if (node->concat()->left->length > n) {
// Push right, descend left.
stack_of_right_children_.push_back(node->concat()->right);
stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
} else {
// Skip left, descend right.

@ -362,6 +362,8 @@ class Cord {
friend class CharIterator;
private:
using Stack = absl::InlinedVector<absl::cord_internal::CordRep*, 4>;
// Constructs a `begin()` iterator from `cord`.
explicit ChunkIterator(const Cord* cord);
@ -370,6 +372,10 @@ class Cord {
void RemoveChunkPrefix(size_t n);
Cord AdvanceAndReadBytes(size_t n);
void AdvanceBytes(size_t n);
// Stack specific operator++
ChunkIterator& AdvanceStack();
// Iterates `n` bytes, where `n` is expected to be greater than or equal to
// `current_chunk_.size()`.
void AdvanceBytesSlowPath(size_t n);
@ -383,8 +389,10 @@ class Cord {
absl::cord_internal::CordRep* current_leaf_ = nullptr;
// The number of bytes left in the `Cord` over which we are iterating.
size_t bytes_remaining_ = 0;
absl::InlinedVector<absl::cord_internal::CordRep*, 4>
stack_of_right_children_;
// Context of this chunk iterator, can be one of:
// - monostate: iterator holds only one chunk or is empty.
// - Stack : iterator holds a concat / substring tree
absl::variant<absl::monostate, Stack> context_;
};
// Cord::ChunkIterator::chunk_begin()
@ -1100,13 +1108,29 @@ inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
: bytes_remaining_(cord->size()) {
if (cord->empty()) return;
if (cord->contents_.is_tree()) {
stack_of_right_children_.push_back(cord->contents_.tree());
Stack& stack_of_right_children = context_.emplace<Stack>();
stack_of_right_children.push_back(cord->contents_.tree());
operator++();
} else {
current_chunk_ = absl::string_view(cord->contents_.data(), cord->size());
}
}
inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
"Attempted to iterate past `end()`");
assert(bytes_remaining_ >= current_chunk_.size());
bytes_remaining_ -= current_chunk_.size();
if (bytes_remaining_ > 0) {
if (absl::holds_alternative<Stack>(context_)) {
return AdvanceStack();
}
} else {
current_chunk_ = {};
}
return *this;
}
inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) {
ChunkIterator tmp(*this);
operator++();

@ -24,7 +24,10 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(false);
ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault);
ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault);
void CordRep::Destroy(CordRep* rep) {
assert(rep != nullptr);

@ -31,12 +31,23 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
// Default feature enable states for cord ring buffers
enum CordFeatureDefaults {
kCordEnableRingBufferDefault = false,
kCordShallowSubcordsDefault = false
};
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
inline void enable_shallow_subcords(bool enable) {
shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
}
enum Constants {
// The inlined size to use with absl::InlinedVector.
//
