Export of internal Abseil changes

--
4833151c207fac9f57a735efe6d5db4c83368415 by Gennadiy Rozental <rogeeff@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 320398694

--
a1becb36b223230f0a45f204a5fb33b83d2deffe by Gennadiy Rozental <rogeeff@google.com>:

Update CMakeLists.txt

Import of https://github.com/abseil/abseil-cpp/pull/737

PiperOrigin-RevId: 320391906

--
b529c45856fe7a3447f1f3259286d57e13b1f292 by Abseil Team <absl-team@google.com>:

Improves a comment about use of absl::Condition.

PiperOrigin-RevId: 320384329

--
c7b1dacda2739c10dc1ccbfb56b07ed7fe2464a4 by Laramie Leavitt <lar@google.com>:

Improve FastUniformBits performance for std::minstd_rand.

The rejection algorithm was too pessimistic before, and not in line with [rand.adapt.ibits]. Specifically, when sampling from an URBG with a non power of 2 range, FastUniformBits constructed a rejection threshold with a power-of-2 range that was too restrictive.

For example, minstd_rand generates values in the closed interval
  [1, 2147483646], which corresponds to a range of 2145386495, or about 30.999 bits.

Before FastUniformBits rejected values between 1<<30 and 2145386495, which includes approximately 50% of the generated values. However, since a minimum of 3 calls are required to generate a full 64-bit value from an entropy pool of 30.9 bits, the correct value for rejection sampling is the range value which masks 21 (0x7fe00000) or 22 bits and rejects values greater than that.  This reduces the probability of rejecting a sample to about 0.1%

NOTE: Abseil random does not guarantee sequence stability over time, and this is expected to change sequences in some cases.
PiperOrigin-RevId: 320285836

--
15800a39557a07dd52e0add66a0ab67aed00590b by Gennadiy Rozental <rogeeff@google.com>:

Internal change.

PiperOrigin-RevId: 320220913

--
ef39348360873f6d19669755fe0b5d09a945a501 by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 320181729

--
4f9f6ef8034a24da1832e4c838c72f80fc2ea062 by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 320176084

--
6bfc8008462801657d231585bd5c37fc18bb25b6 by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 320176070

--
b35b055ab1f41e6056031ff0641cabab23530027 by Abseil Team <absl-team@google.com>:

Disabling using header module as well as building one for randen_hwaes_impl

PiperOrigin-RevId: 320024299
GitOrigin-RevId: 4833151c207fac9f57a735efe6d5db4c83368415
Change-Id: I9cf102dbf46ed07752a508b7cda3ab3858857d0d
pull/739/head
Abseil Team 5 years ago committed by Gennadiy Rozental
parent bf655de09b
commit d5269a8b6d
  1. 4
      CMake/AbseilHelpers.cmake
  2. 2
      CMake/AbseilInstallDirs.cmake
  3. 2
      absl/container/inlined_vector.h
  4. 12
      absl/flags/reflection.cc
  5. 9
      absl/random/internal/BUILD.bazel
  6. 202
      absl/random/internal/fast_uniform_bits.h
  7. 318
      absl/random/internal/fast_uniform_bits_test.cc
  8. 5
      absl/synchronization/mutex.h
  9. 22
      absl/time/internal/cctz/src/time_zone_format.cc
  10. 5
      absl/time/internal/cctz/src/time_zone_format_test.cc
  11. 9
      absl/time/internal/cctz/src/time_zone_libc.cc
  12. 8
      absl/time/internal/cctz/src/tzfile.h
  13. 2
      absl/types/CMakeLists.txt

@ -23,7 +23,9 @@ include(AbseilInstallDirs)
# project that sets # project that sets
# set_property(GLOBAL PROPERTY USE_FOLDERS ON) # set_property(GLOBAL PROPERTY USE_FOLDERS ON)
# For example, Visual Studio supports folders. # For example, Visual Studio supports folders.
set(ABSL_IDE_FOLDER Abseil) if(NOT DEFINED ABSL_IDE_FOLDER)
set(ABSL_IDE_FOLDER Abseil)
endif()
# absl_cc_library() # absl_cc_library()
# #

@ -10,7 +10,7 @@ if(absl_VERSION)
set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}") set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}")
set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}")
set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}") set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}")
set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/{ABSL_SUBDIR}") set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}")
set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}")
else() else()
set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}") set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}")

@ -64,7 +64,7 @@ ABSL_NAMESPACE_BEGIN
// `std::vector` for use cases where the vector's size is sufficiently small // `std::vector` for use cases where the vector's size is sufficiently small
// that it can be inlined. If the inlined vector does grow beyond its estimated // that it can be inlined. If the inlined vector does grow beyond its estimated
// capacity, it will trigger an initial allocation on the heap, and will behave // capacity, it will trigger an initial allocation on the heap, and will behave
// as a `std:vector`. The API of the `absl::InlinedVector` within this file is // as a `std::vector`. The API of the `absl::InlinedVector` within this file is
// designed to cover the same API footprint as covered by `std::vector`. // designed to cover the same API footprint as covered by `std::vector`.
template <typename T, size_t N, typename A = std::allocator<T>> template <typename T, size_t N, typename A = std::allocator<T>>
class InlinedVector { class InlinedVector {

@ -58,10 +58,6 @@ class FlagRegistry {
// Will emit a warning if a 'retired' flag is specified. // Will emit a warning if a 'retired' flag is specified.
CommandLineFlag* FindFlagLocked(absl::string_view name); CommandLineFlag* FindFlagLocked(absl::string_view name);
// Returns the retired flag object for the specified name, or nullptr if not
// found or not retired. Does not emit a warning.
CommandLineFlag* FindRetiredFlagLocked(absl::string_view name);
static FlagRegistry& GlobalRegistry(); // returns a singleton registry static FlagRegistry& GlobalRegistry(); // returns a singleton registry
private: private:
@ -88,14 +84,6 @@ CommandLineFlag* FlagRegistry::FindFlagLocked(absl::string_view name) {
if (i == flags_.end()) { if (i == flags_.end()) {
return nullptr; return nullptr;
} }
return i->second;
}
CommandLineFlag* FlagRegistry::FindRetiredFlagLocked(absl::string_view name) {
FlagConstIterator i = flags_.find(name);
if (i == flags_.end() || !i->second->IsRetired()) {
return nullptr;
}
return i->second; return i->second;
} }

@ -59,7 +59,10 @@ cc_library(
], ],
copts = ABSL_DEFAULT_COPTS, copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"], deps = [
"//absl/base:config",
"//absl/meta:type_traits",
],
) )
cc_library( cc_library(
@ -319,10 +322,6 @@ cc_library(
"//absl:windows": [], "//absl:windows": [],
"//conditions:default": ["-Wno-pass-failed"], "//conditions:default": ["-Wno-pass-failed"],
}), }),
# copts in RANDEN_HWAES_COPTS can make this target unusable as a module
# leading to a Clang diagnostic. Furthermore, it only has a private header
# anyway and thus there wouldn't be any gain from using it as a module.
features = ["-header_modules"],
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
":platform", ":platform",

@ -21,6 +21,7 @@
#include <type_traits> #include <type_traits>
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/meta/type_traits.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
@ -38,28 +39,17 @@ constexpr bool IsPowerOfTwoOrZero(UIntType n) {
template <typename URBG> template <typename URBG>
constexpr typename URBG::result_type RangeSize() { constexpr typename URBG::result_type RangeSize() {
using result_type = typename URBG::result_type; using result_type = typename URBG::result_type;
static_assert((URBG::max)() != (URBG::min)(), "URBG range cannot be 0.");
return ((URBG::max)() == (std::numeric_limits<result_type>::max)() && return ((URBG::max)() == (std::numeric_limits<result_type>::max)() &&
(URBG::min)() == std::numeric_limits<result_type>::lowest()) (URBG::min)() == std::numeric_limits<result_type>::lowest())
? result_type{0} ? result_type{0}
: (URBG::max)() - (URBG::min)() + result_type{1}; : ((URBG::max)() - (URBG::min)() + result_type{1});
}
template <typename UIntType>
constexpr UIntType LargestPowerOfTwoLessThanOrEqualTo(UIntType n) {
return n < 2 ? n : 2 * LargestPowerOfTwoLessThanOrEqualTo(n / 2);
}
// Given a URBG generating values in the closed interval [Lo, Hi], returns the
// largest power of two less than or equal to `Hi - Lo + 1`.
template <typename URBG>
constexpr typename URBG::result_type PowerOfTwoSubRangeSize() {
return LargestPowerOfTwoLessThanOrEqualTo(RangeSize<URBG>());
} }
// Computes the floor of the log. (i.e., std::floor(std::log2(N)); // Computes the floor of the log. (i.e., std::floor(std::log2(N));
template <typename UIntType> template <typename UIntType>
constexpr UIntType IntegerLog2(UIntType n) { constexpr UIntType IntegerLog2(UIntType n) {
return (n <= 1) ? 0 : 1 + IntegerLog2(n / 2); return (n <= 1) ? 0 : 1 + IntegerLog2(n >> 1);
} }
// Returns the number of bits of randomness returned through // Returns the number of bits of randomness returned through
@ -68,18 +58,23 @@ template <typename URBG>
constexpr size_t NumBits() { constexpr size_t NumBits() {
return RangeSize<URBG>() == 0 return RangeSize<URBG>() == 0
? std::numeric_limits<typename URBG::result_type>::digits ? std::numeric_limits<typename URBG::result_type>::digits
: IntegerLog2(PowerOfTwoSubRangeSize<URBG>()); : IntegerLog2(RangeSize<URBG>());
} }
// Given a shift value `n`, constructs a mask with exactly the low `n` bits set. // Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
// If `n == 0`, all bits are set. // If `n == 0`, all bits are set.
template <typename UIntType> template <typename UIntType>
constexpr UIntType MaskFromShift(UIntType n) { constexpr UIntType MaskFromShift(size_t n) {
return ((n % std::numeric_limits<UIntType>::digits) == 0) return ((n % std::numeric_limits<UIntType>::digits) == 0)
? ~UIntType{0} ? ~UIntType{0}
: (UIntType{1} << n) - UIntType{1}; : (UIntType{1} << n) - UIntType{1};
} }
// Tags used to dispatch FastUniformBits::generate to the simple or more complex
// entropy extraction algorithm.
struct SimplifiedLoopTag {};
struct RejectionLoopTag {};
// FastUniformBits implements a fast path to acquire uniform independent bits // FastUniformBits implements a fast path to acquire uniform independent bits
// from a type which conforms to the [rand.req.urbg] concept. // from a type which conforms to the [rand.req.urbg] concept.
// Parameterized by: // Parameterized by:
@ -107,50 +102,16 @@ class FastUniformBits {
"Class-template FastUniformBits<> must be parameterized using " "Class-template FastUniformBits<> must be parameterized using "
"an unsigned type."); "an unsigned type.");
// PowerOfTwoVariate() generates a single random variate, always returning a
// value in the half-open interval `[0, PowerOfTwoSubRangeSize<URBG>())`. If
// the URBG already generates values in a power-of-two range, the generator
// itself is used. Otherwise, we use rejection sampling on the largest
// possible power-of-two-sized subrange.
struct PowerOfTwoTag {};
struct RejectionSamplingTag {};
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g) { // NOLINT(runtime/references)
using tag =
typename std::conditional<IsPowerOfTwoOrZero(RangeSize<URBG>()),
PowerOfTwoTag, RejectionSamplingTag>::type;
return PowerOfTwoVariate(g, tag{});
}
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g, // NOLINT(runtime/references)
PowerOfTwoTag) {
return g() - (URBG::min)();
}
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g, // NOLINT(runtime/references)
RejectionSamplingTag) {
// Use rejection sampling to ensure uniformity across the range.
typename URBG::result_type u;
do {
u = g() - (URBG::min)();
} while (u >= PowerOfTwoSubRangeSize<URBG>());
return u;
}
// Generate() generates a random value, dispatched on whether // Generate() generates a random value, dispatched on whether
// the underlying URBG must loop over multiple calls or not. // the underlying URBG must use rejection sampling to generate a value,
// or whether a simplified loop will suffice.
template <typename URBG> template <typename URBG>
result_type Generate(URBG& g, // NOLINT(runtime/references) result_type Generate(URBG& g, // NOLINT(runtime/references)
std::true_type /* avoid_looping */); SimplifiedLoopTag);
template <typename URBG> template <typename URBG>
result_type Generate(URBG& g, // NOLINT(runtime/references) result_type Generate(URBG& g, // NOLINT(runtime/references)
std::false_type /* avoid_looping */); RejectionLoopTag);
}; };
template <typename UIntType> template <typename UIntType>
@ -162,31 +123,47 @@ FastUniformBits<UIntType>::operator()(URBG& g) { // NOLINT(runtime/references)
// Y = (2 ^ kRange) - 1 // Y = (2 ^ kRange) - 1
static_assert((URBG::max)() > (URBG::min)(), static_assert((URBG::max)() > (URBG::min)(),
"URBG::max and URBG::min may not be equal."); "URBG::max and URBG::min may not be equal.");
using urbg_result_type = typename URBG::result_type;
constexpr urbg_result_type kRangeMask = using tag = absl::conditional_t<IsPowerOfTwoOrZero(RangeSize<URBG>()),
RangeSize<URBG>() == 0 SimplifiedLoopTag, RejectionLoopTag>;
? (std::numeric_limits<urbg_result_type>::max)() return Generate(g, tag{});
: static_cast<urbg_result_type>(PowerOfTwoSubRangeSize<URBG>() - 1);
return Generate(g, std::integral_constant<bool, (kRangeMask >= (max)())>{});
} }
template <typename UIntType> template <typename UIntType>
template <typename URBG> template <typename URBG>
typename FastUniformBits<UIntType>::result_type typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
std::true_type /* avoid_looping */) { SimplifiedLoopTag) {
// The width of the result_type is less than than the width of the random bits // The simplified version of FastUniformBits works only on URBGs that have
// provided by URBG. Thus, generate a single value and then simply mask off // a range that is a power of 2. In this case we simply loop and shift without
// the required bits. // attempting to balance the bits across calls.
static_assert(IsPowerOfTwoOrZero(RangeSize<URBG>()),
"incorrect Generate tag for URBG instance");
static constexpr size_t kResultBits =
std::numeric_limits<result_type>::digits;
static constexpr size_t kUrbgBits = NumBits<URBG>();
static constexpr size_t kIters =
(kResultBits / kUrbgBits) + (kResultBits % kUrbgBits != 0);
static constexpr size_t kShift = (kIters == 1) ? 0 : kUrbgBits;
static constexpr auto kMin = (URBG::min)();
return PowerOfTwoVariate(g) & (max)(); result_type r = static_cast<result_type>(g() - kMin);
for (size_t n = 1; n < kIters; ++n) {
r = (r << kShift) + static_cast<result_type>(g() - kMin);
}
return r;
} }
template <typename UIntType> template <typename UIntType>
template <typename URBG> template <typename URBG>
typename FastUniformBits<UIntType>::result_type typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
std::false_type /* avoid_looping */) { RejectionLoopTag) {
static_assert(!IsPowerOfTwoOrZero(RangeSize<URBG>()),
"incorrect Generate tag for URBG instance");
using urbg_result_type = typename URBG::result_type;
// See [rand.adapt.ibits] for more details on the constants calculated below. // See [rand.adapt.ibits] for more details on the constants calculated below.
// //
// It is preferable to use roughly the same number of bits from each generator // It is preferable to use roughly the same number of bits from each generator
@ -199,21 +176,44 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
// `kSmallIters` and `kLargeIters` times respectively such // `kSmallIters` and `kLargeIters` times respectively such
// that // that
// //
// `kTotalWidth == kSmallIters * kSmallWidth // `kResultBits == kSmallIters * kSmallBits
// + kLargeIters * kLargeWidth` // + kLargeIters * kLargeBits`
// //
// where `kTotalWidth` is the total number of bits in `result_type`. // where `kResultBits` is the total number of bits in `result_type`.
// //
constexpr size_t kTotalWidth = std::numeric_limits<result_type>::digits; static constexpr size_t kResultBits =
constexpr size_t kUrbgWidth = NumBits<URBG>(); std::numeric_limits<result_type>::digits; // w
constexpr size_t kTotalIters = static constexpr urbg_result_type kUrbgRange = RangeSize<URBG>(); // R
kTotalWidth / kUrbgWidth + (kTotalWidth % kUrbgWidth != 0); static constexpr size_t kUrbgBits = NumBits<URBG>(); // m
constexpr size_t kSmallWidth = kTotalWidth / kTotalIters;
constexpr size_t kLargeWidth = kSmallWidth + 1; // compute the initial estimate of the bits used.
// [rand.adapt.ibits] 2 (c)
static constexpr size_t kA = // ceil(w/m)
(kResultBits / kUrbgBits) + ((kResultBits % kUrbgBits) != 0); // n'
static constexpr size_t kABits = kResultBits / kA; // w0'
static constexpr urbg_result_type kARejection =
((kUrbgRange >> kABits) << kABits); // y0'
// refine the selection to reduce the rejection frequency.
static constexpr size_t kTotalIters =
((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1); // n
// [rand.adapt.ibits] 2 (b)
static constexpr size_t kSmallIters =
kTotalIters - (kResultBits % kTotalIters); // n0
static constexpr size_t kSmallBits = kResultBits / kTotalIters; // w0
static constexpr urbg_result_type kSmallRejection =
((kUrbgRange >> kSmallBits) << kSmallBits); // y0
static constexpr size_t kLargeBits = kSmallBits + 1; // w0+1
static constexpr urbg_result_type kLargeRejection =
((kUrbgRange >> kLargeBits) << kLargeBits); // y1
// //
// Because `kLargeWidth == kSmallWidth + 1`, it follows that // Because `kLargeBits == kSmallBits + 1`, it follows that
// //
// `kTotalWidth == kTotalIters * kSmallWidth + kLargeIters` // `kResultBits == kSmallIters * kSmallBits + kLargeIters`
// //
// and therefore // and therefore
// //
@ -224,36 +224,40 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
// mentioned above, if the URBG width is a divisor of `kTotalWidth`, then // mentioned above, if the URBG width is a divisor of `kTotalWidth`, then
// there would be no need for any large iterations (i.e., one loop would // there would be no need for any large iterations (i.e., one loop would
// suffice), and indeed, in this case, `kLargeIters` would be zero. // suffice), and indeed, in this case, `kLargeIters` would be zero.
constexpr size_t kLargeIters = kTotalWidth % kSmallWidth; static_assert(kResultBits == kSmallIters * kSmallBits +
constexpr size_t kSmallIters = (kTotalIters - kSmallIters) * kLargeBits,
(kTotalWidth - (kLargeWidth * kLargeIters)) / kSmallWidth; "Error in looping constant calculations.");
static_assert( // The small shift is essentially small bits, but due to the potential
kTotalWidth == kSmallIters * kSmallWidth + kLargeIters * kLargeWidth, // of generating a smaller result_type from a larger urbg type, the actual
"Error in looping constant calculations."); // shift might be 0.
static constexpr size_t kSmallShift = kSmallBits % kResultBits;
static constexpr auto kSmallMask =
MaskFromShift<urbg_result_type>(kSmallShift);
static constexpr size_t kLargeShift = kLargeBits % kResultBits;
static constexpr auto kLargeMask =
MaskFromShift<urbg_result_type>(kLargeShift);
result_type s = 0; static constexpr auto kMin = (URBG::min)();
constexpr size_t kSmallShift = kSmallWidth % kTotalWidth; result_type s = 0;
constexpr result_type kSmallMask = MaskFromShift(result_type{kSmallShift});
for (size_t n = 0; n < kSmallIters; ++n) { for (size_t n = 0; n < kSmallIters; ++n) {
s = (s << kSmallShift) + urbg_result_type v;
(static_cast<result_type>(PowerOfTwoVariate(g)) & kSmallMask); do {
} v = g() - kMin;
} while (v >= kSmallRejection);
constexpr size_t kLargeShift = kLargeWidth % kTotalWidth; s = (s << kSmallShift) + static_cast<result_type>(v & kSmallMask);
constexpr result_type kLargeMask = MaskFromShift(result_type{kLargeShift});
for (size_t n = 0; n < kLargeIters; ++n) {
s = (s << kLargeShift) +
(static_cast<result_type>(PowerOfTwoVariate(g)) & kLargeMask);
} }
static_assert( for (size_t n = kSmallIters; n < kTotalIters; ++n) {
kLargeShift == kSmallShift + 1 || urbg_result_type v;
(kLargeShift == 0 && do {
kSmallShift == std::numeric_limits<result_type>::digits - 1), v = g() - kMin;
"Error in looping constant calculations"); } while (v >= kLargeRejection);
s = (s << kLargeShift) + static_cast<result_type>(v & kLargeMask);
}
return s; return s;
} }

@ -34,8 +34,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) {
using Limits = std::numeric_limits<TypeParam>; using Limits = std::numeric_limits<TypeParam>;
using FastBits = FastUniformBits<TypeParam>; using FastBits = FastUniformBits<TypeParam>;
EXPECT_EQ(0, FastBits::min()); EXPECT_EQ(0, (FastBits::min)());
EXPECT_EQ(Limits::max(), FastBits::max()); EXPECT_EQ((Limits::max)(), (FastBits::max)());
constexpr int kIters = 10000; constexpr int kIters = 10000;
std::random_device rd; std::random_device rd;
@ -43,8 +43,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) {
FastBits fast; FastBits fast;
for (int i = 0; i < kIters; i++) { for (int i = 0; i < kIters; i++) {
const auto v = fast(gen); const auto v = fast(gen);
EXPECT_LE(v, FastBits::max()); EXPECT_LE(v, (FastBits::max)());
EXPECT_GE(v, FastBits::min()); EXPECT_GE(v, (FastBits::min)());
} }
} }
@ -52,21 +52,26 @@ template <typename UIntType, UIntType Lo, UIntType Hi, UIntType Val = Lo>
struct FakeUrbg { struct FakeUrbg {
using result_type = UIntType; using result_type = UIntType;
FakeUrbg() = default;
explicit FakeUrbg(bool r) : reject(r) {}
static constexpr result_type(max)() { return Hi; } static constexpr result_type(max)() { return Hi; }
static constexpr result_type(min)() { return Lo; } static constexpr result_type(min)() { return Lo; }
result_type operator()() { return Val; } result_type operator()() {
}; // when reject is set, return Hi half the time.
return ((++calls % 2) == 1 && reject) ? Hi : Val;
}
using UrngOddbits = FakeUrbg<uint8_t, 1, 0xfe, 0x73>; bool reject = false;
using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>; size_t calls = 0;
using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>; };
using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>;
TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) { TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{0})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{0}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{2})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{3})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{16})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{16}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint8_t>::max)())); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint8_t>::max)()));
@ -75,6 +80,7 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{2})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{3})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{16})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{16}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint16_t>::max)())); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint16_t>::max)()));
@ -91,181 +97,237 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{2})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{3})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{64})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{64}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint64_t>::max)())); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint64_t>::max)()));
} }
TEST(FastUniformBitsTest, IntegerLog2) { TEST(FastUniformBitsTest, IntegerLog2) {
EXPECT_EQ(IntegerLog2(uint16_t{0}), 0); EXPECT_EQ(0, IntegerLog2(uint16_t{0}));
EXPECT_EQ(IntegerLog2(uint16_t{1}), 0); EXPECT_EQ(0, IntegerLog2(uint16_t{1}));
EXPECT_EQ(IntegerLog2(uint16_t{2}), 1); EXPECT_EQ(1, IntegerLog2(uint16_t{2}));
EXPECT_EQ(IntegerLog2(uint16_t{3}), 1); EXPECT_EQ(1, IntegerLog2(uint16_t{3}));
EXPECT_EQ(IntegerLog2(uint16_t{4}), 2); EXPECT_EQ(2, IntegerLog2(uint16_t{4}));
EXPECT_EQ(IntegerLog2(uint16_t{5}), 2); EXPECT_EQ(2, IntegerLog2(uint16_t{5}));
EXPECT_EQ(IntegerLog2(std::numeric_limits<uint64_t>::max()), 63); EXPECT_EQ(2, IntegerLog2(uint16_t{7}));
EXPECT_EQ(3, IntegerLog2(uint16_t{8}));
EXPECT_EQ(63, IntegerLog2((std::numeric_limits<uint64_t>::max)()));
} }
TEST(FastUniformBitsTest, RangeSize) { TEST(FastUniformBitsTest, RangeSize) {
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4); EXPECT_EQ(2, (RangeSize<FakeUrbg<uint8_t, 0, 1>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1); EXPECT_EQ(3, (RangeSize<FakeUrbg<uint8_t, 0, 2>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4); EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 0, 3>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 6>>()), 5); // EXPECT_EQ(0, (RangeSize<FakeUrbg<uint8_t, 2, 2>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 10>>()), 9); EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint8_t, 2, 6>>()));
EXPECT_EQ(9, (RangeSize<FakeUrbg<uint8_t, 2, 10>>()));
EXPECT_EQ( EXPECT_EQ(
(RangeSize<FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()), 0, (RangeSize<
0); FakeUrbg<uint8_t, 0, (std::numeric_limits<uint8_t>::max)()>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<
FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()),
0);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()), 0xffffffff);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()), 0xfffffffe);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>()), 0xfffffffd);
EXPECT_EQ((RangeSize<
FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()),
0);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()), 0x100000000ull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()), 0xffffffffull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()), 0xfffffffeull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>()), 0xfffffffdull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()), 0ull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()),
0xffffffffffffffffull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()),
0xfffffffffffffffeull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffeull>>()),
0xfffffffffffffffdull);
EXPECT_EQ((RangeSize<
FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()),
0);
}
TEST(FastUniformBitsTest, PowerOfTwoSubRangeSize) { EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 0, 3>>()));
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4); EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 2, 5>>()));
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1); EXPECT_EQ(5, (RangeSize<FakeUrbg<uint16_t, 2, 6>>()));
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4); EXPECT_EQ(18, (RangeSize<FakeUrbg<uint16_t, 1000, 1017>>()));
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 10>>()), 8);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()),
0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()),
0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()),
0x80000000);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()),
0x80000000);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()),
0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()),
0x100000000ull);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()),
0x80000000ull);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()),
0x80000000ull);
EXPECT_EQ( EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()), 0, (RangeSize<
0); FakeUrbg<uint16_t, 0, (std::numeric_limits<uint16_t>::max)()>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 0, 3>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint32_t, 2, 6>>()));
EXPECT_EQ(18, (RangeSize<FakeUrbg<uint32_t, 1000, 1017>>()));
EXPECT_EQ(0, (RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()));
EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()));
EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()));
EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>()));
EXPECT_EQ( EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()), 0, (RangeSize<
0x8000000000000000ull); FakeUrbg<uint32_t, 0, (std::numeric_limits<uint32_t>::max)()>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 0, 3>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint64_t, 2, 6>>()));
EXPECT_EQ(18, (RangeSize<FakeUrbg<uint64_t, 1000, 1017>>()));
EXPECT_EQ(0x100000000, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()));
EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()));
EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()));
EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>()));
EXPECT_EQ(0, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffff>>()));
EXPECT_EQ(0xffffffffffffffff,
(RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffff>>()));
EXPECT_EQ(0xfffffffffffffffe,
(RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffe>>()));
EXPECT_EQ(0xfffffffffffffffd,
(RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffe>>()));
EXPECT_EQ( EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()), 0, (RangeSize<
0x8000000000000000ull); FakeUrbg<uint64_t, 0, (std::numeric_limits<uint64_t>::max)()>>()));
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()),
0);
} }
TEST(FastUniformBitsTest, Urng4_VariousOutputs) { // The constants need to be chosen so that an infinite rejection loop doesn't
// happen...
using Urng1_5bit = FakeUrbg<uint8_t, 0, 2, 0>; // ~1.5 bits (range 3)
using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>;
using Urng22bits = FakeUrbg<uint32_t, 0, 0x3fffff, 0x301020>;
using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>; // ~31.9 bits
using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>;
using Urng33bits =
FakeUrbg<uint64_t, 1, 0x1ffffffff, 0x013301033>; // ~32.9 bits
using Urng63bits = FakeUrbg<uint64_t, 1, 0xfffffffffffffffe,
0xfedcba9012345678>; // ~63.9 bits
using Urng64bits =
FakeUrbg<uint64_t, 0, 0xffffffffffffffff, 0x123456780fedcba9>;
TEST(FastUniformBitsTest, OutputsUpTo32Bits) {
// Tests that how values are composed; the single-bit deltas should be spread // Tests that how values are composed; the single-bit deltas should be spread
// across each invocation. // across each invocation.
Urng1_5bit urng1_5;
Urng4bits urng4; Urng4bits urng4;
Urng22bits urng22;
Urng31bits urng31; Urng31bits urng31;
Urng32bits urng32; Urng32bits urng32;
Urng33bits urng33;
Urng63bits urng63;
Urng64bits urng64;
// 8-bit types // 8-bit types
{ {
FastUniformBits<uint8_t> fast8; FastUniformBits<uint8_t> fast8;
EXPECT_EQ(0x0, fast8(urng1_5));
EXPECT_EQ(0x11, fast8(urng4)); EXPECT_EQ(0x11, fast8(urng4));
EXPECT_EQ(0x20, fast8(urng22));
EXPECT_EQ(0x2, fast8(urng31)); EXPECT_EQ(0x2, fast8(urng31));
EXPECT_EQ(0x1, fast8(urng32)); EXPECT_EQ(0x1, fast8(urng32));
EXPECT_EQ(0x32, fast8(urng33));
EXPECT_EQ(0x77, fast8(urng63));
EXPECT_EQ(0xa9, fast8(urng64));
} }
// 16-bit types // 16-bit types
{ {
FastUniformBits<uint16_t> fast16; FastUniformBits<uint16_t> fast16;
EXPECT_EQ(0x0, fast16(urng1_5));
EXPECT_EQ(0x1111, fast16(urng4)); EXPECT_EQ(0x1111, fast16(urng4));
EXPECT_EQ(0xf02, fast16(urng31)); EXPECT_EQ(0x1020, fast16(urng22));
EXPECT_EQ(0xf01, fast16(urng32)); EXPECT_EQ(0x0f02, fast16(urng31));
EXPECT_EQ(0x0f01, fast16(urng32));
EXPECT_EQ(0x1032, fast16(urng33));
EXPECT_EQ(0x5677, fast16(urng63));
EXPECT_EQ(0xcba9, fast16(urng64));
} }
// 32-bit types // 32-bit types
{ {
FastUniformBits<uint32_t> fast32; FastUniformBits<uint32_t> fast32;
EXPECT_EQ(0x0, fast32(urng1_5));
EXPECT_EQ(0x11111111, fast32(urng4)); EXPECT_EQ(0x11111111, fast32(urng4));
EXPECT_EQ(0x08301020, fast32(urng22));
EXPECT_EQ(0x0f020f02, fast32(urng31)); EXPECT_EQ(0x0f020f02, fast32(urng31));
EXPECT_EQ(0x74010f01, fast32(urng32)); EXPECT_EQ(0x74010f01, fast32(urng32));
EXPECT_EQ(0x13301032, fast32(urng33));
EXPECT_EQ(0x12345677, fast32(urng63));
EXPECT_EQ(0x0fedcba9, fast32(urng64));
} }
}
TEST(FastUniformBitsTest, Outputs64Bits) {
// Tests that how values are composed; the single-bit deltas should be spread
// across each invocation.
FastUniformBits<uint64_t> fast64;
// 64-bit types
{ {
FastUniformBits<uint64_t> fast64; FakeUrbg<uint8_t, 0, 1, 0> urng0;
FakeUrbg<uint8_t, 0, 1, 1> urng1;
Urng4bits urng4;
Urng22bits urng22;
Urng31bits urng31;
Urng32bits urng32;
Urng33bits urng33;
Urng63bits urng63;
Urng64bits urng64;
// somewhat degenerate cases only create a single bit.
EXPECT_EQ(0x0, fast64(urng0));
EXPECT_EQ(64, urng0.calls);
EXPECT_EQ(0xffffffffffffffff, fast64(urng1));
EXPECT_EQ(64, urng1.calls);
// less degenerate cases.
EXPECT_EQ(0x1111111111111111, fast64(urng4)); EXPECT_EQ(0x1111111111111111, fast64(urng4));
EXPECT_EQ(16, urng4.calls);
EXPECT_EQ(0x01020c0408301020, fast64(urng22));
EXPECT_EQ(3, urng22.calls);
EXPECT_EQ(0x387811c3c0870f02, fast64(urng31)); EXPECT_EQ(0x387811c3c0870f02, fast64(urng31));
EXPECT_EQ(3, urng31.calls);
EXPECT_EQ(0x74010f0174010f01, fast64(urng32)); EXPECT_EQ(0x74010f0174010f01, fast64(urng32));
EXPECT_EQ(2, urng32.calls);
EXPECT_EQ(0x808194040cb01032, fast64(urng33));
EXPECT_EQ(3, urng33.calls);
EXPECT_EQ(0x1234567712345677, fast64(urng63));
EXPECT_EQ(2, urng63.calls);
EXPECT_EQ(0x123456780fedcba9, fast64(urng64));
EXPECT_EQ(1, urng64.calls);
}
// The 1.5 bit case is somewhat interesting in that the algorithm refinement
// causes one extra small sample. Comments here reference the names used in
// [rand.adapt.ibits] that correspond to this case.
{
Urng1_5bit urng1_5;
// w = 64
// R = 3
// m = 1
// n' = 64
// w0' = 1
// y0' = 2
// n = (1 <= 0) > 64 : 65 = 65
// n0 = 65 - (64%65) = 1
// n1 = 64
// w0 = 0
// y0 = 3
// w1 = 1
// y1 = 2
EXPECT_EQ(0x0, fast64(urng1_5));
EXPECT_EQ(65, urng1_5.calls);
}
// Validate rejections for non-power-of-2 cases.
{
Urng1_5bit urng1_5(true);
Urng31bits urng31(true);
Urng33bits urng33(true);
Urng63bits urng63(true);
// For 1.5 bits, there would be 1+2*64, except the first
// value was accepted and shifted off the end.
EXPECT_EQ(0, fast64(urng1_5));
EXPECT_EQ(128, urng1_5.calls);
EXPECT_EQ(0x387811c3c0870f02, fast64(urng31));
EXPECT_EQ(6, urng31.calls);
EXPECT_EQ(0x808194040cb01032, fast64(urng33));
EXPECT_EQ(6, urng33.calls);
EXPECT_EQ(0x1234567712345677, fast64(urng63));
EXPECT_EQ(4, urng63.calls);
} }
} }
TEST(FastUniformBitsTest, URBG32bitRegression) { TEST(FastUniformBitsTest, URBG32bitRegression) {
// Validate with deterministic 32-bit std::minstd_rand // Validate with deterministic 32-bit std::minstd_rand
// to ensure that operator() performs as expected. // to ensure that operator() performs as expected.
EXPECT_EQ(2147483646, RangeSize<std::minstd_rand>());
EXPECT_EQ(30, IntegerLog2(RangeSize<std::minstd_rand>()));
std::minstd_rand gen(1); std::minstd_rand gen(1);
FastUniformBits<uint64_t> fast64; FastUniformBits<uint64_t> fast64;
EXPECT_EQ(0x05e47095f847c122ull, fast64(gen)); EXPECT_EQ(0x05e47095f8791f45, fast64(gen));
EXPECT_EQ(0x8f82c1ba30b64d22ull, fast64(gen)); EXPECT_EQ(0x028be17e3c07c122, fast64(gen));
EXPECT_EQ(0x3b971a3558155039ull, fast64(gen)); EXPECT_EQ(0x55d2847c1626e8c2, fast64(gen));
} }
} // namespace } // namespace

@ -685,6 +685,11 @@ class Condition {
// return processed_ >= current; // return processed_ >= current;
// }; // };
// mu_.Await(Condition(&reached)); // mu_.Await(Condition(&reached));
//
// NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReadHeld()" in the
// lambda as it may be called when the mutex is being unlocked from a scope
// holding only a reader lock, which will make the assertion not fulfilled and
// crash the binary.
// See class comment for performance advice. In particular, if there // See class comment for performance advice. In particular, if there
// might be more than one waiter for the same condition, make sure // might be more than one waiter for the same condition, make sure

@ -654,14 +654,23 @@ const char* ParseTM(const char* dp, const char* fmt, std::tm* tm) {
} }
// Sets year, tm_mon and tm_mday given the year, week_num, and tm_wday, // Sets year, tm_mon and tm_mday given the year, week_num, and tm_wday,
// and the day on which weeks are defined to start. // and the day on which weeks are defined to start. Returns false if year
void FromWeek(int week_num, weekday week_start, year_t* year, std::tm* tm) { // would need to move outside its bounds.
bool FromWeek(int week_num, weekday week_start, year_t* year, std::tm* tm) {
const civil_year y(*year % 400); const civil_year y(*year % 400);
civil_day cd = prev_weekday(y, week_start); // week 0 civil_day cd = prev_weekday(y, week_start); // week 0
cd = next_weekday(cd - 1, FromTmWday(tm->tm_wday)) + (week_num * 7); cd = next_weekday(cd - 1, FromTmWday(tm->tm_wday)) + (week_num * 7);
*year += cd.year() - y.year(); if (const year_t shift = cd.year() - y.year()) {
if (shift > 0) {
if (*year > std::numeric_limits<year_t>::max() - shift) return false;
} else {
if (*year < std::numeric_limits<year_t>::min() - shift) return false;
}
*year += shift;
}
tm->tm_mon = cd.month() - 1; tm->tm_mon = cd.month() - 1;
tm->tm_mday = cd.day(); tm->tm_mday = cd.day();
return true;
} }
} // namespace } // namespace
@ -965,7 +974,12 @@ bool parse(const std::string& format, const std::string& input,
} }
// Compute year, tm.tm_mon and tm.tm_mday if we parsed a week number. // Compute year, tm.tm_mon and tm.tm_mday if we parsed a week number.
if (week_num != -1) FromWeek(week_num, week_start, &year, &tm); if (week_num != -1) {
if (!FromWeek(week_num, week_start, &year, &tm)) {
if (err != nullptr) *err = "Out-of-range field";
return false;
}
}
const int month = tm.tm_mon + 1; const int month = tm.tm_mon + 1;
civil_second cs(year, month, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); civil_second cs(year, month, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);

@ -1481,6 +1481,11 @@ TEST(Parse, WeekYearShift) {
EXPECT_EQ(exp, tp); EXPECT_EQ(exp, tp);
EXPECT_TRUE(parse("%Y-%W-%w", "2020-52-5", utc, &tp)); EXPECT_TRUE(parse("%Y-%W-%w", "2020-52-5", utc, &tp));
EXPECT_EQ(exp, tp); EXPECT_EQ(exp, tp);
// Slipping into the previous/following calendar years should fail when
// we're already at the extremes.
EXPECT_FALSE(parse("%Y-%U-%u", "-9223372036854775808-0-7", utc, &tp));
EXPECT_FALSE(parse("%Y-%U-%u", "9223372036854775807-53-7", utc, &tp));
} }
TEST(Parse, MaxRange) { TEST(Parse, MaxRange) {

@ -223,11 +223,10 @@ time_zone::civil_lookup TimeZoneLibC::MakeTime(const civil_second& cs) const {
civil_second() + ToUnixSeconds(time_point<seconds>::min()); civil_second() + ToUnixSeconds(time_point<seconds>::min());
static const civil_second max_tp_cs = static const civil_second max_tp_cs =
civil_second() + ToUnixSeconds(time_point<seconds>::max()); civil_second() + ToUnixSeconds(time_point<seconds>::max());
const time_point<seconds> tp = const time_point<seconds> tp = (cs < min_tp_cs) ? time_point<seconds>::min()
(cs < min_tp_cs) : (cs > max_tp_cs)
? time_point<seconds>::min() ? time_point<seconds>::max()
: (cs > max_tp_cs) ? time_point<seconds>::max() : FromUnixSeconds(cs - civil_second());
: FromUnixSeconds(cs - civil_second());
return {time_zone::civil_lookup::UNIQUE, tp, tp, tp}; return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
} }

@ -108,15 +108,15 @@ struct tzhead {
#ifndef TZ_MAX_TYPES #ifndef TZ_MAX_TYPES
/* This must be at least 17 for Europe/Samara and Europe/Vilnius. */ /* This must be at least 17 for Europe/Samara and Europe/Vilnius. */
#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ #define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
#endif /* !defined TZ_MAX_TYPES */ #endif /* !defined TZ_MAX_TYPES */
#ifndef TZ_MAX_CHARS #ifndef TZ_MAX_CHARS
#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ #define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
/* (limited by what unsigned chars can hold) */ /* (limited by what unsigned chars can hold) */
#endif /* !defined TZ_MAX_CHARS */ #endif /* !defined TZ_MAX_CHARS */
#ifndef TZ_MAX_LEAPS #ifndef TZ_MAX_LEAPS
#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ #define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
#endif /* !defined TZ_MAX_LEAPS */ #endif /* !defined TZ_MAX_LEAPS */
#endif /* !defined TZFILE_H */ #endif /* !defined TZFILE_H */

@ -259,7 +259,7 @@ absl_cc_library(
absl::strings absl::strings
absl::utility absl::utility
gmock_main gmock_main
PUBLIC TESTONLY
) )
absl_cc_test( absl_cc_test(

Loading…
Cancel
Save