Fix "unsafe narrowing" warnings in absl, 8/n.

Addresses failures with the following, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in */internal/.)

Bug: chromium:1292951
PiperOrigin-RevId: 473868797
Change-Id: Ibe0b76e33f9e001d59862beaac54fb47bacd39b2
Branch: pull/1277/head
Author: Abseil Team (committed by Copybara-Service)
parent 0a066f31d9
commit 5a547f8bbd
 absl/base/internal/direct_mmap.h                   |   3
 absl/base/internal/low_level_alloc.cc              |   2
 absl/base/internal/sysinfo.cc                      |  15
 absl/container/internal/btree.h                    | 275
 absl/container/internal/inlined_vector.h           |   4
 absl/container/internal/raw_hash_set.h             |  21
 absl/flags/internal/flag.cc                        |   4
 absl/flags/internal/usage.cc                       |   3
 absl/profiling/internal/exponential_biased_test.cc |  20
 absl/random/internal/seed_material.cc              |   4
 absl/synchronization/internal/futex.h              |  37
 absl/time/internal/test_util.cc                    |   5
 12 files changed
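
The hunks below repeat one pattern: wherever a 64-bit or differently-signed
value is intentionally stored in a narrower type, the conversion is spelled
out with static_cast (or the destination's type is widened to match the
source), so the warnings above are resolved rather than suppressed. A minimal
standalone sketch of the before/after shape (illustrative only, not code from
this CL):

    #include <cstddef>
    #include <string>

    // Before: std::string::size() returns size_t, so this assignment narrows
    // 64 -> 32 bits and trips -Wshorten-64-to-32 / -Wsign-conversion:
    //   int len = s.size();

    // Fix 1: widen the destination to the source type.
    std::size_t LengthWide(const std::string& s) { return s.size(); }

    // Fix 2: where the narrower type is required, cast explicitly to document
    // that the narrowing is intentional and known to be in range.
    int LengthNarrow(const std::string& s) {
      return static_cast<int>(s.size());
    }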

absl/base/internal/direct_mmap.h

@@ -97,7 +97,8 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
 #ifdef __BIONIC__
   // SYS_mmap2 has problems on Android API level <= 16.
   // Workaround by invoking __mmap2() instead.
-  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+  return __mmap2(start, length, prot, flags, fd,
+                 static_cast<size_t>(offset / pagesize));
 #else
   return reinterpret_cast<void*>(
       syscall(SYS_mmap2, start, length, prot, flags, fd,

absl/base/internal/low_level_alloc.cc

@@ -332,7 +332,7 @@ size_t GetPageSize() {
 #elif defined(__wasm__) || defined(__asmjs__)
   return getpagesize();
 #else
-  return sysconf(_SC_PAGESIZE);
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
 #endif
 }

absl/base/internal/sysinfo.cc

@@ -136,7 +136,7 @@ static int GetNumCPUs() {
   // Other possibilities:
   //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
   //  - sysconf(_SC_NPROCESSORS_ONLN)
-  return std::thread::hardware_concurrency();
+  return static_cast<int>(std::thread::hardware_concurrency());
 #endif
 }

@@ -194,7 +194,7 @@ static bool ReadLongFromFile(const char *file, long *value) {
     char line[1024];
     char *err;
     memset(line, '\0', sizeof(line));
-    int len = read(fd, line, sizeof(line) - 1);
+    ssize_t len = read(fd, line, sizeof(line) - 1);
     if (len <= 0) {
       ret = false;
     } else {

@@ -376,7 +376,7 @@ pid_t GetTID() {
 #endif

 pid_t GetTID() {
-  return syscall(SYS_gettid);
+  return static_cast<pid_t>(syscall(SYS_gettid));
 }

 #elif defined(__akaros__)

@@ -429,11 +429,11 @@ static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
 // Returns the TID to tid_array.
 static void FreeTID(void *v) {
   intptr_t tid = reinterpret_cast<intptr_t>(v);
-  int word = tid / kBitsPerWord;
+  intptr_t word = tid / kBitsPerWord;
   uint32_t mask = ~(1u << (tid % kBitsPerWord));
   absl::base_internal::SpinLockHolder lock(&tid_lock);
   assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
-  (*tid_array)[word] &= mask;
+  (*tid_array)[static_cast<size_t>(word)] &= mask;
 }

 static void InitGetTID() {

@@ -455,7 +455,7 @@ pid_t GetTID() {
   intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
   if (tid != 0) {
-    return tid;
+    return static_cast<pid_t>(tid);
   }

   int bit;  // tid_array[word] = 1u << bit;

@@ -476,7 +476,8 @@ pid_t GetTID() {
     while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
       ++bit;
     }
-    tid = (word * kBitsPerWord) + bit;
+    tid =
+        static_cast<intptr_t>((word * kBitsPerWord) + static_cast<size_t>(bit));
     (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
   }

absl/container/internal/btree.h

@@ -634,27 +634,27 @@ class btree_node {
                : NodeTargetSlots((begin + end) / 2 + 1, end);
   }

-  enum {
-    kTargetNodeSize = params_type::kTargetNodeSize,
-    kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
-    // We need a minimum of 3 slots per internal node in order to perform
-    // splitting (1 value for the two nodes involved in the split and 1 value
-    // propagated to the parent as the delimiter for the split). For performance
-    // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
-    // of 1/3 (for a node, not a b-tree).
-    kMinNodeSlots = 4,
-    kNodeSlots =
-        kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
-    // The node is internal (i.e. is not a leaf node) if and only if `max_count`
-    // has this value.
-    kInternalNodeMaxCount = 0,
-  };
+  constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize;
+  constexpr static size_type kNodeTargetSlots =
+      NodeTargetSlots(0, kTargetNodeSize);
+  // We need a minimum of 3 slots per internal node in order to perform
+  // splitting (1 value for the two nodes involved in the split and 1 value
+  // propagated to the parent as the delimiter for the split). For performance
+  // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy of
+  // 1/3 (for a node, not a b-tree).
+  constexpr static size_type kMinNodeSlots = 4;
+  constexpr static size_type kNodeSlots =
+      kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
+  // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+  // has this value.
+  constexpr static field_type kInternalNodeMaxCount = 0;

   // Leaves can have less than kNodeSlots values.
-  constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+  constexpr static layout_type LeafLayout(
+      const size_type slot_count = kNodeSlots) {
     return layout_type(
         /*parent*/ 1,
         /*generation*/ params_type::kEnableGenerations ? 1 : 0,

@@ -670,7 +670,7 @@ class btree_node {
         /*slots*/ kNodeSlots,
         /*children*/ kNodeSlots + 1);
   }
-  constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+  constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
     return LeafLayout(slot_count).AllocSize();
   }
   constexpr static size_type InternalSize() {

@@ -693,10 +693,10 @@ class btree_node {
   }
   void set_parent(btree_node *p) { *GetField<0>() = p; }
   field_type &mutable_finish() { return GetField<2>()[2]; }
-  slot_type *slot(int i) { return &GetField<3>()[i]; }
+  slot_type* slot(size_type i) { return &GetField<3>()[i]; }
   slot_type *start_slot() { return slot(start()); }
   slot_type *finish_slot() { return slot(finish()); }
-  const slot_type *slot(int i) const { return &GetField<3>()[i]; }
+  const slot_type* slot(size_type i) const { return &GetField<3>()[i]; }
   void set_position(field_type v) { GetField<2>()[0] = v; }
   void set_start(field_type v) { GetField<2>()[1] = v; }
   void set_finish(field_type v) { GetField<2>()[2] = v; }
@@ -773,52 +773,55 @@ class btree_node {
   }

   // Getters for the key/value at position i in the node.
-  const key_type &key(int i) const { return params_type::key(slot(i)); }
-  reference value(int i) { return params_type::element(slot(i)); }
-  const_reference value(int i) const { return params_type::element(slot(i)); }
+  const key_type& key(size_type i) const { return params_type::key(slot(i)); }
+  reference value(size_type i) { return params_type::element(slot(i)); }
+  const_reference value(size_type i) const {
+    return params_type::element(slot(i));
+  }

   // Getters/setter for the child at position i in the node.
-  btree_node *child(int i) const { return GetField<4>()[i]; }
+  btree_node* child(field_type i) const { return GetField<4>()[i]; }
   btree_node *start_child() const { return child(start()); }
-  btree_node *&mutable_child(int i) { return GetField<4>()[i]; }
-  void clear_child(int i) {
+  btree_node*& mutable_child(field_type i) { return GetField<4>()[i]; }
+  void clear_child(field_type i) {
     absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
   }
-  void set_child(int i, btree_node *c) {
+  void set_child(field_type i, btree_node* c) {
     absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
     mutable_child(i) = c;
     c->set_position(i);
   }
-  void init_child(int i, btree_node *c) {
+  void init_child(field_type i, btree_node* c) {
     set_child(i, c);
     c->set_parent(this);
   }

   // Returns the position of the first value whose key is not less than k.
   template <typename K>
-  SearchResult<int, is_key_compare_to::value> lower_bound(
-      const K &k, const key_compare &comp) const {
+  SearchResult<size_type, is_key_compare_to::value> lower_bound(
+      const K& k,
+      const key_compare& comp) const {
     return use_linear_search::value ? linear_search(k, comp)
                                     : binary_search(k, comp);
   }
   // Returns the position of the first value whose key is greater than k.
   template <typename K>
-  int upper_bound(const K &k, const key_compare &comp) const {
+  size_type upper_bound(const K& k, const key_compare& comp) const {
     auto upper_compare = upper_bound_adapter<key_compare>(comp);
     return use_linear_search::value ? linear_search(k, upper_compare).value
                                     : binary_search(k, upper_compare).value;
   }

   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
-  linear_search(const K &k, const Compare &comp) const {
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  linear_search(const K& k, const Compare& comp) const {
     return linear_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
   }

   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
-  binary_search(const K &k, const Compare &comp) const {
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  binary_search(const K& k, const Compare& comp) const {
     return binary_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
   }
@@ -826,8 +829,11 @@ class btree_node {
   // Returns the position of the first value whose key is not less than k using
   // linear search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, false> linear_search_impl(
+      const K& k,
+      size_type s,
+      const size_type e,
+      const Compare& comp,
       std::false_type /* IsCompareTo */) const {
     while (s < e) {
       if (!comp(key(s), k)) {

@@ -835,14 +841,17 @@ class btree_node {
       }
       ++s;
     }
-    return SearchResult<int, false>{s};
+    return SearchResult<size_type, false>{s};
   }

   // Returns the position of the first value whose key is not less than k using
   // linear search performed using compare-to.
   template <typename K, typename Compare>
-  SearchResult<int, true> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, true> linear_search_impl(
+      const K& k,
+      size_type s,
+      const size_type e,
+      const Compare& comp,
       std::true_type /* IsCompareTo */) const {
     while (s < e) {
       const absl::weak_ordering c = comp(key(s), k);

@@ -859,30 +868,36 @@ class btree_node {
   // Returns the position of the first value whose key is not less than k using
   // binary search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> binary_search_impl(
-      const K &k, int s, int e, const Compare &comp,
+  SearchResult<size_type, false> binary_search_impl(
+      const K& k,
+      size_type s,
+      size_type e,
+      const Compare& comp,
       std::false_type /* IsCompareTo */) const {
     while (s != e) {
-      const int mid = (s + e) >> 1;
+      const size_type mid = (s + e) >> 1;
       if (comp(key(mid), k)) {
         s = mid + 1;
       } else {
         e = mid;
       }
     }
-    return SearchResult<int, false>{s};
+    return SearchResult<size_type, false>{s};
   }

   // Returns the position of the first value whose key is not less than k using
   // binary search performed using compare-to.
   template <typename K, typename CompareTo>
-  SearchResult<int, true> binary_search_impl(
-      const K &k, int s, int e, const CompareTo &comp,
+  SearchResult<size_type, true> binary_search_impl(
+      const K& k,
+      size_type s,
+      size_type e,
+      const CompareTo& comp,
       std::true_type /* IsCompareTo */) const {
     if (params_type::template can_have_multiple_equivalent_keys<K>()) {
       MatchKind exact_match = MatchKind::kNe;
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;
@@ -899,7 +914,7 @@ class btree_node {
       return {s, exact_match};
     } else {  // Can't have multiple equivalent keys.
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;

@@ -916,7 +931,7 @@ class btree_node {
   // Emplaces a value at position i, shifting all existing values and
   // children at positions >= i to the right by 1.
   template <typename... Args>
-  void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+  void emplace_value(field_type i, allocator_type* alloc, Args&&... args);

   // Removes the values at positions [i, i + to_erase), shifting all existing
   // values and children after that range to the left by to_erase. Clears all

@@ -924,10 +939,12 @@ class btree_node {
   void remove_values(field_type i, field_type to_erase, allocator_type *alloc);

   // Rebalances a node with its right sibling.
-  void rebalance_right_to_left(int to_move, btree_node *right,
-                               allocator_type *alloc);
-  void rebalance_left_to_right(int to_move, btree_node *right,
-                               allocator_type *alloc);
+  void rebalance_right_to_left(field_type to_move,
+                               btree_node* right,
+                               allocator_type* alloc);
+  void rebalance_left_to_right(field_type to_move,
+                               btree_node* right,
+                               allocator_type* alloc);

   // Splits a node, moving a portion of the node's values to its right sibling.
   void split(int insert_position, btree_node *dest, allocator_type *alloc);

@@ -937,7 +954,7 @@ class btree_node {
   void merge(btree_node *src, allocator_type *alloc);

   // Node allocation/deletion routines.
-  void init_leaf(int max_count, btree_node *parent) {
+  void init_leaf(field_type max_count, btree_node* parent) {
     set_generation(0);
     set_parent(parent);
     set_position(0);

@@ -1017,10 +1034,15 @@ class btree_node {
                            const size_type src_i, btree_node *src_node,
                            allocator_type *alloc) {
     next_generation();
-    for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
-                   *dest = slot(dest_i + n - 1);
+    for (slot_type *src = src_node->slot(src_i + n), *end = src - n,
+                   *dest = slot(dest_i + n);
          src != end; --src, --dest) {
-      transfer(dest, src, alloc);
+      // If we modified the loop index calculations above to avoid the -1s
+      // here, it would result in UB in the computation of `end` (and possibly
+      // `src` as well, if n == 0), since slot() is effectively an array index
+      // and it is UB to compute the address of any out-of-bounds array element
+      // except for one-past-the-end.
+      transfer(dest - 1, src - 1, alloc);
     }
   }
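
The comment added in this hunk leans on a C++ pointer-arithmetic rule worth
spelling out: a pointer may be formed to any element of an array or to one
past its end, but forming a pointer before the start, or more than one past
the end, is undefined behavior even if it is never dereferenced. A standalone
sketch, not absl code:

    void PointerBoundsSketch() {
      int a[8] = {0};
      int* end = a + 8;      // OK: one-past-the-end may be formed and compared.
      // int* bad1 = a + 9;  // UB: more than one past the end, even if unused.
      // int* bad2 = a - 1;  // UB: before the first element, even if unused.
      (void)end;
    }

This is why the loop now computes src_node->slot(src_i + n), which is at worst
one-past-the-end, and subtracts 1 inside the body: the previous formulation's
`end` pointer was effectively slot(src_i - 1), which lies before the start of
the slot array whenever src_i == 0.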
@@ -1034,6 +1056,7 @@ class btree_node {

 template <typename Node, typename Reference, typename Pointer>
 class btree_iterator {
+  using field_type = typename Node::field_type;
   using key_type = typename Node::key_type;
   using size_type = typename Node::size_type;
   using params_type = typename Node::params_type;

@@ -1105,7 +1128,7 @@ class btree_iterator {
     ABSL_HARDENING_ASSERT(node_->start() <= position_);
     ABSL_HARDENING_ASSERT(node_->finish() > position_);
     assert_valid_generation();
-    return node_->value(position_);
+    return node_->value(static_cast<field_type>(position_));
   }
   pointer operator->() const { return &operator*(); }

@@ -1189,9 +1212,11 @@ class btree_iterator {
 #endif
   }

-  const key_type &key() const { return node_->key(position_); }
+  const key_type& key() const {
+    return node_->key(static_cast<size_type>(position_));
+  }
   decltype(std::declval<Node *>()->slot(0)) slot() {
-    return node_->slot(position_);
+    return node_->slot(static_cast<size_type>(position_));
   }

   void assert_valid_generation() const {
@@ -1600,7 +1625,7 @@ class btree {

   // Allocates a correctly aligned node of at least size bytes using the
   // allocator.
-  node_type *allocate(const size_type size) {
+  node_type* allocate(size_type size) {
     return reinterpret_cast<node_type *>(
         absl::container_internal::Allocate<node_type::Alignment()>(
             mutable_allocator(), size));

@@ -1617,7 +1642,7 @@ class btree {
     n->init_leaf(kNodeSlots, parent);
     return n;
   }
-  node_type *new_leaf_root_node(const int max_count) {
+  node_type* new_leaf_root_node(field_type max_count) {
     node_type *n = allocate(node_type::LeafSize(max_count));
     n->init_leaf(max_count, /*parent=*/n);
     return n;

@@ -1685,8 +1710,9 @@ class btree {
   iterator internal_find(const K &key) const;

   // Verifies the tree structure of node.
-  int internal_verify(const node_type *node, const key_type *lo,
-                      const key_type *hi) const;
+  size_type internal_verify(const node_type* node,
+                            const key_type* lo,
+                            const key_type* hi) const;

   node_stats internal_stats(const node_type *node) const {
     // The root can be a static empty node.
@@ -1720,9 +1746,9 @@ class btree {
 // btree_node methods
 template <typename P>
 template <typename... Args>
-inline void btree_node<P>::emplace_value(const size_type i,
-                                         allocator_type *alloc,
-                                         Args &&... args) {
+inline void btree_node<P>::emplace_value(const field_type i,
+                                         allocator_type* alloc,
+                                         Args&&... args) {
   assert(i >= start());
   assert(i <= finish());
   // Shift old values to create space for new value and then construct it in

@@ -1731,7 +1757,7 @@ inline void btree_node<P>::emplace_value(const size_type i,
     transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
                         alloc);
   }
-  value_init(i, alloc, std::forward<Args>(args)...);
+  value_init(static_cast<field_type>(i), alloc, std::forward<Args>(args)...);
   set_finish(finish() + 1);

   if (is_internal() && finish() > i + 1) {

@@ -1767,9 +1793,9 @@ inline void btree_node<P>::remove_values(const field_type i,
 }

 template <typename P>
-void btree_node<P>::rebalance_right_to_left(const int to_move,
-                                            btree_node *right,
-                                            allocator_type *alloc) {
+void btree_node<P>::rebalance_right_to_left(field_type to_move,
+                                            btree_node* right,
+                                            allocator_type* alloc) {
   assert(parent() == right->parent());
   assert(position() + 1 == right->position());
   assert(right->count() >= count());

@@ -1791,10 +1817,10 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
   if (is_internal()) {
     // Move the child pointers from the right to the left node.
-    for (int i = 0; i < to_move; ++i) {
+    for (field_type i = 0; i < to_move; ++i) {
       init_child(finish() + i + 1, right->child(i));
     }
-    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+    for (field_type i = right->start(); i <= right->finish() - to_move; ++i) {
       assert(i + to_move <= right->max_count());
       right->init_child(i, right->child(i + to_move));
       right->clear_child(i + to_move);

@@ -1807,9 +1833,9 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
 }

 template <typename P>
-void btree_node<P>::rebalance_left_to_right(const int to_move,
-                                            btree_node *right,
-                                            allocator_type *alloc) {
+void btree_node<P>::rebalance_left_to_right(field_type to_move,
+                                            btree_node* right,
+                                            allocator_type* alloc) {
   assert(parent() == right->parent());
   assert(position() + 1 == right->position());
   assert(count() >= right->count());

@@ -1838,11 +1864,11 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
   if (is_internal()) {
     // Move the child pointers from the left to the right node.
-    for (int i = right->finish(); i >= right->start(); --i) {
-      right->init_child(i + to_move, right->child(i));
-      right->clear_child(i);
+    for (field_type i = right->finish() + 1; i > right->start(); --i) {
+      right->init_child(i - 1 + to_move, right->child(i - 1));
+      right->clear_child(i - 1);
     }
-    for (int i = 1; i <= to_move; ++i) {
+    for (field_type i = 1; i <= to_move; ++i) {
       right->init_child(i - 1, child(finish() - to_move + i));
       clear_child(finish() - to_move + i);
     }

@@ -1883,7 +1909,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
   parent()->init_child(position() + 1, dest);

   if (is_internal()) {
-    for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+    for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish();
          ++i, ++j) {
       assert(child(j) != nullptr);
       dest->init_child(i, child(j));
@@ -1944,15 +1970,15 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
   // instead of checking whether the parent is a leaf, we can remove this logic.
   btree_node *leftmost_leaf = node;
 #endif
-  // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
-  // isn't guaranteed to be a valid `field_type`.
-  int pos = node->position();
+  // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`,
+  // which isn't guaranteed to be a valid `field_type`.
+  size_type pos = node->position();
   btree_node *parent = node->parent();
   for (;;) {
     // In each iteration of the next loop, we delete one leaf node and go right.
     assert(pos <= parent->finish());
     do {
-      node = parent->child(pos);
+      node = parent->child(static_cast<field_type>(pos));
       if (node->is_internal()) {
         // Navigate to the leftmost leaf under node.
         while (node->is_internal()) node = node->start_child();

@@ -2004,7 +2030,7 @@ void btree_iterator<N, R, P>::increment_slow() {
     }
   } else {
     assert(position_ < node_->finish());
-    node_ = node_->child(position_ + 1);
+    node_ = node_->child(static_cast<field_type>(position_ + 1));
     while (node_->is_internal()) {
       node_ = node_->start_child();
     }

@@ -2028,7 +2054,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
     }
   } else {
     assert(position_ >= node_->start());
-    node_ = node_->child(position_);
+    node_ = node_->child(static_cast<field_type>(position_));
     while (node_->is_internal()) {
       node_ = node_->child(node_->finish());
     }
@@ -2475,16 +2501,19 @@ void btree<P>::rebalance_or_split(iterator *iter) {
       // We bias rebalancing based on the position being inserted. If we're
       // inserting at the end of the right node then we bias rebalancing to
      // fill up the left node.
-      int to_move = (kNodeSlots - left->count()) /
-                    (1 + (insert_position < static_cast<int>(kNodeSlots)));
-      to_move = (std::max)(1, to_move);
+      field_type to_move =
+          (kNodeSlots - left->count()) /
+          (1 + (static_cast<field_type>(insert_position) < kNodeSlots));
+      to_move = (std::max)(field_type{1}, to_move);

-      if (insert_position - to_move >= node->start() ||
-          left->count() + to_move < static_cast<int>(kNodeSlots)) {
+      if (static_cast<field_type>(insert_position) - to_move >=
+              node->start() ||
+          left->count() + to_move < kNodeSlots) {
         left->rebalance_right_to_left(to_move, node, mutable_allocator());

         assert(node->max_count() - node->count() == to_move);
-        insert_position = insert_position - to_move;
+        insert_position = static_cast<int>(
+            static_cast<field_type>(insert_position) - to_move);

         if (insert_position < node->start()) {
           insert_position = insert_position + left->count() + 1;
           node = left;

@@ -2504,12 +2533,13 @@ void btree<P>::rebalance_or_split(iterator *iter) {
       // We bias rebalancing based on the position being inserted. If we're
       // inserting at the beginning of the left node then we bias rebalancing
       // to fill up the right node.
-      int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
-                    (1 + (insert_position > node->start()));
-      to_move = (std::max)(1, to_move);
+      field_type to_move = (kNodeSlots - right->count()) /
+                           (1 + (insert_position > node->start()));
+      to_move = (std::max)(field_type{1}, to_move);

-      if (insert_position <= node->finish() - to_move ||
-          right->count() + to_move < static_cast<int>(kNodeSlots)) {
+      if (static_cast<field_type>(insert_position) <=
+              node->finish() - to_move ||
+          right->count() + to_move < kNodeSlots) {
         node->rebalance_left_to_right(to_move, right, mutable_allocator());

         if (insert_position > node->finish()) {

@@ -2594,8 +2624,9 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     // from the front of the tree.
     if (right->count() > kMinNodeValues &&
         (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
-      int to_move = (right->count() - iter->node_->count()) / 2;
-      to_move = (std::min)(to_move, right->count() - 1);
+      field_type to_move = (right->count() - iter->node_->count()) / 2;
+      to_move =
+          (std::min)(to_move, static_cast<field_type>(right->count() - 1));
       iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
       return false;
     }

@@ -2609,8 +2640,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     if (left->count() > kMinNodeValues &&
         (iter->node_->count() == 0 ||
          iter->position_ < iter->node_->finish())) {
-      int to_move = (left->count() - iter->node_->count()) / 2;
-      to_move = (std::min)(to_move, left->count() - 1);
+      field_type to_move = (left->count() - iter->node_->count()) / 2;
+      to_move = (std::min)(to_move, static_cast<field_type>(left->count() - 1));
       left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
       iter->position_ += to_move;
       return false;
@@ -2671,8 +2702,9 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
       // Insertion into the root where the root is smaller than the full node
       // size. Simply grow the size of the root node.
       assert(iter.node_ == root());
-      iter.node_ =
-          new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+      iter.node_ = new_leaf_root_node(
+          static_cast<field_type>((std::min)(static_cast<int>(kNodeSlots),
+                                             2 * max_count)));

       // Transfer the values from the old root to the new root.
       node_type *old_root = root();
       node_type *new_root = iter.node_;

@@ -2687,7 +2719,8 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
       rebalance_or_split(&iter);
     }
   }
-  iter.node_->emplace_value(iter.position_, alloc, std::forward<Args>(args)...);
+  iter.node_->emplace_value(static_cast<field_type>(iter.position_), alloc,
+                            std::forward<Args>(args)...);
   ++size_;
   iter.update_generation();
   return iter;

@@ -2699,9 +2732,9 @@ inline auto btree<P>::internal_locate(const K &key) const
     -> SearchResult<iterator, is_key_compare_to::value> {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    SearchResult<int, is_key_compare_to::value> res =
+    SearchResult<size_type, is_key_compare_to::value> res =
         iter.node_->lower_bound(key, key_comp());
-    iter.position_ = res.value;
+    iter.position_ = static_cast<int>(res.value);
     if (res.IsEq()) {
       return {iter, MatchKind::kEq};
     }

@@ -2712,7 +2745,7 @@ inline auto btree<P>::internal_locate(const K &key) const
     if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   // Note: in the non-key-compare-to case, the key may actually be equivalent
   // here (and the MatchKind::kNe is ignored).
@@ -2729,16 +2762,16 @@ auto btree<P>::internal_lower_bound(const K &key) const
     return ret;
   }
   iterator iter(const_cast<node_type *>(root()));
-  SearchResult<int, is_key_compare_to::value> res;
+  SearchResult<size_type, is_key_compare_to::value> res;
   bool seen_eq = false;
   for (;;) {
     res = iter.node_->lower_bound(key, key_comp());
-    iter.position_ = res.value;
+    iter.position_ = static_cast<int>(res.value);
     if (iter.node_->is_leaf()) {
       break;
     }
     seen_eq = seen_eq || res.IsEq();
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   if (res.IsEq()) return {iter, MatchKind::kEq};
   return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};

@@ -2749,11 +2782,11 @@ template <typename K>
 auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    iter.position_ = iter.node_->upper_bound(key, key_comp());
+    iter.position_ = static_cast<int>(iter.node_->upper_bound(key, key_comp()));
     if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   return internal_last(iter);
 }

@@ -2776,8 +2809,10 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
 }

 template <typename P>
-int btree<P>::internal_verify(const node_type *node, const key_type *lo,
-                              const key_type *hi) const {
+typename btree<P>::size_type btree<P>::internal_verify(
+    const node_type* node,
+    const key_type* lo,
+    const key_type* hi) const {
   assert(node->count() > 0);
   assert(node->count() <= node->max_count());
   if (lo) {

@@ -2789,9 +2824,9 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
   for (int i = node->start() + 1; i < node->finish(); ++i) {
     assert(!compare_keys(node->key(i), node->key(i - 1)));
   }
-  int count = node->count();
+  size_type count = node->count();
   if (node->is_internal()) {
-    for (int i = node->start(); i <= node->finish(); ++i) {
+    for (field_type i = node->start(); i <= node->finish(); ++i) {
       assert(node->child(i) != nullptr);
       assert(node->child(i)->parent() == node);
       assert(node->child(i)->position() == i);

absl/container/internal/inlined_vector.h

@@ -641,8 +641,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
                               SizeType<A> insert_count) -> Iterator<A> {
   StorageView<A> storage_view = MakeStorageView();

-  SizeType<A> insert_index =
-      std::distance(ConstIterator<A>(storage_view.data), pos);
+  auto insert_index = static_cast<SizeType<A>>(
+      std::distance(ConstIterator<A>(storage_view.data), pos));
   SizeType<A> insert_end_index = insert_index + insert_count;
   SizeType<A> new_size = storage_view.size + insert_count;

absl/container/internal/raw_hash_set.h

@@ -612,9 +612,9 @@ struct GroupAArch64Impl {
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
     uint64_t mask =
-        vget_lane_u64(vreinterpret_u64_u8(
-                          vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
-                                  vreinterpret_s8_u8(ctrl))),
-                      0);
+        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+                          vreinterpret_s8_u8(ctrl))),
+                      0);
     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
   }

@@ -1144,11 +1144,12 @@ class raw_hash_set {
           std::is_nothrow_default_constructible<key_equal>::value&&
               std::is_nothrow_default_constructible<allocator_type>::value) {}

-  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+  explicit raw_hash_set(size_t bucket_count,
+                        const hasher& hash = hasher(),
                         const key_equal& eq = key_equal(),
                         const allocator_type& alloc = allocator_type())
       : ctrl_(EmptyGroup()),
-        settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+        settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
       initialize_slots();

@@ -1273,14 +1274,16 @@ class raw_hash_set {
           std::is_nothrow_copy_constructible<allocator_type>::value)
       : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
         slots_(absl::exchange(that.slots_, nullptr)),
-        size_(absl::exchange(that.size_, 0)),
-        capacity_(absl::exchange(that.capacity_, 0)),
+        size_(absl::exchange(that.size_, size_t{0})),
+        capacity_(absl::exchange(that.capacity_, size_t{0})),
         // Hash, equality and allocator are copied instead of moved because
         // `that` must be left valid. If Hash is std::function<Key>, moving it
         // would create a nullptr functor that cannot be called.
-        settings_(absl::exchange(that.growth_left(), 0),
+        settings_(absl::exchange(that.growth_left(), size_t{0}),
                   absl::exchange(that.infoz(), HashtablezInfoHandle()),
-                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+                  that.hash_ref(),
+                  that.eq_ref(),
+                  that.alloc_ref()) {}

   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : ctrl_(EmptyGroup()),
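
An aside on the comment about copying hash/equality/allocator instead of
moving them: a moved-from std::function is left in a valid but unspecified
state, which in common implementations is empty, so invoking it throws
std::bad_function_call. A standalone sketch of the hazard (hypothetical
functor, not absl code):

    #include <functional>
    #include <iostream>
    #include <utility>

    int main() {
      std::function<int(int)> hash = [](int k) { return k + 1; };
      std::function<int(int)> taken = std::move(hash);
      // `hash` is now valid but unspecified; with common implementations it
      // is empty, and calling hash(1) would throw std::bad_function_call.
      if (!hash) std::cout << "moved-from functor is empty\n";
      // A copy leaves the source intact, which is why the move constructor
      // above copies that.hash_ref() rather than moving it.
      std::cout << taken(1) << "\n";  // prints 2
      return 0;
    }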

absl/flags/internal/flag.cc

@@ -406,7 +406,7 @@ template <typename StorageT>
 StorageT* FlagImpl::OffsetValue() const {
   char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this));
   // The offset is deduced via Flag value type specific op_.
-  size_t offset = flags_internal::ValueOffset(op_);
+  ptrdiff_t offset = flags_internal::ValueOffset(op_);
   return reinterpret_cast<StorageT*>(p + offset);
 }

@@ -486,7 +486,7 @@ bool FlagImpl::ReadOneBool() const {
 }

 void FlagImpl::ReadSequenceLockedData(void* dst) const {
-  int size = Sizeof(op_);
+  size_t size = Sizeof(op_);
   // Attempt to read using the sequence lock.
   if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
     return;

absl/flags/internal/usage.cc

@@ -148,8 +148,7 @@ class FlagHelpPrettyPrinter {
     }

     // Write the token, ending the string first if necessary/possible.
-    if (!new_line &&
-        (line_len_ + static_cast<int>(token.size()) >= max_line_len_)) {
+    if (!new_line && (line_len_ + token.size() >= max_line_len_)) {
       EndLine();
       new_line = true;
     }

absl/profiling/internal/exponential_biased_test.cc

@@ -94,13 +94,14 @@ double AndersonDarlingPValue(int n, double z) {
 }

 double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
-  int n = random_sample.size();
+  size_t n = random_sample.size();
   double ad_sum = 0;
-  for (int i = 0; i < n; i++) {
+  for (size_t i = 0; i < n; i++) {
     ad_sum += (2 * i + 1) *
               std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
   }
-  double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
+  const auto n_as_double = static_cast<double>(n);
+  double ad_statistic = -n_as_double - 1 / n_as_double * ad_sum;
   return ad_statistic;
 }
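
For reference, the quantity computed above (with the sample sorted ascending
and 0-based indexing, as in the loop) is the Anderson-Darling statistic from
the Marsaglia & Marsaglia paper cited in the next hunk's comment:

    A^2 = -n - \frac{1}{n} \sum_{i=0}^{n-1} (2i + 1) \, \ln\bigl(u_i (1 - u_{n-1-i})\bigr)

The fix keeps the arithmetic identical; `-n` becomes `-n_as_double` because
once `n` is a size_t, negating it directly would wrap around to a huge
unsigned value rather than produce a negative one (and would trip
-Wsign-conversion).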
@@ -111,14 +112,15 @@ double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
 // Marsaglia and Marsaglia for details.
 double AndersonDarlingTest(const std::vector<double>& random_sample) {
   double ad_statistic = AndersonDarlingStatistic(random_sample);
-  double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
+  double p = AndersonDarlingPValue(static_cast<int>(random_sample.size()),
+                                   ad_statistic);
   return p;
 }

 TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
   ExponentialBiased eb;
   for (int runs = 0; runs < 10; ++runs) {
-    for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
+    for (int64_t flips = eb.GetSkipCount(1); flips > 0; --flips) {
       printf("head...");
     }
     printf("tail\n");

@@ -132,7 +134,7 @@ TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
 TEST(ExponentialBiasedTest, SampleDemoWithStride) {
   ExponentialBiased eb;
-  int stride = eb.GetStride(10);
+  int64_t stride = eb.GetStride(10);
   int samples = 0;
   for (int i = 0; i < 10000000; ++i) {
     if (--stride == 0) {

@@ -147,7 +149,7 @@ TEST(ExponentialBiasedTest, SampleDemoWithStride) {
 // Testing that NextRandom generates uniform random numbers. Applies the
 // Anderson-Darling test for uniformity
 TEST(ExponentialBiasedTest, TestNextRandom) {
-  for (auto n : std::vector<int>({
+  for (auto n : std::vector<size_t>({
            10,  // Check short-range correlation
            100, 1000,
            10000  // Make sure there's no systemic error

@@ -161,7 +163,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
     }
     std::vector<uint64_t> int_random_sample(n);
     // Collect samples
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       int_random_sample[i] = x;
       x = ExponentialBiased::NextRandom(x);
     }

@@ -169,7 +171,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
     std::sort(int_random_sample.begin(), int_random_sample.end());
     std::vector<double> random_sample(n);
     // Convert them to uniform randoms (in the range [0,1])
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       random_sample[i] =
           static_cast<double>(int_random_sample[i]) / max_prng_value;
     }

absl/random/internal/seed_material.cc

@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
   }

   while (success && buffer_size > 0) {
-    int bytes_read = read(dev_urandom, buffer, buffer_size);
+    ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
     int read_error = errno;
     success = (bytes_read > 0);
     if (success) {
       buffer += bytes_read;
-      buffer_size -= bytes_read;
+      buffer_size -= static_cast<size_t>(bytes_read);
     } else if (bytes_read == -1 && read_error == EINTR) {
       success = true;  // Need to try again.
     }

absl/synchronization/internal/futex.h

@@ -87,7 +87,7 @@ class FutexImpl {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
                        KernelTimeout t) {
-    int err = 0;
+    long err = 0;  // NOLINT(runtime/int)
     if (t.has_timeout()) {
       // https://locklessinc.com/articles/futex_cheat_sheet/
       // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.

@@ -105,41 +105,44 @@ class FutexImpl {
                     FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
     }
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }

   static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
                                        int32_t bits,
                                        const struct timespec *abstime) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }

   static int Wake(std::atomic<int32_t> *v, int32_t count) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }

   // FUTEX_WAKE_BITSET
   static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 };
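
Why `long` in all four wrappers: on Linux, syscall(2) is declared to return
long, so storing its result in an int is a 64-to-32 narrowing on LP64 targets;
the NOLINT markers acknowledge the style rule that otherwise discourages raw
`long`. A standalone sketch of the same pattern, assuming a Linux target
(RawGetPid is a hypothetical example, not absl code):

    #include <sys/syscall.h>
    #include <unistd.h>

    // Keep the syscall(2) result in its natural type, then convert explicitly
    // at the boundary where the narrower type is actually required.
    pid_t RawGetPid() {
      long ret = syscall(SYS_getpid);  // NOLINT(runtime/int)
      return static_cast<pid_t>(ret);
    }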

absl/time/internal/test_util.cc

@@ -84,14 +84,15 @@ class TestZoneInfoSource : public cctz::ZoneInfoSource {
       : data_(data), end_(data + size) {}

   std::size_t Read(void* ptr, std::size_t size) override {
-    const std::size_t len = std::min<std::size_t>(size, end_ - data_);
+    const std::size_t len =
+        std::min(size, static_cast<std::size_t>(end_ - data_));
     memcpy(ptr, data_, len);
     data_ += len;
     return len;
   }

   int Skip(std::size_t offset) override {
-    data_ += std::min<std::size_t>(offset, end_ - data_);
+    data_ += std::min(offset, static_cast<std::size_t>(end_ - data_));
     return 0;
   }
