Fix "unsafe narrowing" warnings in absl, 1/n.

Addresses failures with the following warnings, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .h and win32 .inc files.)

Bug: chromium:1292951
PiperOrigin-RevId: 463835431
Change-Id: If8e5f7f651d5cd96035e23e4623bdb08a7fedabe
Branch: pull/1237/head
Author: Abseil Team, committed by Copybara-Service
Commit: 7f51ef5ed2 (parent: c7e60ccfcd)
18 changed files (changed lines per file):

  6  absl/base/internal/spinlock.cc
  2  absl/base/internal/spinlock.h
  5  absl/base/internal/spinlock_win32.inc
  2  absl/base/internal/unscaledcycleclock.h
 24  absl/base/spinlock_test_common.cc
  9  absl/container/internal/hashtablez_sampler.cc
  6  absl/container/internal/hashtablez_sampler.h
  6  absl/container/internal/inlined_vector.h
 11  absl/container/internal/raw_hash_set.h
  9  absl/debugging/internal/stacktrace_win32-inl.inc
 13  absl/debugging/symbolize_win32.inc
 12  absl/profiling/internal/sample_recorder.h
  5  absl/strings/cord_buffer.h
 20  absl/strings/internal/char_map.h
  2  absl/strings/internal/cord_internal.h
  8  absl/strings/internal/str_format/parser.h
 32  absl/strings/substitute.h
  3  absl/synchronization/internal/kernel_timeout.h
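
The diffs below all follow the same pattern: where a 64-bit or differently-signed value flows into a narrower or opposite-signed type, either the declared type is widened (uint64_t to int64_t, int32_t to size_t) or the conversion is made explicit with static_cast. As a rough illustration only (hypothetical names, not code from this CL), the kind of code these flags reject and the explicit-cast fix look like this:

#include <cstddef>
#include <cstdint>

void TakesInt(int bytes) { (void)bytes; }

// Flagged: -Wshorten-64-to-32 on the call, -Wsign-compare on the comparison.
void Before(int64_t total, size_t count) {
  TakesInt(total);
  if (count < total) TakesInt(0);
}

// Same logic with the conversions spelled out, so the warnings are silenced
// and the narrowing is visibly intentional.
void After(int64_t total, size_t count) {
  TakesInt(static_cast<int>(total));
  if (static_cast<int64_t>(count) < total) TakesInt(0);
}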

@@ -178,7 +178,7 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
// reserve a unitary wait time to represent that a waiter exists without our
// own acquisition having been contended.
if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
const int64_t wait_cycles = DecodeWaitCycles(lock_value);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(this, wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
@@ -220,9 +220,9 @@ uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
return clamped;
}
uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
// Cast to uint32_t first to ensure bits [63:32] are cleared.
const uint64_t scaled_wait_time =
const int64_t scaled_wait_time =
static_cast<uint32_t>(lock_value & kWaitTimeMask);
return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

@@ -137,7 +137,7 @@ class ABSL_LOCKABLE SpinLock {
int64_t wait_end_time);
// Extract number of wait cycles in a lock value.
static uint64_t DecodeWaitCycles(uint32_t lock_value);
static int64_t DecodeWaitCycles(uint32_t lock_value);
// Provide access to protected method above. Use for testing only.
friend struct SpinLockTest;

@@ -27,7 +27,10 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
} else if (loop == 1) {
Sleep(0);
} else {
Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
// SpinLockSuggestedDelayNS() always returns a positive integer, so this
// static_cast is safe.
Sleep(static_cast<DWORD>(
absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
}
}

@@ -119,7 +119,7 @@ class UnscaledCycleClock {
inline int64_t UnscaledCycleClock::Now() {
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
return static_cast<int64_t>((high << 32) | low);
}
#endif
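
The rdtsc change is the same idea applied to a return value: the shift and the bitwise-or stay in uint64_t, and the conversion to the signed return type is written out instead of left implicit. A small illustrative sketch (hypothetical helper, not Abseil's API):

#include <cstdint>

// Combine two 32-bit halves in unsigned arithmetic, then convert to the
// signed return type explicitly so the uint64_t -> int64_t conversion does
// not trip -Wsign-conversion.
int64_t CombineHalves(uint64_t high, uint64_t low) {
  return static_cast<int64_t>((high << 32) | low);
}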

@@ -48,7 +48,7 @@ struct SpinLockTest {
int64_t wait_end_time) {
return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
}
static uint64_t DecodeWaitCycles(uint32_t lock_value) {
static int64_t DecodeWaitCycles(uint32_t lock_value) {
return SpinLock::DecodeWaitCycles(lock_value);
}
};
@@ -133,20 +133,20 @@ TEST(SpinLock, WaitCyclesEncoding) {
// but the lower kProfileTimestampShift will be dropped.
const int kMaxCyclesShift =
32 - kLockwordReservedShift + kProfileTimestampShift;
const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
// These bits should be zero after encoding.
const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
// These bits are dropped when wait cycles are encoded.
const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
// Test a bunch of random values
std::default_random_engine generator;
// Shift to avoid overflow below.
std::uniform_int_distribution<uint64_t> time_distribution(
0, std::numeric_limits<uint64_t>::max() >> 4);
std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
std::uniform_int_distribution<int64_t> time_distribution(
0, std::numeric_limits<int64_t>::max() >> 3);
std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
for (int i = 0; i < 100; i++) {
int64_t start_time = time_distribution(generator);
@@ -154,7 +154,7 @@ TEST(SpinLock, WaitCyclesEncoding) {
int64_t end_time = start_time + cycles;
uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
EXPECT_EQ(0, lock_value & kLockwordReservedMask);
uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
EXPECT_EQ(0, decoded & kProfileTimestampMask);
EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
}
@@ -178,21 +178,21 @@ TEST(SpinLock, WaitCyclesEncoding) {
// Test clamping
uint32_t max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
const int64_t step = (1 << kProfileTimestampShift);
uint32_t after_max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
uint64_t after_max_value_decoded =
int64_t after_max_value_decoded =
SpinLockTest::DecodeWaitCycles(after_max_value);
EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
start_time, start_time + kMaxCycles - step);
uint64_t before_max_value_decoded =
SpinLockTest::DecodeWaitCycles(before_max_value);
int64_t before_max_value_decoded =
SpinLockTest::DecodeWaitCycles(before_max_value);
EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}

@@ -215,21 +215,20 @@ void SetHashtablezSampleParameterInternal(int32_t rate) {
}
}
int32_t GetHashtablezMaxSamples() {
size_t GetHashtablezMaxSamples() {
return GlobalHashtablezSampler().GetMaxSamples();
}
void SetHashtablezMaxSamples(int32_t max) {
void SetHashtablezMaxSamples(size_t max) {
SetHashtablezMaxSamplesInternal(max);
TriggerHashtablezConfigListener();
}
void SetHashtablezMaxSamplesInternal(int32_t max) {
void SetHashtablezMaxSamplesInternal(size_t max) {
if (max > 0) {
GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
static_cast<long long>(max)); // NOLINT(runtime/int)
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0");
}
}
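
Note the log-message change that goes with the signature change: with max now unsigned, the only rejectable value is zero, so formatting it with %lld as a signed value no longer applies. Comparing an unsigned parameter against zero with >= would also trip -Wtautological-unsigned-zero-compare, as in this illustrative sketch (hypothetical function, not from this CL):

#include <cstddef>

// Hypothetical validator for an unsigned "max samples" setting.
bool IsValidMaxSamples(size_t max) {
  // return max >= 0;  // always true for unsigned: -Wtautological-unsigned-zero-compare
  return max > 0;      // "nonzero" is the only meaningful check left
}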

@@ -281,9 +281,9 @@ void SetHashtablezSampleParameter(int32_t rate);
void SetHashtablezSampleParameterInternal(int32_t rate);
// Sets a soft max for the number of samples that will be kept.
int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
void SetHashtablezMaxSamplesInternal(int32_t max);
size_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(size_t max);
void SetHashtablezMaxSamplesInternal(size_t max);
// Configuration override.
// This allows process-wide sampling without depending on order of

@@ -784,9 +784,9 @@ auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
-> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
SizeType<A> erase_size = std::distance(from, to);
SizeType<A> erase_index =
std::distance(ConstIterator<A>(storage_view.data), from);
auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
auto erase_index = static_cast<SizeType<A>>(
std::distance(ConstIterator<A>(storage_view.data), from));
SizeType<A> erase_end_index = erase_index + erase_size;
IteratorValueAdapter<A, MoveIterator<A>> move_values(

@@ -545,7 +545,7 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of slots that match hash.
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(hash);
auto match = _mm_set1_epi8(static_cast<char>(hash));
return BitMask<uint32_t, kWidth>(
static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
}
@@ -557,7 +557,7 @@ struct GroupSse2Impl {
return NonIterableBitMask<uint32_t, kWidth>(
static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
auto match = _mm_set1_epi8(static_cast<h2_t>(ctrl_t::kEmpty));
auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
return NonIterableBitMask<uint32_t, kWidth>(
static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
@@ -565,14 +565,14 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty or deleted slots.
NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
@@ -635,7 +635,8 @@ struct GroupAArch64Impl {
// Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
// so we should be fine.
constexpr uint64_t bits = 0x0101010101010101ULL;
return countr_zero((mask | ~(mask >> 7)) & bits) >> 3;
return static_cast<uint32_t>(countr_zero((mask | ~(mask >> 7)) & bits) >>
3);
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {

@@ -63,11 +63,12 @@ static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
const void*, int* min_dropped_frames) {
int n = 0;
if (!RtlCaptureStackBackTrace_fn) {
// can't find a stacktrace with no function to call
USHORT n = 0;
if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) {
// can't get a stacktrace with no function/invalid args
} else {
n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
n = RtlCaptureStackBackTrace_fn(static_cast<ULONG>(skip_count) + 2,
static_cast<ULONG>(max_depth), result, 0);
}
if (IS_STACK_FRAMES) {
// No implementation for finding out the stack frame sizes yet.

@@ -65,14 +65,15 @@ bool Symbolize(const void* pc, char* out, int out_size) {
if (!SymFromAddr(process, reinterpret_cast<DWORD64>(pc), nullptr, symbol)) {
return false;
}
strncpy(out, symbol->Name, out_size);
if (out[out_size - 1] != '\0') {
const size_t out_size_t = static_cast<size_t>(out_size);
strncpy(out, symbol->Name, out_size_t);
if (out[out_size_t - 1] != '\0') {
// strncpy() does not '\0' terminate when it truncates.
static constexpr char kEllipsis[] = "...";
int ellipsis_size =
std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
out[out_size - 1] = '\0';
size_t ellipsis_size =
std::min(sizeof(kEllipsis) - 1, out_size_t - 1);
memcpy(out + out_size_t - ellipsis_size - 1, kEllipsis, ellipsis_size);
out[out_size_t - 1] = '\0';
}
return true;
}

@@ -77,8 +77,8 @@ class SampleRecorder {
// samples that have been dropped.
int64_t Iterate(const std::function<void(const T& stack)>& f);
int32_t GetMaxSamples() const;
void SetMaxSamples(int32_t max);
size_t GetMaxSamples() const;
void SetMaxSamples(size_t max);
private:
void PushNew(T* sample);
@@ -88,7 +88,7 @@ class SampleRecorder {
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
std::atomic<int32_t> max_samples_{1 << 20};
std::atomic<size_t> max_samples_{1 << 20};
// Intrusive lock free linked lists for tracking samples.
//
@@ -186,7 +186,7 @@ T* SampleRecorder<T>::PopDead(Targs... args) {
template <typename T>
template <typename... Targs>
T* SampleRecorder<T>::Register(Targs&&... args) {
int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
size_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > max_samples_.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
dropped_samples_.fetch_add(1, std::memory_order_relaxed);
@@ -229,12 +229,12 @@ int64_t SampleRecorder<T>::Iterate(
}
template <typename T>
void SampleRecorder<T>::SetMaxSamples(int32_t max) {
void SampleRecorder<T>::SetMaxSamples(size_t max) {
max_samples_.store(max, std::memory_order_release);
}
template <typename T>
int32_t SampleRecorder<T>::GetMaxSamples() const {
size_t SampleRecorder<T>::GetMaxSamples() const {
return max_samples_.load(std::memory_order_acquire);
}

@@ -330,8 +330,7 @@ class CordBuffer {
// Returns the available area of the internal SSO data
absl::Span<char> short_available() {
assert(is_short());
const size_t length = (short_rep.raw_size >> 1);
const size_t length = short_length();
return absl::Span<char>(short_rep.data + length,
kInlineCapacity - length);
}
@@ -347,7 +346,7 @@
// Returns the length of the internal SSO data.
size_t short_length() const {
assert(is_short());
return short_rep.raw_size >> 1;
return static_cast<size_t>(short_rep.raw_size >> 1);
}
// Sets the length of the internal SSO data.

@@ -103,10 +103,9 @@ class Charmap {
constexpr Charmap(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
: m_{b0, b1, b2, b3} {}
static constexpr uint64_t RangeForWord(unsigned char lo, unsigned char hi,
uint64_t word) {
return OpenRangeFromZeroForWord(hi + 1, word) &
~OpenRangeFromZeroForWord(lo, word);
static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) {
return OpenRangeFromZeroForWord(static_cast<unsigned char>(hi) + 1, word) &
~OpenRangeFromZeroForWord(static_cast<unsigned char>(lo), word);
}
// All the chars in the specified word of the range [0, upper).
@@ -119,13 +118,16 @@ class Charmap {
: (~static_cast<uint64_t>(0) >> (64 - upper % 64));
}
static constexpr uint64_t CharMaskForWord(unsigned char x, uint64_t word) {
return (x / 64 == word) ? (static_cast<uint64_t>(1) << (x % 64)) : 0;
static constexpr uint64_t CharMaskForWord(char x, uint64_t word) {
const auto unsigned_x = static_cast<unsigned char>(x);
return (unsigned_x / 64 == word)
? (static_cast<uint64_t>(1) << (unsigned_x % 64))
: 0;
}
private:
void SetChar(unsigned char c) {
m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
void SetChar(char c) {
const auto unsigned_c = static_cast<unsigned char>(c);
m_[unsigned_c / 64] |= static_cast<uint64_t>(1) << (unsigned_c % 64);
}
uint64_t m_[4];
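
The Charmap changes take plain char, whose signedness is platform-dependent, and route every use through unsigned char before doing arithmetic, so bytes above 0x7f cannot sign-extend into negative shift counts or array indices. A minimal sketch of the pattern (hypothetical helper, not the Charmap API):

#include <cstdint>

// Convert through unsigned char first so a byte such as 0x80 maps to 128
// instead of sign-extending to -128 on platforms where char is signed.
constexpr uint64_t BitForChar(char c) {
  return uint64_t{1} << (static_cast<unsigned char>(c) % 64);
}
// e.g. BitForChar(static_cast<char>(0x80)) selects bit 0, since 128 % 64 == 0.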

@@ -570,7 +570,7 @@ class InlineData {
// Requires the current instance to hold inline data.
size_t inline_size() const {
assert(!is_tree());
return tag() >> 1;
return static_cast<size_t>(tag()) >> 1;
}
// Sets the size of the inlined character data inside this instance.

@@ -155,10 +155,11 @@ bool ParseFormatString(string_view src, Consumer consumer) {
static_cast<const char*>(memchr(p, '%', static_cast<size_t>(end - p)));
if (!percent) {
// We found the last substring.
return consumer.Append(string_view(p, end - p));
return consumer.Append(string_view(p, static_cast<size_t>(end - p)));
}
// We found a percent, so push the text run then process the percent.
if (ABSL_PREDICT_FALSE(!consumer.Append(string_view(p, percent - p)))) {
if (ABSL_PREDICT_FALSE(!consumer.Append(
string_view(p, static_cast<size_t>(percent - p))))) {
return false;
}
if (ABSL_PREDICT_FALSE(percent + 1 >= end)) return false;
@@ -189,7 +190,8 @@ bool ParseFormatString(string_view src, Consumer consumer) {
p = ConsumeUnboundConversion(percent + 1, end, &conv, &next_arg);
if (ABSL_PREDICT_FALSE(p == nullptr)) return false;
if (ABSL_PREDICT_FALSE(!consumer.ConvertOne(
conv, string_view(percent + 1, p - (percent + 1))))) {
conv, string_view(percent + 1,
static_cast<size_t>(p - (percent + 1)))))) {
return false;
}
} else {
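
The parser changes address pointer subtraction: the difference between two pointers is a signed ptrdiff_t, while string_view's length parameter is size_t, so each difference is cast explicitly once the surrounding logic guarantees it is non-negative. A hedged sketch of the pattern (hypothetical function; assumes absl::string_view's (const char*, size_t) constructor):

#include <cstddef>

#include "absl/strings/string_view.h"

// p <= end is a precondition here, so end - p is non-negative and the
// ptrdiff_t -> size_t cast is safe; without it, -Wsign-conversion fires.
absl::string_view Tail(const char* p, const char* end) {
  return absl::string_view(p, static_cast<size_t>(end - p));
}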

@@ -125,28 +125,44 @@ class Arg {
}
Arg(short value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(unsigned short value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(int value) // NOLINT(runtime/explicit)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(unsigned int value) // NOLINT(runtime/explicit)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(long value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(unsigned long value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(long long value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(unsigned long long value) // NOLINT(*)
: piece_(scratch_,
numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
static_cast<size_t>(
numbers_internal::FastIntToBuffer(value, scratch_) -
scratch_)) {}
Arg(float value) // NOLINT(runtime/explicit)
: piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) {
}

@@ -111,7 +111,8 @@ class KernelTimeout {
constexpr uint64_t max_nanos =
(std::numeric_limits<int64_t>::max)() - 999999u;
uint64_t ms_from_now =
(std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) /
1000000u;
if (ms_from_now > kInfinite) {
return kInfinite;
}
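
The kernel_timeout change keeps both operands of std::min in the same unsigned type: mixing uint64_t and int64_t would either break template argument deduction or force an implicit sign conversion. A small illustrative sketch (hypothetical function; the parenthesized (std::min) mirrors the header's guard against Windows' min macro):

#include <algorithm>
#include <cstdint>

// deadline_ns >= now_ns is assumed, so the signed difference is non-negative
// and converting it to uint64_t before taking (std::min) is safe.
uint64_t MillisFromNow(uint64_t max_nanos, int64_t deadline_ns, int64_t now_ns) {
  const uint64_t ns_from_now =
      (std::min)(max_nanos, static_cast<uint64_t>(deadline_ns - now_ns));
  return (ns_from_now + 999999u) / 1000000u;
}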
