Export of internal Abseil changes

--
7f5caec21a1a88db6486b8bc2e5f6d5baba076ed by Derek Mauro <dmauro@google.com>:

Tag absl::StatusOr with [[nodiscard]] when it is available

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result))

Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but
we would need to fix all code to build with [[nodiscard]] first.
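
For illustration (not part of this commit), a minimal sketch of why the
class-level attribute helps: with [[nodiscard]] on the class, every function
returning the type by value is covered, so the hypothetical DoWork() below
needs no per-function annotation.

    // Hypothetical example, not Abseil source.
    class [[nodiscard]] Status {};

    Status DoWork() { return Status{}; }  // covered by the class attribute

    void Caller() {
      DoWork();  // compilers supporting [[nodiscard]] warn: value discarded
    }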

PiperOrigin-RevId: 422628161

--
236be69f0f34ccaa3adc831075613847797e6557 by Jorg Brown <jorg@google.com>:

Make sure all of absl/strings compiles cleanly even with -Wsign-compare and -Wconversion enabled.
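
For illustration (not taken from the change itself), the typical fix pattern:
pointer subtraction yields a signed ptrdiff_t, so storing the result in a
size_t trips -Wconversion unless the narrowing is spelled out. A hypothetical
helper showing the cast:

    #include <cstddef>

    // Hypothetical example: ptrdiff_t -> size_t must be an explicit cast
    // to compile cleanly under -Wconversion.
    std::size_t Remaining(const char* cur, const char* limit) {
      return static_cast<std::size_t>(limit - cur);
    }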

PiperOrigin-RevId: 422573904

--
005669883a8794e6d19dc4bbb4f0e18032e2fbc9 by Chris Kennelly <ckennelly@google.com>:

Include sampling stride in hashtable sampling data.

This allows us to weight each sample to control for our sampling rate.
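
For illustration (not part of the commit): a sample taken at stride S stands
in for roughly S tables, so summing each sample's recorded stride estimates
the total population. A hypothetical sketch of that weighting:

    #include <cstdint>
    #include <vector>

    // Hypothetical example: estimate the total number of hashtables from
    // the per-sample strides (weights) recorded at sampling time.
    int64_t EstimateTotalTables(const std::vector<int64_t>& weights) {
      int64_t total = 0;
      for (int64_t w : weights) total += w;
      return total;
    }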

PiperOrigin-RevId: 421886935

--
78046ce6b429b9f5b6c97b05e8448a791db93bbe by Abseil Team <absl-team@google.com>:

Use __builtin_bit_cast for absl::bit_cast when possible

This makes absl::bit_cast match the requirements of the standard when compiler support exists.
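
For illustration (not part of the commit), one observable effect: where
__builtin_bit_cast is available, absl::bit_cast is constexpr (per the diff
below) and can be used in constant expressions.

    #include <cstdint>
    #include "absl/base/casts.h"

    // Compiles only where absl::bit_cast is constexpr (i.e. the compiler
    // provides __builtin_bit_cast); 1.0f is 0x3f800000 in IEEE-754.
    constexpr uint32_t kOneBits = absl::bit_cast<uint32_t>(1.0f);
    static_assert(kOneBits == 0x3f800000, "bit pattern of 1.0f");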

PiperOrigin-RevId: 421883999

--
f397461f4bbeabd32437df0f2275663aeb51adb2 by Derek Mauro <dmauro@google.com>:

Tag absl::Status with [[nodiscard]] when it is available

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result))

Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but
we would need to fix all code to build with [[nodiscard]] first.

PiperOrigin-RevId: 421825565
GitOrigin-RevId: 7f5caec21a1a88db6486b8bc2e5f6d5baba076ed
Change-Id: I760b45b68f6012809c70c2a584e4144a99f98733
parent c59e7e59f5
commit 9bb4285711
 absl/base/casts.h                                   | 18
 absl/container/internal/hashtablez_sampler.cc       | 34
 absl/container/internal/hashtablez_sampler.h        | 18
 absl/container/internal/hashtablez_sampler_test.cc  | 51
 absl/profiling/internal/sample_recorder.h           |  1
 absl/profiling/internal/sample_recorder_test.cc     | 39
 absl/status/internal/status_internal.h              |  8
 absl/status/statusor.h                              |  6
 absl/strings/internal/escaping.cc                   |  9
 absl/strings/internal/ostringstream.cc              |  2
 absl/strings/internal/utf8.cc                       | 18
 11 files changed

--- a/absl/base/casts.h
+++ b/absl/base/casts.h
@@ -44,8 +44,12 @@ struct is_bitcastable
           bool,
           sizeof(Dest) == sizeof(Source) &&
               type_traits_internal::is_trivially_copyable<Source>::value &&
-              type_traits_internal::is_trivially_copyable<Dest>::value &&
-              std::is_default_constructible<Dest>::value> {};
+              type_traits_internal::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+              && std::is_default_constructible<Dest>::value
+#endif
+          > {
+};
 
 }  // namespace internal_casts
@@ -147,20 +151,26 @@ constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
 // introducing this undefined behavior (since the original value is never
 // accessed in the wrong way).
 //
-// NOTE: The requirements here are stricter than the bit_cast of standard
-// proposal P0476 due to the need for workarounds and lack of intrinsics.
+// NOTE: The requirements here are more strict than the bit_cast of standard
+// proposal P0476 when __builtin_bit_cast is not available.
+// Specifically, this implementation also requires `Dest` to be
+// default-constructible.
 template <
     typename Dest, typename Source,
     typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
                             int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else
 inline Dest bit_cast(const Source& source) {
   Dest dest;
   memcpy(static_cast<void*>(std::addressof(dest)),
          static_cast<const void*>(std::addressof(source)), sizeof(dest));
   return dest;
 }
+#endif
 
 // NOTE: This overload is only picked if the requirements of bit_cast are
 // not met. It is therefore UB, but is provided temporarily as previous

--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -27,6 +27,7 @@
 #include "absl/profiling/internal/exponential_biased.h"
 #include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -53,7 +54,7 @@ void TriggerHashtablezConfigListener() {
 }  // namespace
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 HashtablezSampler& GlobalHashtablezSampler() {
@@ -64,7 +65,8 @@ HashtablezSampler& GlobalHashtablezSampler() {
 HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+                                        size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -77,6 +79,7 @@ void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   max_reserve.store(0, std::memory_order_relaxed);
 
   create_time = absl::Now();
+  weight = stride;
   // The inliner makes hardcoded skip_count difficult (especially when combined
   // with LTO). We use the ability to exclude stacks by regex when encoding
   // instead.
@@ -105,23 +108,32 @@ static bool ShouldForceSampling() {
   return state == kForce;
 }
 
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
-    *next_sample = 1;
+    next_sample.next_sample = 1;
+    const int64_t old_stride = exchange(next_sample.sample_stride, 1);
     HashtablezInfo* result =
-        GlobalHashtablezSampler().Register(inline_element_size);
+        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
     return result;
   }
 
 #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  *next_sample = std::numeric_limits<int64_t>::max();
+  next_sample = {
+      std::numeric_limits<int64_t>::max(),
+      std::numeric_limits<int64_t>::max(),
+  };
   return nullptr;
 #else
-  bool first = *next_sample < 0;
-  *next_sample = g_exponential_biased_generator.GetStride(
+  bool first = next_sample.next_sample < 0;
+  const int64_t next_stride = g_exponential_biased_generator.GetStride(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+  next_sample.next_sample = next_stride;
+  const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
   // Small values of interval are equivalent to just sampling next time.
-  ABSL_ASSERT(*next_sample >= 1);
+  ABSL_ASSERT(next_stride >= 1);
 
   // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
   // low enough that we will start sampling in a reasonable time, so we just use
@@ -131,11 +143,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   // We will only be negative on our first count, so we should just retry in
   // that case.
   if (first) {
-    if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+    if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
     return SampleSlow(next_sample, inline_element_size);
   }
 
-  return GlobalHashtablezSampler().Register(inline_element_size);
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
 #endif
 }

--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling(size_t inline_element_size_value)
+  void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
@@ -145,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
                         std::memory_order_relaxed);
 }
 
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+struct SamplingState {
+  int64_t next_sample;
+  // When we make a sampling decision, we record that distance so we can weight
+  // each sample.
+  int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size);
 void UnsampleSlow(HashtablezInfo* info);
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -235,7 +243,7 @@ class HashtablezInfoHandle {
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 // Returns an RAII sampling handle that manages registration and unregistation
@@ -243,11 +251,11 @@ extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
 inline HashtablezInfoHandle Sample(
     size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+  if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
   }
   return HashtablezInfoHandle(
-      SampleSlow(&global_next_sample, inline_element_size));
+      SampleSlow(global_next_sample, inline_element_size));
 #else
   return HashtablezInfoHandle(nullptr);
 #endif  // !ABSL_PER_THREAD_TLS

--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,8 +70,9 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
 }
 
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+  const int64_t test_stride = 123;
   const size_t test_element_size = 17;
-  auto* info = s->Register(test_element_size);
+  auto* info = s->Register(test_stride, test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -79,10 +80,11 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
 
 TEST(HashtablezInfoTest, PrepareForSampling) {
   absl::Time test_start = absl::Now();
+  const int64_t test_stride = 123;
   const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
 
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
@@ -95,6 +97,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
+  EXPECT_EQ(info.weight, test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
@@ -108,7 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride * 2, test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,6 +122,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
+  EXPECT_EQ(info.weight, 2 * test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
@@ -126,8 +130,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 21;
   const size_t test_element_size = 19;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -139,8 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 25;
   const size_t test_element_size = 23;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -160,10 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
 }
 
 TEST(HashtablezInfoTest, RecordErase) {
+  const int64_t test_stride = 31;
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -175,10 +182,11 @@ TEST(HashtablezInfoTest, RecordErase) {
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
+  const int64_t test_stride = 33;
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -203,8 +211,9 @@ TEST(HashtablezInfoTest, RecordRehash) {
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 35;
   const size_t test_element_size = 33;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -224,9 +233,10 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezSampleParameter(100);
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
@@ -238,9 +248,10 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
   SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
@@ -267,14 +278,16 @@ TEST(HashtablezSamplerTest, Sample) {
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
+  const int64_t test_stride = 41;
   const size_t test_element_size = 39;
-  HashtablezInfoHandle h(sampler.Register(test_element_size));
+  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
   bool found = false;
   sampler.Iterate([&](const HashtablezInfo& h) {
     if (&h == info) {
+      EXPECT_EQ(h.weight, test_stride);
       EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
       found = true;
     }
@@ -340,19 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
+    const int64_t sampling_stride = 11 + i % 3;
     const size_t elt_size = 10 + i % 2;
-    pool.Schedule([&sampler, &stop, elt_size]() {
+    pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
       std::random_device rd;
      std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register(elt_size));
+          infoz.push_back(sampler.Register(sampling_stride, elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register(elt_size));
+            infoz.push_back(sampler.Register(sampling_stride, elt_size));
             break;
           }
           case 1: {
@@ -361,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
             HashtablezInfo* info = infoz[p];
             infoz[p] = infoz.back();
             infoz.pop_back();
+            EXPECT_EQ(info->weight, sampling_stride);
             sampler.Unregister(info);
             break;
           }

--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -46,6 +46,7 @@ struct Sample {
   absl::Mutex init_mu;
   T* next = nullptr;
   T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+  int64_t weight;  // How many sampling events were required to sample this one.
 };
 
 // Holds samples and their associated stack traces with a soft limit of

--- a/absl/profiling/internal/sample_recorder_test.cc
+++ b/absl/profiling/internal/sample_recorder_test.cc
@@ -36,7 +36,7 @@ using ::testing::UnorderedElementsAre;
 struct Info : public Sample<Info> {
  public:
-  void PrepareForSampling() {}
+  void PrepareForSampling(int64_t w) { weight = w; }
   std::atomic<size_t> size;
   absl::Time create_time;
 };
@@ -49,8 +49,14 @@ std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
   return res;
 }
 
-Info* Register(SampleRecorder<Info>* s, size_t size) {
-  auto* info = s->Register();
+std::vector<int64_t> GetWeights(SampleRecorder<Info>* s) {
+  std::vector<int64_t> res;
+  s->Iterate([&](const Info& info) { res.push_back(info.weight); });
+  return res;
+}
+
+Info* Register(SampleRecorder<Info>* s, int64_t weight, size_t size) {
+  auto* info = s->Register(weight);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -58,13 +64,15 @@ Info* Register(SampleRecorder<Info>* s, size_t size) {
 TEST(SampleRecorderTest, Registration) {
   SampleRecorder<Info> sampler;
-  auto* info1 = Register(&sampler, 1);
+  auto* info1 = Register(&sampler, 31, 1);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31));
 
-  auto* info2 = Register(&sampler, 2);
+  auto* info2 = Register(&sampler, 32, 2);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
   info1->size.store(3);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32));
 
   sampler.Unregister(info1);
   sampler.Unregister(info2);
@@ -74,18 +82,22 @@ TEST(SampleRecorderTest, Unregistration) {
   SampleRecorder<Info> sampler;
   std::vector<Info*> infos;
   for (size_t i = 0; i < 3; ++i) {
-    infos.push_back(Register(&sampler, i));
+    infos.push_back(Register(&sampler, 33 + i, i));
   }
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35));
 
   sampler.Unregister(infos[1]);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35));
 
-  infos.push_back(Register(&sampler, 3));
-  infos.push_back(Register(&sampler, 4));
+  infos.push_back(Register(&sampler, 36, 3));
+  infos.push_back(Register(&sampler, 37, 4));
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37));
   sampler.Unregister(infos[3]);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37));
 
   sampler.Unregister(infos[0]);
   sampler.Unregister(infos[2]);
@@ -99,18 +111,18 @@ TEST(SampleRecorderTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    pool.Schedule([&sampler, &stop, i]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<Info*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(i));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(i));
             break;
           }
           case 1: {
@@ -119,6 +131,7 @@ TEST(SampleRecorderTest, MultiThreaded) {
             Info* info = infoz[p];
             infoz[p] = infoz.back();
             infoz.pop_back();
+            EXPECT_EQ(info->weight, i);
             sampler.Unregister(info);
             break;
           }
@@ -143,8 +156,8 @@ TEST(SampleRecorderTest, MultiThreaded) {
 TEST(SampleRecorderTest, Callback) {
   SampleRecorder<Info> sampler;
 
-  auto* info1 = Register(&sampler, 1);
-  auto* info2 = Register(&sampler, 2);
+  auto* info1 = Register(&sampler, 39, 1);
+  auto* info2 = Register(&sampler, 40, 2);
 
   static const Info* expected;

--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -16,6 +16,7 @@
 
 #include <string>
 
+#include "absl/base/attributes.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/strings/cord.h"
@@ -25,7 +26,14 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
+// as part of a class definitions (b/6995610), so we use a forward declaration.
+//
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] Status;
+#else
 class ABSL_MUST_USE_RESULT Status;
+#endif
 ABSL_NAMESPACE_END
 }  // namespace absl
 #endif  // !SWIG

--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -106,7 +106,13 @@ class BadStatusOrAccess : public std::exception {
 // Returned StatusOr objects may not be ignored.
 template <typename T>
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+class [[nodiscard]] StatusOr;
+#else
 class ABSL_MUST_USE_RESULT StatusOr;
+#endif  // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
 
 // absl::StatusOr<T>
 //

--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -102,8 +102,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
     }
   }
   // To save time, we didn't update szdest or szsrc in the loop. So do it now.
-  szdest = limit_dest - cur_dest;
-  szsrc = limit_src - cur_src;
+  szdest = static_cast<size_t>(limit_dest - cur_dest);
+  szsrc = static_cast<size_t>(limit_src - cur_src);
 
   /* now deal with the tail (<=3 bytes) */
   switch (szsrc) {
@@ -154,7 +154,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
       // the loop because the loop above always reads 4 bytes, and the fourth
       // byte is past the end of the input.
       if (szdest < 4) return 0;
-      uint32_t in = (cur_src[0] << 16) + absl::big_endian::Load16(cur_src + 1);
+      uint32_t in =
+          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
       cur_dest[0] = base64[in >> 18];
       in &= 0x3FFFF;
       cur_dest[1] = base64[in >> 12];
@@ -172,7 +173,7 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
       ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
       break;
   }
-  return (cur_dest - dest);
+  return static_cast<size_t>(cur_dest - dest);
 }
 
 }  // namespace strings_internal

--- a/absl/strings/internal/ostringstream.cc
+++ b/absl/strings/internal/ostringstream.cc
@@ -27,7 +27,7 @@ OStringStream::Buf::int_type OStringStream::overflow(int c) {
 std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
   assert(s_);
-  s_->append(s, n);
+  s_->append(s, static_cast<size_t>(n));
   return n;
 }

--- a/absl/strings/internal/utf8.cc
+++ b/absl/strings/internal/utf8.cc
@@ -25,25 +25,25 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
     *buffer = static_cast<char>(utf8_char);
     return 1;
   } else if (utf8_char <= 0x7FF) {
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xC0 | utf8_char;
+    buffer[0] = static_cast<char>(0xC0 | utf8_char);
     return 2;
   } else if (utf8_char <= 0xFFFF) {
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xE0 | utf8_char;
+    buffer[0] = static_cast<char>(0xE0 | utf8_char);
     return 3;
   } else {
-    buffer[3] = 0x80 | (utf8_char & 0x3F);
+    buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xF0 | utf8_char;
+    buffer[0] = static_cast<char>(0xF0 | utf8_char);
     return 4;
   }
 }
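
For illustration (not from the change itself): why these casts are needed. The
operands of an expression like 0x80 | (utf8_char & 0x3F) promote to an integer
type wider than char, so assigning the result to a char element narrows and
-Wconversion flags it; the explicit static_cast documents the intended
truncation. A hypothetical standalone sketch:

    // Hypothetical example: writing a single UTF-8 continuation byte.
    void StoreContinuationByte(char* out, char32_t code_point) {
      *out = static_cast<char>(0x80 | (code_point & 0x3F));
    }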
