Export of internal Abseil changes

--
08d99ee216b7bfac1c5182db952d4e053e5ebc31 by Abseil Team <absl-team@google.com>:

Fix race condition reported by tsan on `inline_element_size` in hashtablez.

PiperOrigin-RevId: 413520771
GitOrigin-RevId: 08d99ee216b7bfac1c5182db952d4e053e5ebc31
Change-Id: Ibd396803f04a659cfbdb8dc7ec37511643657694
commit e11e039e02 (pull/1074/head)
parent cad715dbf3
Author:    Abseil Team <absl-team@google.com>
Committer: Andy Getz
 absl/container/internal/hashtablez_sampler.cc      |  6
 absl/container/internal/hashtablez_sampler.h       | 19
 absl/container/internal/hashtablez_sampler_test.cc | 14
 absl/container/internal/raw_hash_set_test.cc       |  3
 absl/container/sample_element_size_test.cc         |  3
 5 files changed
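
For context, here is a minimal self-contained sketch (hypothetical names; not
Abseil code) of the race the diffs below address: one thread registers a
sample and then writes inline_element_size outside the lock, while another
thread reads the field concurrently while iterating over samples. With a
plain size_t the two accesses form a data race; with std::atomic and relaxed
ordering both accesses are well-defined.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <thread>

// Hypothetical stand-in for HashtablezInfo, reduced to the racing field.
struct SampleSketch {
  std::atomic<size_t> inline_element_size{0};
};

int main() {
  SampleSketch sample;
  // Plays the role of SampleSlow(): stores the element size after the
  // sample is already visible to other threads.
  std::thread writer([&] {
    sample.inline_element_size.store(sizeof(int64_t),
                                     std::memory_order_relaxed);
  });
  // Plays the role of Iterate(): reads the field concurrently. As an
  // atomic, the load is race-free and observes either 0 or the stored size.
  std::thread reader([&] {
    size_t observed =
        sample.inline_element_size.load(std::memory_order_relaxed);
    (void)observed;
  });
  writer.join();
  reader.join();
  return 0;
}

Relaxed ordering is enough here because the field does not publish any other
memory; making the accesses atomic is all that is needed to remove the race.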

--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -111,7 +111,8 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
     HashtablezInfo* result = GlobalHashtablezSampler().Register();
-    result->inline_element_size = inline_element_size;
+    result->inline_element_size.store(inline_element_size,
+                                      std::memory_order_relaxed);
     return result;
   }
@@ -138,7 +139,8 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   }
   HashtablezInfo* result = GlobalHashtablezSampler().Register();
-  result->inline_element_size = inline_element_size;
+  result->inline_element_size.store(inline_element_size,
+                                    std::memory_order_relaxed);
   return result;
 #endif
 }

--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -82,16 +82,27 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_xor;
   std::atomic<size_t> max_reserve;

+  // One could imagine that inline_element_size could be non-atomic, since it
+  // *almost* follows the rules for the fields that are set by
+  // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
+  // which
+  //   A: Thread 1: Register() returns, unlocking init_mu.
+  //   B: Thread 2: Iterate() is called, locking init_mu.
+  //   C: Thread 1: inline_element_size is stored.
+  //   D: Thread 2: inline_element_size is accessed (a race).
+  // A simple solution is to make inline_element_size atomic so that we treat
+  // it as we do the other atomic fields.
+  std::atomic<size_t> inline_element_size;
+
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
-  // These are guarded by init_mu, but that is not externalized to clients, who
-  // can only read them during `HashtablezSampler::Iterate` which will hold the
-  // lock.
+  // These are guarded by init_mu, but that is not externalized to clients,
+  // which can read them only during `SampleRecorder::Iterate` which will hold
+  // the lock.
   static constexpr int kMaxStackDepth = 64;
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
-  size_t inline_element_size;
 };

 inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
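
To make the locking discipline in the new comment concrete, here is a hedged
sketch (assumed shapes; InfoSketch and IterateSketch are hypothetical, not
the real HashtablezInfo or SampleRecorder). Fields set by PrepareForSampling
are written and read only while init_mu is held, so they can stay non-atomic;
inline_element_size is written after registration has already released the
lock, so it must be atomic.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <mutex>

struct InfoSketch {
  std::mutex init_mu;
  // Logically const after PrepareForSampling(); guarded by init_mu.
  int64_t create_time_ns = 0;
  // Written after the lock is released, hence atomic.
  std::atomic<size_t> inline_element_size{0};

  void PrepareForSampling(int64_t now_ns) {
    // Precondition: the caller holds init_mu.
    create_time_ns = now_ns;
  }
};

// Mirrors the contract of SampleRecorder::Iterate: the sample's lock is
// held while the callback runs, so the callback may read the lock-guarded
// fields safely.
template <typename Callback>
void IterateSketch(InfoSketch& info, Callback cb) {
  std::lock_guard<std::mutex> lock(info.init_mu);
  cb(info);
}

int main() {
  InfoSketch info;
  {
    std::lock_guard<std::mutex> lock(info.init_mu);
    info.PrepareForSampling(/*now_ns=*/123);
  }  // Lock released; the atomic store below happens outside it.
  info.inline_element_size.store(sizeof(int64_t), std::memory_order_relaxed);
  IterateSketch(info, [](InfoSketch& i) {
    (void)i.create_time_ns;  // Safe: init_mu is held by IterateSketch.
    (void)i.inline_element_size.load(std::memory_order_relaxed);
  });
  return 0;
}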

--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -83,7 +83,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   absl::MutexLock l(&info.init_mu);
   info.PrepareForSampling();
-  info.inline_element_size = test_element_size;
+  info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
-  EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.inline_element_size.load(), test_element_size);

   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -119,7 +119,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
-  EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
@@ -162,7 +162,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
   info.PrepareForSampling();
-  info.inline_element_size = test_element_size;
+  info.inline_element_size.store(test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,7 +170,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
-  EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
 }

 TEST(HashtablezInfoTest, RecordRehash) {
@@ -178,7 +178,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
   info.PrepareForSampling();
-  info.inline_element_size = test_element_size;
+  info.inline_element_size.store(test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,7 +197,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
-  EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
 }

 TEST(HashtablezInfoTest, RecordReservation) {
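
A small aside on the test changes above: several of the new stores (in
RecordErase and RecordRehash) pass no memory-order argument, which is valid
C++: std::atomic operations default to std::memory_order_seq_cst, the
strongest ordering. A general illustration (plain C++, not part of this
commit):

#include <atomic>
#include <cstddef>

int main() {
  std::atomic<size_t> inline_element_size{0};
  // These two stores are equivalent: omitting the order argument defaults
  // to std::memory_order_seq_cst.
  inline_element_size.store(8);
  inline_element_size.store(8, std::memory_order_seq_cst);
  // The sampler's hot path uses relaxed ordering instead, which suffices
  // because this field does not order any other memory accesses.
  inline_element_size.store(8, std::memory_order_relaxed);
  return static_cast<int>(inline_element_size.load());
}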

--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,7 +2075,8 @@ TEST(RawHashSamplerTest, Sample) {
                                       std::memory_order_relaxed)]++;
       reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
-    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
+    EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
+              sizeof(int64_t));
     ++end_size;
   });

--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -51,7 +51,8 @@ void TestInlineElementSize(
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.insert(&info).second) {
-      EXPECT_EQ(info.inline_element_size, expected_element_size);
+      EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
+                expected_element_size);
       ++new_count;
     }
   });
