Export of internal Abseil changes

--
ca3a0009e675b699b5d6dd41f00ebac0e7d1935c by Derek Mauro <dmauro@google.com>:

Internal change

PiperOrigin-RevId: 396475923

--
04d9fff79085bb18612af3da49007907394ae0b6 by Abseil Team <absl-team@google.com>:

Move HashtablezSampler from container/internal to profiling/internal.

PiperOrigin-RevId: 396362093
GitOrigin-RevId: ca3a0009e675b699b5d6dd41f00ebac0e7d1935c
Change-Id: I42d6d2944786afa24259fde002fed5e611f4e1f9
pull/1016/head
Authored by Abseil Team, committed by Derek Mauro
parent 669184b4f3
commit b2dc72c17a
Changed files (lines changed):
  1. CMake/AbseilDll.cmake (2)
  2. absl/container/BUILD.bazel (2)
  3. absl/container/CMakeLists.txt (1)
  4. absl/container/internal/hashtablez_sampler.cc (104)
  5. absl/container/internal/hashtablez_sampler.h (81)
  6. absl/container/internal/hashtablez_sampler_test.cc (3)
  7. absl/container/internal/raw_hash_set_test.cc (4)
  8. absl/profiling/BUILD.bazel (34)
  9. absl/profiling/CMakeLists.txt (25)
  10. absl/profiling/internal/sample_recorder.h (231)
  11. absl/profiling/internal/sample_recorder_test.cc (171)
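
The upshot of the move: the intrusive-list sampler previously hard-wired to hashtablez becomes the generic `absl::profiling_internal::SampleRecorder<T>`, with `HashtablezSampler` reduced to a type alias over it. Below is a minimal sketch of the generic API, modeled on the `Info` type in the new `sample_recorder_test.cc`; the `BytesSampled` type and its `bytes` field are hypothetical:

#include <atomic>
#include <cstddef>

#include "absl/profiling/internal/sample_recorder.h"

// Hypothetical sampled record. Deriving from Sample<T> (CRTP) supplies the
// intrusive `next`/`dead` pointers and the `init_mu` mutex the recorder needs.
struct BytesSampled : absl::profiling_internal::Sample<BytesSampled> {
  // Called by the recorder when a dead sample is resurrected for reuse.
  void PrepareForSampling() { bytes.store(0, std::memory_order_relaxed); }
  std::atomic<size_t> bytes{0};
};

void Demo() {
  absl::profiling_internal::SampleRecorder<BytesSampled> recorder;
  BytesSampled* s = recorder.Register();  // non-null while under the soft cap
  s->bytes.store(42, std::memory_order_relaxed);
  recorder.Iterate([](const BytesSampled&) { /* visits live samples only */ });
  recorder.Unregister(s);  // recycled via the graveyard, not freed
}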

@@ -133,6 +133,7 @@ set(ABSL_INTERNAL_DLL_FILES
"numeric/int128.h"
"numeric/internal/bits.h"
"numeric/internal/representation.h"
"profiling/internal/sample_recorder.h"
"random/bernoulli_distribution.h"
"random/beta_distribution.h"
"random/bit_gen_ref.h"
@@ -457,6 +458,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"raw_hash_set"
"layout"
"tracked"
"sample_recorder"
)
function(absl_internal_dll_contains)

@@ -513,6 +513,7 @@ cc_library(
"//absl/base:exponential_biased",
"//absl/debugging:stacktrace",
"//absl/memory",
"//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/utility",
],
@@ -526,6 +527,7 @@ cc_test(
":hashtablez_sampler",
":have_sse",
"//absl/base:core_headers",
"//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",

@@ -548,6 +548,7 @@ absl_cc_library(
absl::base
absl::exponential_biased
absl::have_sse
absl::sample_recorder
absl::synchronization
)

@@ -25,6 +25,7 @@
#include "absl/container/internal/have_sse.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
namespace absl {
@@ -37,7 +38,6 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
@@ -50,16 +50,11 @@ ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& HashtablezSampler::Global() {
HashtablezSampler& GlobalHashtablezSampler() {
static auto* sampler = new HashtablezSampler();
return *sampler;
}
HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
DisposeCallback f) {
return dispose_.exchange(f, std::memory_order_relaxed);
}
HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
HashtablezInfo::~HashtablezInfo() = default;
@@ -80,93 +75,6 @@ void HashtablezInfo::PrepareForSampling() {
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
dead = nullptr;
}
HashtablezSampler::HashtablezSampler()
: dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
absl::MutexLock l(&graveyard_.init_mu);
graveyard_.dead = &graveyard_;
}
HashtablezSampler::~HashtablezSampler() {
HashtablezInfo* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
HashtablezInfo* next = s->next;
delete s;
s = next;
}
}
void HashtablezSampler::PushNew(HashtablezInfo* sample) {
sample->next = all_.load(std::memory_order_relaxed);
while (!all_.compare_exchange_weak(sample->next, sample,
std::memory_order_release,
std::memory_order_relaxed)) {
}
}
void HashtablezSampler::PushDead(HashtablezInfo* sample) {
if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
dispose(*sample);
}
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
absl::MutexLock sample_lock(&sample->init_mu);
sample->dead = graveyard_.dead;
graveyard_.dead = sample;
}
HashtablezInfo* HashtablezSampler::PopDead() {
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
// The list is circular, so eventually it collapses down to
// graveyard_.dead == &graveyard_
// when it is empty.
HashtablezInfo* sample = graveyard_.dead;
if (sample == &graveyard_) return nullptr;
absl::MutexLock sample_lock(&sample->init_mu);
graveyard_.dead = sample->dead;
sample->PrepareForSampling();
return sample;
}
HashtablezInfo* HashtablezSampler::Register() {
int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
dropped_samples_.fetch_add(1, std::memory_order_relaxed);
return nullptr;
}
HashtablezInfo* sample = PopDead();
if (sample == nullptr) {
// Resurrection failed. Hire a new warlock.
sample = new HashtablezInfo();
PushNew(sample);
}
return sample;
}
void HashtablezSampler::Unregister(HashtablezInfo* sample) {
PushDead(sample);
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
}
int64_t HashtablezSampler::Iterate(
const std::function<void(const HashtablezInfo& stack)>& f) {
HashtablezInfo* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
absl::MutexLock l(&s->init_mu);
if (s->dead == nullptr) {
f(*s);
}
s = s->next;
}
return dropped_samples_.load(std::memory_order_relaxed);
}
static bool ShouldForceSampling() {
@@ -192,7 +100,7 @@ static bool ShouldForceSampling() {
HashtablezInfo* SampleSlow(int64_t* next_sample) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
return HashtablezSampler::Global().Register();
return GlobalHashtablezSampler().Register();
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -217,12 +125,12 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
return SampleSlow(next_sample);
}
return HashtablezSampler::Global().Register();
return GlobalHashtablezSampler().Register();
#endif
}
void UnsampleSlow(HashtablezInfo* info) {
HashtablezSampler::Global().Unregister(info);
GlobalHashtablezSampler().Unregister(info);
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -262,7 +170,7 @@ void SetHashtablezSampleParameter(int32_t rate) {
void SetHashtablezMaxSamples(int32_t max) {
if (max > 0) {
g_hashtablez_max_samples.store(max, std::memory_order_release);
GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
static_cast<long long>(max)); // NOLINT(runtime/int)
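
One behavioral detail of the hunk above: the max-samples cap now lives on the recorder (`SetHashtablezMaxSamples` forwards to `GlobalHashtablezSampler().SetMaxSamples(max)` instead of writing the deleted `g_hashtablez_max_samples` atomic), and the cap is soft; `Register()` only refuses once the running size estimate already exceeds it. A sketch of that edge, using a hypothetical `Counter` sample type:

#include <atomic>

#include "absl/profiling/internal/sample_recorder.h"

struct Counter : absl::profiling_internal::Sample<Counter> {
  void PrepareForSampling() {}
  std::atomic<int> n{0};
};

void SoftCapDemo() {
  absl::profiling_internal::SampleRecorder<Counter> recorder;
  recorder.SetMaxSamples(1);
  Counter* a = recorder.Register();  // estimate was 0 before the add: admitted
  Counter* b = recorder.Register();  // estimate 1, the check is `size > max`: admitted
  Counter* c = recorder.Register();  // estimate 2 > 1: dropped, returns nullptr
  // c == nullptr; the drop is tallied and reported via Iterate()'s return value.
  recorder.Unregister(b);
  recorder.Unregister(a);
  (void)c;
}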

@@ -47,6 +47,7 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/have_sse.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -57,7 +58,7 @@ namespace container_internal {
// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
struct HashtablezInfo {
struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
@@ -80,14 +81,6 @@ struct HashtablezInfo {
std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
// `HashtablezSampler` maintains intrusive linked lists for all samples. See
// comments on `HashtablezSampler::all_` for details on these. `init_mu`
// guards the ability to restore the sample to a pristine state. This
// prevents races with sampling and resurrecting an object.
absl::Mutex init_mu;
HashtablezInfo* next;
HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
// These are guarded by init_mu, but that is not externalized to clients, who
@@ -231,73 +224,11 @@ inline HashtablezInfoHandle Sample() {
#endif // !ABSL_PER_THREAD_TLS
}
// Holds samples and their associated stack traces with a soft limit of
// `SetHashtablezMaxSamples()`.
//
// Thread safe.
class HashtablezSampler {
public:
// Returns a global Sampler.
static HashtablezSampler& Global();
HashtablezSampler();
~HashtablezSampler();
// Registers for sampling. Returns an opaque registration info.
HashtablezInfo* Register();
using HashtablezSampler =
::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
// Unregisters the sample.
void Unregister(HashtablezInfo* sample);
// The dispose callback will be called on all samples the moment they are
// being unregistered. Only affects samples that are unregistered after the
// callback has been set.
// Returns the previous callback.
using DisposeCallback = void (*)(const HashtablezInfo&);
DisposeCallback SetDisposeCallback(DisposeCallback f);
// Iterates over all the registered `StackInfo`s. Returning the number of
// samples that have been dropped.
int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
private:
void PushNew(HashtablezInfo* sample);
void PushDead(HashtablezInfo* sample);
HashtablezInfo* PopDead();
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
// Intrusive lock free linked lists for tracking samples.
//
// `all_` records all samples (they are never removed from this list) and is
// terminated with a `nullptr`.
//
// `graveyard_.dead` is a circular linked list. When it is empty,
// `graveyard_.dead == &graveyard`. The list is circular so that
// every item on it (even the last) has a non-null dead pointer. This allows
// `Iterate` to determine if a given sample is live or dead using only
// information on the sample itself.
//
// For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
// looks like this (G is the Graveyard):
//
//          +---+    +---+    +---+    +---+    +---+
//   all -->| A |--->| B |--->| C |--->| D |--->| E |
//          |   |    |   |    |   |    |   |    |   |
//  +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
//  | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
//  |   |         |        |        |        |
//  |   | --------+        +--------+        |
//  +---+                                     |
//    ^                                       |
//    +---------------------------------------+
//
std::atomic<HashtablezInfo*> all_;
HashtablezInfo graveyard_;
std::atomic<DisposeCallback> dispose_;
};
// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();
// Enables or disables sampling for Swiss tables.
void SetHashtablezEnabled(bool enabled);
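
For callers the header change is nearly invisible: `HashtablezInfo` now inherits its intrusive `next`/`dead` pointers and `init_mu` from `Sample<HashtablezInfo>`, and the old class body collapses to the alias above. A sketch of typical client code after the change (the `DumpLiveTables` function is hypothetical):

#include <atomic>
#include <cstdint>

#include "absl/container/internal/hashtablez_sampler.h"

void DumpLiveTables() {
  using ::absl::container_internal::GlobalHashtablezSampler;
  using ::absl::container_internal::HashtablezInfo;
  // Iterate() locks each sample's init_mu, skips dead samples, and returns
  // the number of samples dropped at the soft cap.
  int64_t dropped = GlobalHashtablezSampler().Iterate(
      [](const HashtablezInfo& info) {
        (void)info.hashes_bitwise_and.load(std::memory_order_relaxed);
      });
  (void)dropped;
}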

@@ -22,6 +22,7 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/container/internal/have_sse.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
@@ -232,7 +233,7 @@ TEST(HashtablezSamplerTest, Sample) {
}
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = HashtablezSampler::Global();
auto& sampler = GlobalHashtablezSampler();
HashtablezInfoHandle h(sampler.Register());
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);

@@ -2038,7 +2038,7 @@ TEST(RawHashSamplerTest, Sample) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
auto& sampler = HashtablezSampler::Global();
auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
std::unordered_set<const HashtablezInfo*> preexisting_info;
start_size += sampler.Iterate([&](const HashtablezInfo& info) {
@@ -2076,7 +2076,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
auto& sampler = HashtablezSampler::Global();
auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
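
The two test hunks above show the standard recipe for observing Swiss-table sampling: enable it, set a sampling rate, then snapshot the global sampler around table creation. A condensed sketch of that recipe (the `CountLiveSamples` helper is hypothetical):

#include <cstddef>

#include "absl/container/internal/hashtablez_sampler.h"

size_t CountLiveSamples() {
  namespace ci = absl::container_internal;
  ci::SetHashtablezEnabled(true);
  ci::SetHashtablezSampleParameter(100);  // sample roughly 1 in 100 tables
  size_t live = 0;
  ci::GlobalHashtablezSampler().Iterate(
      [&](const ci::HashtablezInfo&) { ++live; });
  return live;
}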

@@ -12,6 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_DEFAULT_LINKOPTS",
)
package(default_visibility = ["//visibility:private"])
licenses(["notice"])
cc_library(
name = "sample_recorder",
hdrs = ["internal/sample_recorder.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//absl:__subpackages__"],
deps = [
"//absl/base:config",
"//absl/base:core_headers",
"//absl/synchronization",
"//absl/time",
],
)
cc_test(
name = "sample_recorder_test",
srcs = ["internal/sample_recorder_test.cc"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":sample_recorder",
"//absl/base:core_headers",
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",
"@com_google_googletest//:gtest_main",
],
)

@@ -12,3 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
absl_cc_library(
NAME
sample_recorder
HDRS
"internal/sample_recorder.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
absl::synchronization
)
absl_cc_test(
NAME
sample_recorder_test
SRCS
"internal/sample_recorder_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::sample_recorder
absl::time
GTest::gmock_main
)

@@ -0,0 +1,231 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: sample_recorder.h
// -----------------------------------------------------------------------------
//
// This header file defines a lock-free linked list for recording samples
// collected from a random/stochastic process.
//
// This utility is internal-only. Use at your own risk.
#ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
#include <atomic>
#include <cstddef>
#include <functional>
#include "absl/base/config.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
// Sample<T> that has members required for linking samples in the linked list of
// samples maintained by the SampleRecorder. Type T defines the sampled data.
template <typename T>
struct Sample {
public:
// Guards the ability to restore the sample to a pristine state. This
// prevents races with sampling and resurrecting an object.
absl::Mutex init_mu;
T* next = nullptr;
T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
};
// Holds samples and their associated stack traces with a soft limit of
// `SetHashtablezMaxSamples()`.
//
// Thread safe.
template <typename T>
class SampleRecorder {
public:
SampleRecorder();
~SampleRecorder();
// Registers for sampling. Returns an opaque registration info.
T* Register();
// Unregisters the sample.
void Unregister(T* sample);
// The dispose callback will be called on all samples the moment they are
// being unregistered. Only affects samples that are unregistered after the
// callback has been set.
// Returns the previous callback.
using DisposeCallback = void (*)(const T&);
DisposeCallback SetDisposeCallback(DisposeCallback f);
// Iterates over all the registered `StackInfo`s. Returning the number of
// samples that have been dropped.
int64_t Iterate(const std::function<void(const T& stack)>& f);
void SetMaxSamples(int32_t max);
private:
void PushNew(T* sample);
void PushDead(T* sample);
T* PopDead();
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
std::atomic<int32_t> max_samples_{1 << 20};
// Intrusive lock free linked lists for tracking samples.
//
// `all_` records all samples (they are never removed from this list) and is
// terminated with a `nullptr`.
//
// `graveyard_.dead` is a circular linked list. When it is empty,
// `graveyard_.dead == &graveyard`. The list is circular so that
// every item on it (even the last) has a non-null dead pointer. This allows
// `Iterate` to determine if a given sample is live or dead using only
// information on the sample itself.
//
// For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
// looks like this (G is the Graveyard):
//
//          +---+    +---+    +---+    +---+    +---+
//   all -->| A |--->| B |--->| C |--->| D |--->| E |
//          |   |    |   |    |   |    |   |    |   |
//  +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
//  | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
//  |   |         |        |        |        |
//  |   | --------+        +--------+        |
//  +---+                                     |
//    ^                                       |
//    +---------------------------------------+
//
std::atomic<T*> all_;
T graveyard_;
std::atomic<DisposeCallback> dispose_;
};
template <typename T>
typename SampleRecorder<T>::DisposeCallback
SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
return dispose_.exchange(f, std::memory_order_relaxed);
}
template <typename T>
SampleRecorder<T>::SampleRecorder()
: dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
absl::MutexLock l(&graveyard_.init_mu);
graveyard_.dead = &graveyard_;
}
template <typename T>
SampleRecorder<T>::~SampleRecorder() {
T* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
T* next = s->next;
delete s;
s = next;
}
}
template <typename T>
void SampleRecorder<T>::PushNew(T* sample) {
sample->next = all_.load(std::memory_order_relaxed);
while (!all_.compare_exchange_weak(sample->next, sample,
std::memory_order_release,
std::memory_order_relaxed)) {
}
}
template <typename T>
void SampleRecorder<T>::PushDead(T* sample) {
if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
dispose(*sample);
}
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
absl::MutexLock sample_lock(&sample->init_mu);
sample->dead = graveyard_.dead;
graveyard_.dead = sample;
}
template <typename T>
T* SampleRecorder<T>::PopDead() {
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
// The list is circular, so eventually it collapses down to
// graveyard_.dead == &graveyard_
// when it is empty.
T* sample = graveyard_.dead;
if (sample == &graveyard_) return nullptr;
absl::MutexLock sample_lock(&sample->init_mu);
graveyard_.dead = sample->dead;
sample->dead = nullptr;
sample->PrepareForSampling();
return sample;
}
template <typename T>
T* SampleRecorder<T>::Register() {
int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > max_samples_.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
dropped_samples_.fetch_add(1, std::memory_order_relaxed);
return nullptr;
}
T* sample = PopDead();
if (sample == nullptr) {
// Resurrection failed. Hire a new warlock.
sample = new T();
PushNew(sample);
}
return sample;
}
template <typename T>
void SampleRecorder<T>::Unregister(T* sample) {
PushDead(sample);
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
}
template <typename T>
int64_t SampleRecorder<T>::Iterate(
const std::function<void(const T& stack)>& f) {
T* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
absl::MutexLock l(&s->init_mu);
if (s->dead == nullptr) {
f(*s);
}
s = s->next;
}
return dropped_samples_.load(std::memory_order_relaxed);
}
template <typename T>
void SampleRecorder<T>::SetMaxSamples(int32_t max) {
max_samples_.store(max, std::memory_order_release);
}
} // namespace profiling_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
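
Worth spelling out from the code above: `Unregister()` never frees memory. Dead samples ride the circular graveyard list, `Register()` prefers resurrecting one (resetting it via `PrepareForSampling()` and clearing its `dead` pointer) over allocating, and nodes on `all_` are deleted only in the recorder's destructor. A sketch of that lifecycle with a hypothetical `Rec` type:

#include <atomic>
#include <cstddef>

#include "absl/profiling/internal/sample_recorder.h"

struct Rec : absl::profiling_internal::Sample<Rec> {
  void PrepareForSampling() { hits.store(0, std::memory_order_relaxed); }
  std::atomic<size_t> hits{0};
};

void LifecycleDemo() {
  absl::profiling_internal::SampleRecorder<Rec> recorder;
  Rec* a = recorder.Register();  // graveyard empty: allocates, pushes onto all_
  a->hits.store(7, std::memory_order_relaxed);
  recorder.Unregister(a);        // onto the graveyard; the node stays on all_
  Rec* b = recorder.Register();  // pops the graveyard: b == a, hits reset to 0
  recorder.Unregister(b);
}  // ~SampleRecorder walks all_ and deletes each node exactly once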

@@ -0,0 +1,171 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/profiling/internal/sample_recorder.h"
#include <atomic>
#include <random>
#include <vector>
#include "gmock/gmock.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
namespace {
using ::absl::synchronization_internal::ThreadPool;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
struct Info : public Sample<Info> {
public:
void PrepareForSampling() {}
std::atomic<size_t> size;
absl::Time create_time;
};
std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
std::vector<size_t> res;
s->Iterate([&](const Info& info) {
res.push_back(info.size.load(std::memory_order_acquire));
});
return res;
}
Info* Register(SampleRecorder<Info>* s, size_t size) {
auto* info = s->Register();
assert(info != nullptr);
info->size.store(size);
return info;
}
TEST(SampleRecorderTest, Registration) {
SampleRecorder<Info> sampler;
auto* info1 = Register(&sampler, 1);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
auto* info2 = Register(&sampler, 2);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
info1->size.store(3);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
sampler.Unregister(info1);
sampler.Unregister(info2);
}
TEST(SampleRecorderTest, Unregistration) {
SampleRecorder<Info> sampler;
std::vector<Info*> infos;
for (size_t i = 0; i < 3; ++i) {
infos.push_back(Register(&sampler, i));
}
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
sampler.Unregister(infos[1]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
infos.push_back(Register(&sampler, 3));
infos.push_back(Register(&sampler, 4));
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
sampler.Unregister(infos[3]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
sampler.Unregister(infos[0]);
sampler.Unregister(infos[2]);
sampler.Unregister(infos[4]);
EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}
TEST(SampleRecorderTest, MultiThreaded) {
SampleRecorder<Info> sampler;
Notification stop;
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
pool.Schedule([&sampler, &stop]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<Info*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
infoz.push_back(sampler.Register());
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
infoz.push_back(sampler.Register());
break;
}
case 1: {
size_t p =
std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
Info* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
sampler.Unregister(info);
break;
}
case 2: {
absl::Duration oldest = absl::ZeroDuration();
sampler.Iterate([&](const Info& info) {
oldest = std::max(oldest, absl::Now() - info.create_time);
});
ASSERT_GE(oldest, absl::ZeroDuration());
break;
}
}
}
});
}
// The threads will hammer away. Give it a little bit of time for tsan to
// spot errors.
absl::SleepFor(absl::Seconds(3));
stop.Notify();
}
TEST(SampleRecorderTest, Callback) {
SampleRecorder<Info> sampler;
auto* info1 = Register(&sampler, 1);
auto* info2 = Register(&sampler, 2);
static const Info* expected;
auto callback = [](const Info& info) {
// We can't use `info` outside of this callback because the object will be
// disposed as soon as we return from here.
EXPECT_EQ(&info, expected);
};
// Set the callback.
EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
expected = info1;
sampler.Unregister(info1);
// Unset the callback.
EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
expected = nullptr; // no more calls.
sampler.Unregister(info2);
}
} // namespace
} // namespace profiling_internal
ABSL_NAMESPACE_END
} // namespace absl
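
The `Callback` test above pins down the dispose contract: the callback fires inside `Unregister()`, before the sample is pushed onto the graveyard, so it is the last safe point to read the sample. A hedged sketch of using it to flush data out of a dying sample (the `Evt` type is hypothetical):

#include <atomic>
#include <cstddef>

#include "absl/profiling/internal/sample_recorder.h"

struct Evt : absl::profiling_internal::Sample<Evt> {
  void PrepareForSampling() {}
  std::atomic<size_t> count{0};
};

void DisposeDemo() {
  absl::profiling_internal::SampleRecorder<Evt> recorder;
  // Captureless lambdas convert to the DisposeCallback function pointer.
  auto prev = recorder.SetDisposeCallback(
      [](const Evt& e) { (void)e.count.load(std::memory_order_relaxed); });
  Evt* s = recorder.Register();
  s->count.store(3, std::memory_order_relaxed);
  recorder.Unregister(s);             // runs the callback on *s first
  recorder.SetDisposeCallback(prev);  // restore the previous callback
}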