Merge pull request #21361 from yashykt/uselogicalthread

Change client channel code to use WorkSerializer
Yash Tibrewal authored 5 years ago, committed by GitHub
commit f58a18daee
Files changed (changed line counts in parentheses):

  1. src/core/ext/filters/client_channel/client_channel.cc (303)
  2. src/core/ext/filters/client_channel/lb_policy.cc (33)
  3. src/core/ext/filters/client_channel/lb_policy.h (26)
  4. src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc (2)
  5. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (349)
  6. src/core/ext/filters/client_channel/lb_policy/priority/priority.cc (63)
  7. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (2)
  8. src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc (33)
  9. src/core/ext/filters/client_channel/lb_policy/xds/cds.cc (2)
  10. src/core/ext/filters/client_channel/lb_policy/xds/eds.cc (6)
  11. src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc (6)
  12. src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc (30)
  13. src/core/ext/filters/client_channel/local_subchannel_pool.h (3)
  14. src/core/ext/filters/client_channel/resolver.cc (9)
  15. src/core/ext/filters/client_channel/resolver.h (22)
  16. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (121)
  17. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc (70)
  18. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h (15)
  19. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc (36)
  20. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (8)
  21. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc (189)
  22. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (47)
  23. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h (3)
  24. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc (5)
  25. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (83)
  26. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (195)
  27. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h (4)
  28. src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc (4)
  29. src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (5)
  30. src/core/ext/filters/client_channel/resolver_factory.h (4)
  31. src/core/ext/filters/client_channel/resolver_registry.cc (5)
  32. src/core/ext/filters/client_channel/resolver_registry.h (15)
  33. src/core/ext/filters/client_channel/resolving_lb_policy.cc (5)
  34. src/core/ext/filters/client_channel/subchannel.cc (78)
  35. src/core/ext/filters/client_channel/subchannel.h (46)
  36. src/core/ext/filters/client_channel/xds/xds_client.cc (418)
  37. src/core/ext/filters/client_channel/xds/xds_client.h (8)
  38. src/core/ext/filters/client_channel/xds/xds_client_stats.h (4)
  39. src/core/lib/transport/connectivity_state.cc (13)
  40. src/core/lib/transport/connectivity_state.h (8)
  41. test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc (13)
  42. test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc (28)
  43. test/core/client_channel/resolvers/dns_resolver_test.cc (15)
  44. test/core/client_channel/resolvers/fake_resolver_test.cc (12)
  45. test/core/client_channel/resolvers/sockaddr_resolver_test.cc (15)
  46. test/core/end2end/goaway_server_test.cc (7)
  47. test/core/util/test_lb_policies.cc (2)
  48. test/cpp/end2end/xds_end2end_test.cc (5)
  49. test/cpp/naming/cancel_ares_query_test.cc (7)
  50. test/cpp/naming/resolver_component_test.cc (18)
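The hunks below migrate these files from the closure-based Combiner to WorkSerializer. A WorkSerializer runs std::function callbacks one at a time and in order, on whichever thread submits work while the serializer is idle, so callers can pass plain lambdas instead of pre-allocated grpc_closure objects. As a reading aid, here is a minimal, hypothetical miniature of that execution model; MiniWorkSerializer is invented for this sketch and is not gRPC's actual class (the real API also takes a DEBUG_LOCATION argument, as seen throughout the diff).

```cpp
#include <cstdio>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Hypothetical miniature of the WorkSerializer execution model: callbacks
// run exactly one at a time, in submission order, on whichever thread
// submits work while the serializer is idle. No closure structs or
// grpc_error plumbing needed; callers pass std::function lambdas.
class MiniWorkSerializer {
 public:
  void Run(std::function<void()> cb) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      queue_.push_back(std::move(cb));
      if (draining_) return;  // the thread already draining will run it
      draining_ = true;       // we become the draining thread
    }
    Drain();
  }

 private:
  void Drain() {
    while (true) {
      std::function<void()> cb;
      {
        std::lock_guard<std::mutex> lock(mu_);
        if (queue_.empty()) {
          draining_ = false;
          return;
        }
        cb = std::move(queue_.front());
        queue_.pop_front();
      }
      cb();  // executed outside the lock, but never concurrently
    }
  }

  std::mutex mu_;
  std::deque<std::function<void()>> queue_;
  bool draining_ = false;
};

int main() {
  MiniWorkSerializer serializer;
  int counter = 0;  // safe: only ever touched inside the serializer
  std::thread t1([&] {
    for (int i = 0; i < 1000; ++i) serializer.Run([&] { ++counter; });
  });
  std::thread t2([&] {
    for (int i = 0; i < 1000; ++i) serializer.Run([&] { ++counter; });
  });
  t1.join();
  t2.join();
  serializer.Run([&] { std::printf("counter = %d\n", counter); });  // 2000
}
```

Note how, in the hunks that follow, the grpc_closure fields and the static thunk functions taking a void* arg disappear wherever a lambda can capture `this` directly.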

@@ -56,9 +56,9 @@
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -126,6 +126,7 @@ class ChannelData {
size_t per_rpc_retry_buffer_size() const {
return per_rpc_retry_buffer_size_;
}
grpc_channel_stack* owning_stack() const { return owning_stack_; }
// Note: Does NOT return a new ref.
grpc_error* disconnect_error() const {
@@ -149,6 +150,7 @@ class ChannelData {
RefCountedPtr<ServiceConfig> service_config() const {
return service_config_;
}
WorkSerializer* work_serializer() const { return work_serializer_.get(); }
RefCountedPtr<ConnectedSubchannel> GetConnectedSubchannelInDataPlane(
SubchannelInterface* subchannel) const;
@@ -159,11 +161,15 @@ class ChannelData {
grpc_connectivity_state* state,
grpc_closure* on_complete,
grpc_closure* watcher_timer_init) {
MutexLock lock(&external_watchers_mu_);
// Will be deleted when the watch is complete.
GPR_ASSERT(external_watchers_[on_complete] == nullptr);
external_watchers_[on_complete] = new ExternalConnectivityWatcher(
auto* watcher = new ExternalConnectivityWatcher(
this, pollent, state, on_complete, watcher_timer_init);
{
MutexLock lock(&external_watchers_mu_);
// Will be deleted when the watch is complete.
GPR_ASSERT(external_watchers_[on_complete] == nullptr);
external_watchers_[on_complete] = watcher;
}
watcher->Start();
}
void RemoveExternalConnectivityWatcher(grpc_closure* on_complete,
@@ -204,13 +210,15 @@ class ChannelData {
~ExternalConnectivityWatcher();
void Start();
void Notify(grpc_connectivity_state state) override;
void Cancel();
private:
static void AddWatcherLocked(void* arg, grpc_error* ignored);
static void RemoveWatcherLocked(void* arg, grpc_error* ignored);
void AddWatcherLocked();
void RemoveWatcherLocked();
ChannelData* chand_;
grpc_polling_entity pollent_;
@@ -218,8 +226,6 @@ class ChannelData {
grpc_connectivity_state* state_;
grpc_closure* on_complete_;
grpc_closure* watcher_timer_init_;
grpc_closure add_closure_;
grpc_closure remove_closure_;
Atomic<bool> done_{false};
};
@@ -245,9 +251,9 @@ class ChannelData {
grpc_error* DoPingLocked(grpc_transport_op* op);
static void StartTransportOpLocked(void* arg, grpc_error* ignored);
void StartTransportOpLocked(grpc_transport_op* op);
static void TryToConnectLocked(void* arg, grpc_error* error_ignored);
void TryToConnectLocked();
void ProcessLbPolicy(
const Resolver::Result& resolver_result,
@@ -280,9 +286,9 @@ class ChannelData {
RefCountedPtr<ServiceConfig> service_config_;
//
// Fields used in the control plane. Guarded by combiner.
// Fields used in the control plane. Guarded by work_serializer.
//
Combiner* combiner_;
std::shared_ptr<WorkSerializer> work_serializer_;
grpc_pollset_set* interested_parties_;
RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
OrphanablePtr<ResolvingLoadBalancingPolicy> resolving_lb_policy_;
@@ -294,17 +300,18 @@ class ChannelData {
std::map<Subchannel*, int> subchannel_refcount_map_;
// The set of SubchannelWrappers that currently exist.
// No need to hold a ref, since the map is updated in the control-plane
// combiner when the SubchannelWrappers are created and destroyed.
// work_serializer when the SubchannelWrappers are created and destroyed.
std::set<SubchannelWrapper*> subchannel_wrappers_;
// Pending ConnectedSubchannel updates for each SubchannelWrapper.
// Updates are queued here in the control plane combiner and then applied
// in the data plane mutex when the picker is updated.
// Updates are queued here in the control plane work_serializer and then
// applied in the data plane mutex when the picker is updated.
std::map<RefCountedPtr<SubchannelWrapper>, RefCountedPtr<ConnectedSubchannel>,
RefCountedPtrLess<SubchannelWrapper>>
pending_subchannel_updates_;
//
// Fields accessed from both data plane mutex and control plane combiner.
// Fields accessed from both data plane mutex and control plane
// work_serializer.
//
Atomic<grpc_error*> disconnect_error_;
@@ -838,7 +845,7 @@ class CallData {
// Note that no synchronization is needed here, because even if the
// underlying subchannel is shared between channels, this wrapper will only
// be used within one channel, so it will always be synchronized by the
// control plane combiner.
// control plane work_serializer.
class ChannelData::SubchannelWrapper : public SubchannelInterface {
public:
SubchannelWrapper(ChannelData* chand, Subchannel* subchannel,
@@ -907,7 +914,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
initial_state,
grpc_core::UniquePtr<char>(
gpr_strdup(health_check_service_name_.get())),
OrphanablePtr<Subchannel::ConnectivityStateWatcherInterface>(
RefCountedPtr<Subchannel::ConnectivityStateWatcherInterface>(
watcher_wrapper));
}
@@ -957,14 +964,14 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
replacement->last_seen_state(),
grpc_core::UniquePtr<char>(
gpr_strdup(health_check_service_name.get())),
OrphanablePtr<Subchannel::ConnectivityStateWatcherInterface>(
RefCountedPtr<Subchannel::ConnectivityStateWatcherInterface>(
replacement));
}
// Save the new health check service name.
health_check_service_name_ = std::move(health_check_service_name);
}
// Caller must be holding the control-plane combiner.
// Caller must be holding the control-plane work_serializer.
ConnectedSubchannel* connected_subchannel() const {
return connected_subchannel_.get();
}
@@ -1004,23 +1011,27 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
parent_(std::move(parent)),
last_seen_state_(initial_state) {}
~WatcherWrapper() { parent_.reset(DEBUG_LOCATION, "WatcherWrapper"); }
void Orphan() override { Unref(); }
~WatcherWrapper() {
auto* parent = parent_.release(); // ref owned by lambda
parent->chand_->work_serializer_->Run(
[parent]() { parent->Unref(DEBUG_LOCATION, "WatcherWrapper"); },
DEBUG_LOCATION);
}
void OnConnectivityStateChange(
grpc_connectivity_state new_state,
RefCountedPtr<ConnectedSubchannel> connected_subchannel) override {
void OnConnectivityStateChange() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: connectivity change for subchannel wrapper %p "
"subchannel %p (connected_subchannel=%p state=%s); "
"hopping into combiner",
parent_->chand_, parent_.get(), parent_->subchannel_,
connected_subchannel.get(), ConnectivityStateName(new_state));
"subchannel %p; hopping into work_serializer",
parent_->chand_, parent_.get(), parent_->subchannel_);
}
// Will delete itself.
new Updater(Ref(), new_state, std::move(connected_subchannel));
Ref().release(); // ref owned by lambda
parent_->chand_->work_serializer_->Run(
[this]() {
ApplyUpdateInControlPlaneWorkSerializer();
Unref();
},
DEBUG_LOCATION);
}
grpc_pollset_set* interested_parties() override {
@@ -1040,50 +1051,25 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
grpc_connectivity_state last_seen_state() const { return last_seen_state_; }
private:
class Updater {
public:
Updater(RefCountedPtr<WatcherWrapper> parent,
grpc_connectivity_state new_state,
RefCountedPtr<ConnectedSubchannel> connected_subchannel)
: parent_(std::move(parent)),
state_(new_state),
connected_subchannel_(std::move(connected_subchannel)) {
parent_->parent_->chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&closure_, ApplyUpdateInControlPlaneCombiner,
this, nullptr),
GRPC_ERROR_NONE);
void ApplyUpdateInControlPlaneWorkSerializer() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: processing connectivity change in work serializer "
"for subchannel wrapper %p subchannel %p "
"watcher=%p",
parent_->chand_, parent_.get(), parent_->subchannel_,
watcher_.get());
}
private:
static void ApplyUpdateInControlPlaneCombiner(void* arg,
grpc_error* /*error*/) {
Updater* self = static_cast<Updater*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: processing connectivity change in combiner "
"for subchannel wrapper %p subchannel %p "
"(connected_subchannel=%p state=%s): watcher=%p",
self->parent_->parent_->chand_, self->parent_->parent_.get(),
self->parent_->parent_->subchannel_,
self->connected_subchannel_.get(),
ConnectivityStateName(self->state_),
self->parent_->watcher_.get());
}
// Ignore update if the parent WatcherWrapper has been replaced
// since this callback was scheduled.
if (self->parent_->watcher_ == nullptr) return;
self->parent_->last_seen_state_ = self->state_;
self->parent_->parent_->MaybeUpdateConnectedSubchannel(
std::move(self->connected_subchannel_));
self->parent_->watcher_->OnConnectivityStateChange(self->state_);
delete self;
ConnectivityStateChange state_change = PopConnectivityStateChange();
// Ignore update if the parent WatcherWrapper has been replaced
// since this callback was scheduled.
if (watcher_ != nullptr) {
last_seen_state_ = state_change.state;
parent_->MaybeUpdateConnectedSubchannel(
std::move(state_change.connected_subchannel));
watcher_->OnConnectivityStateChange(state_change.state);
}
RefCountedPtr<WatcherWrapper> parent_;
grpc_connectivity_state state_;
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
grpc_closure closure_;
};
}
std::unique_ptr<SubchannelInterface::ConnectivityStateWatcherInterface>
watcher_;
@@ -1122,7 +1108,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
// CancelConnectivityStateWatch() with its watcher, we know the
// corresponding WrapperWatcher to cancel on the underlying subchannel.
std::map<ConnectivityStateWatcherInterface*, WatcherWrapper*> watcher_map_;
// To be accessed only in the control plane combiner.
// To be accessed only in the control plane work_serializer.
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
// To be accessed only in the data plane mutex.
RefCountedPtr<ConnectedSubchannel> connected_subchannel_in_data_plane_;
@@ -1145,9 +1131,6 @@ ChannelData::ExternalConnectivityWatcher::ExternalConnectivityWatcher(
grpc_polling_entity_add_to_pollset_set(&pollent_,
chand_->interested_parties_);
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ExternalConnectivityWatcher");
chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&add_closure_, AddWatcherLocked, this, nullptr),
GRPC_ERROR_NONE);
}
ChannelData::ExternalConnectivityWatcher::~ExternalConnectivityWatcher() {
@@ -1157,6 +1140,11 @@ ChannelData::ExternalConnectivityWatcher::~ExternalConnectivityWatcher() {
"ExternalConnectivityWatcher");
}
void ChannelData::ExternalConnectivityWatcher::Start() {
chand_->work_serializer_->Run([this]() { AddWatcherLocked(); },
DEBUG_LOCATION);
}
void ChannelData::ExternalConnectivityWatcher::Notify(
grpc_connectivity_state state) {
bool done = false;
@@ -1169,13 +1157,12 @@ void ChannelData::ExternalConnectivityWatcher::Notify(
// Report new state to the user.
*state_ = state;
ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_NONE);
// Hop back into the combiner to clean up.
// Hop back into the work_serializer to clean up.
// Not needed in state SHUTDOWN, because the tracker will
// automatically remove all watchers in that case.
if (state != GRPC_CHANNEL_SHUTDOWN) {
chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this, nullptr),
GRPC_ERROR_NONE);
chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
DEBUG_LOCATION);
}
}
@@ -1186,28 +1173,20 @@ void ChannelData::ExternalConnectivityWatcher::Cancel() {
return; // Already done.
}
ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_CANCELLED);
// Hop back into the combiner to clean up.
chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this, nullptr),
GRPC_ERROR_NONE);
// Hop back into the work_serializer to clean up.
chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
DEBUG_LOCATION);
}
void ChannelData::ExternalConnectivityWatcher::AddWatcherLocked(
void* arg, grpc_error* /*ignored*/) {
ExternalConnectivityWatcher* self =
static_cast<ExternalConnectivityWatcher*>(arg);
Closure::Run(DEBUG_LOCATION, self->watcher_timer_init_, GRPC_ERROR_NONE);
void ChannelData::ExternalConnectivityWatcher::AddWatcherLocked() {
Closure::Run(DEBUG_LOCATION, watcher_timer_init_, GRPC_ERROR_NONE);
// Add new watcher.
self->chand_->state_tracker_.AddWatcher(
self->initial_state_,
OrphanablePtr<ConnectivityStateWatcherInterface>(self));
chand_->state_tracker_.AddWatcher(
initial_state_, OrphanablePtr<ConnectivityStateWatcherInterface>(this));
}
void ChannelData::ExternalConnectivityWatcher::RemoveWatcherLocked(
void* arg, grpc_error* /*ignored*/) {
ExternalConnectivityWatcher* self =
static_cast<ExternalConnectivityWatcher*>(arg);
self->chand_->state_tracker_.RemoveWatcher(self);
void ChannelData::ExternalConnectivityWatcher::RemoveWatcherLocked() {
chand_->state_tracker_.RemoveWatcher(this);
}
//
@@ -1223,28 +1202,20 @@ class ChannelData::ConnectivityWatcherAdder {
initial_state_(initial_state),
watcher_(std::move(watcher)) {
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherAdder");
chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&closure_,
&ConnectivityWatcherAdder::AddWatcherLocked, this,
nullptr),
GRPC_ERROR_NONE);
chand_->work_serializer_->Run([this]() { AddWatcherLocked(); },
DEBUG_LOCATION);
}
private:
static void AddWatcherLocked(void* arg, grpc_error* /*error*/) {
ConnectivityWatcherAdder* self =
static_cast<ConnectivityWatcherAdder*>(arg);
self->chand_->state_tracker_.AddWatcher(self->initial_state_,
std::move(self->watcher_));
GRPC_CHANNEL_STACK_UNREF(self->chand_->owning_stack_,
"ConnectivityWatcherAdder");
delete self;
void AddWatcherLocked() {
chand_->state_tracker_.AddWatcher(initial_state_, std::move(watcher_));
GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ConnectivityWatcherAdder");
delete this;
}
ChannelData* chand_;
grpc_connectivity_state initial_state_;
OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher_;
grpc_closure closure_;
};
//
@@ -1257,26 +1228,20 @@ class ChannelData::ConnectivityWatcherRemover {
AsyncConnectivityStateWatcherInterface* watcher)
: chand_(chand), watcher_(watcher) {
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherRemover");
chand_->combiner_->Run(
GRPC_CLOSURE_INIT(&closure_,
&ConnectivityWatcherRemover::RemoveWatcherLocked,
this, nullptr),
GRPC_ERROR_NONE);
chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
DEBUG_LOCATION);
}
private:
static void RemoveWatcherLocked(void* arg, grpc_error* /*error*/) {
ConnectivityWatcherRemover* self =
static_cast<ConnectivityWatcherRemover*>(arg);
self->chand_->state_tracker_.RemoveWatcher(self->watcher_);
GRPC_CHANNEL_STACK_UNREF(self->chand_->owning_stack_,
void RemoveWatcherLocked() {
chand_->state_tracker_.RemoveWatcher(watcher_);
GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_,
"ConnectivityWatcherRemover");
delete self;
delete this;
}
ChannelData* chand_;
AsyncConnectivityStateWatcherInterface* watcher_;
grpc_closure closure_;
};
//
@@ -1417,7 +1382,7 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
client_channel_factory_(
ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
channelz_node_(GetChannelzNode(args->channel_args)),
combiner_(grpc_combiner_create()),
work_serializer_(std::make_shared<WorkSerializer>()),
interested_parties_(grpc_pollset_set_create()),
subchannel_pool_(GetSubchannelPool(args->channel_args)),
state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
@@ -1488,7 +1453,6 @@ ChannelData::~ChannelData() {
// Stop backup polling.
grpc_client_channel_stop_backup_polling(interested_parties_);
grpc_pollset_set_destroy(interested_parties_);
GRPC_COMBINER_UNREF(combiner_, "client_channel");
GRPC_ERROR_UNREF(disconnect_error_.Load(MemoryOrder::RELAXED));
gpr_mu_destroy(&info_mu_);
}
@@ -1592,7 +1556,7 @@ void ChannelData::UpdateServiceConfigLocked(
void ChannelData::CreateResolvingLoadBalancingPolicyLocked() {
// Instantiate resolving LB policy.
LoadBalancingPolicy::Args lb_args;
lb_args.combiner = combiner_;
lb_args.work_serializer = work_serializer_;
lb_args.channel_control_helper =
absl::make_unique<ClientChannelControlHelper>(this);
lb_args.args = channel_args_;
@@ -1813,22 +1777,18 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
return result.error;
}
void ChannelData::StartTransportOpLocked(void* arg, grpc_error* /*ignored*/) {
grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
grpc_channel_element* elem =
static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
void ChannelData::StartTransportOpLocked(grpc_transport_op* op) {
// Connectivity watch.
if (op->start_connectivity_watch != nullptr) {
chand->state_tracker_.AddWatcher(op->start_connectivity_watch_state,
std::move(op->start_connectivity_watch));
state_tracker_.AddWatcher(op->start_connectivity_watch_state,
std::move(op->start_connectivity_watch));
}
if (op->stop_connectivity_watch != nullptr) {
chand->state_tracker_.RemoveWatcher(op->stop_connectivity_watch);
state_tracker_.RemoveWatcher(op->stop_connectivity_watch);
}
// Ping.
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
grpc_error* error = chand->DoPingLocked(op);
grpc_error* error = DoPingLocked(op);
if (error != GRPC_ERROR_NONE) {
ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_initiate,
GRPC_ERROR_REF(error));
@@ -1840,40 +1800,39 @@ void ChannelData::StartTransportOpLocked(void* arg, grpc_error* /*ignored*/) {
}
// Reset backoff.
if (op->reset_connect_backoff) {
if (chand->resolving_lb_policy_ != nullptr) {
chand->resolving_lb_policy_->ResetBackoffLocked();
if (resolving_lb_policy_ != nullptr) {
resolving_lb_policy_->ResetBackoffLocked();
}
}
// Disconnect or enter IDLE.
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", chand,
gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
grpc_error_string(op->disconnect_with_error));
}
chand->DestroyResolvingLoadBalancingPolicyLocked();
DestroyResolvingLoadBalancingPolicyLocked();
intptr_t value;
if (grpc_error_get_int(op->disconnect_with_error,
GRPC_ERROR_INT_CHANNEL_CONNECTIVITY_STATE, &value) &&
static_cast<grpc_connectivity_state>(value) == GRPC_CHANNEL_IDLE) {
if (chand->disconnect_error() == GRPC_ERROR_NONE) {
if (disconnect_error() == GRPC_ERROR_NONE) {
// Enter IDLE state.
chand->UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE,
"channel entering IDLE", nullptr);
UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE, "channel entering IDLE",
nullptr);
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
} else {
// Disconnect.
GPR_ASSERT(chand->disconnect_error_.Load(MemoryOrder::RELAXED) ==
GPR_ASSERT(disconnect_error_.Load(MemoryOrder::RELAXED) ==
GRPC_ERROR_NONE);
chand->disconnect_error_.Store(op->disconnect_with_error,
MemoryOrder::RELEASE);
chand->UpdateStateAndPickerLocked(
disconnect_error_.Store(op->disconnect_with_error, MemoryOrder::RELEASE);
UpdateStateAndPickerLocked(
GRPC_CHANNEL_SHUTDOWN, "shutdown from API",
absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
GRPC_ERROR_REF(op->disconnect_with_error)));
}
}
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_, "start_transport_op");
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op");
ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, GRPC_ERROR_NONE);
}
@@ -1885,13 +1844,10 @@ void ChannelData::StartTransportOp(grpc_channel_element* elem,
if (op->bind_pollset != nullptr) {
grpc_pollset_set_add_pollset(chand->interested_parties_, op->bind_pollset);
}
// Pop into control plane combiner for remaining ops.
op->handler_private.extra_arg = elem;
// Pop into control plane work_serializer for remaining ops.
GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "start_transport_op");
chand->combiner_->Run(
GRPC_CLOSURE_INIT(&op->handler_private.closure,
ChannelData::StartTransportOpLocked, op, nullptr),
GRPC_ERROR_NONE);
chand->work_serializer_->Run(
[chand, op]() { chand->StartTransportOpLocked(op); }, DEBUG_LOCATION);
}
void ChannelData::GetChannelInfo(grpc_channel_element* elem,
@@ -1942,14 +1898,13 @@ ChannelData::GetConnectedSubchannelInDataPlane(
return connected_subchannel->Ref();
}
void ChannelData::TryToConnectLocked(void* arg, grpc_error* /*error_ignored*/) {
auto* chand = static_cast<ChannelData*>(arg);
if (chand->resolving_lb_policy_ != nullptr) {
chand->resolving_lb_policy_->ExitIdleLocked();
void ChannelData::TryToConnectLocked() {
if (resolving_lb_policy_ != nullptr) {
resolving_lb_policy_->ExitIdleLocked();
} else {
chand->CreateResolvingLoadBalancingPolicyLocked();
CreateResolvingLoadBalancingPolicyLocked();
}
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_, "TryToConnect");
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "TryToConnect");
}
grpc_connectivity_state ChannelData::CheckConnectivityState(
@@ -1957,8 +1912,7 @@ grpc_connectivity_state ChannelData::CheckConnectivityState(
grpc_connectivity_state out = state_tracker_.state();
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(owning_stack_, "TryToConnect");
combiner_->Run(GRPC_CLOSURE_CREATE(TryToConnectLocked, this, nullptr),
GRPC_ERROR_NONE);
work_serializer_->Run([this]() { TryToConnectLocked(); }, DEBUG_LOCATION);
}
return out;
}
@@ -3892,8 +3846,25 @@ bool CallData::PickSubchannelLocked(grpc_call_element* elem,
// The picker being null means that the channel is currently in IDLE state.
// The incoming call will make the channel exit IDLE.
if (chand->picker() == nullptr) {
// Bounce into the control plane combiner to exit IDLE.
chand->CheckConnectivityState(/*try_to_connect=*/true);
GRPC_CHANNEL_STACK_REF(chand->owning_stack(), "PickSubchannelLocked");
// Bounce into the control plane work serializer to exit IDLE. Since we are
// holding on to the data plane mutex here, we offload it on the ExecCtx so
// that we don't deadlock with ourselves.
ExecCtx::Run(
DEBUG_LOCATION,
GRPC_CLOSURE_CREATE(
[](void* arg, grpc_error* /*error*/) {
auto* chand = static_cast<ChannelData*>(arg);
chand->work_serializer()->Run(
[chand]() {
chand->CheckConnectivityState(/*try_to_connect=*/true);
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack(),
"PickSubchannelLocked");
},
DEBUG_LOCATION);
},
chand, nullptr),
GRPC_ERROR_NONE);
// Queue the pick, so that it will be attempted once the channel
// becomes connected.
AddCallToQueuedPicksLocked(elem);

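The PickSubchannelLocked() hunk just above also shows the one place where the migration is not a straight swap: a WorkSerializer::Run() call may execute its callback inline on the calling thread, so entering the serializer while holding the data plane mutex risks deadlocking against control-plane work that takes the same mutex. The code therefore offloads through ExecCtx first. Below is a hypothetical stand-alone illustration of the hazard and the fix; InlineSerializer, data_plane_mu, and the thread vector standing in for ExecCtx are all invented for this sketch.

```cpp
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Worst-case serializer: when idle, Run() executes the callback inline on
// the calling thread (a WorkSerializer-style primitive is allowed to).
struct InlineSerializer {
  void Run(std::function<void()> cb) { cb(); }
};

std::mutex data_plane_mu;
InlineSerializer control_plane;

void WrongHop() {
  std::lock_guard<std::mutex> lock(data_plane_mu);
  // Hazard: Run() executes inline while we already hold data_plane_mu, and
  // the control-plane work may take the same mutex (e.g. to publish a new
  // picker). Relocking a held std::mutex is undefined behavior, typically
  // a self-deadlock.
  control_plane.Run([] { std::lock_guard<std::mutex> l(data_plane_mu); });
}

void SafeHop(std::vector<std::thread>& exec_ctx) {
  std::lock_guard<std::mutex> lock(data_plane_mu);
  // Defer instead: schedule the hop on an executor (standing in for
  // ExecCtx::Run), so the serialized callback can only acquire
  // data_plane_mu after this frame has released it.
  exec_ctx.emplace_back([] {
    control_plane.Run([] { std::lock_guard<std::mutex> l(data_plane_mu); });
  });
}

int main() {
  std::vector<std::thread> exec_ctx;
  SafeHop(exec_ctx);  // fine: the callback blocks until we unlock
  for (auto& t : exec_ctx) t.join();
  // WrongHop();      // would self-deadlock; deliberately not called
}
```

The same reasoning appears again just below in QueuePicker::Pick(), which bounces through ExecCtx before calling ExitIdleLocked().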
@@ -33,13 +33,12 @@ DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(false, "lb_policy_refcount");
LoadBalancingPolicy::LoadBalancingPolicy(Args args, intptr_t initial_refcount)
: InternallyRefCounted(&grpc_trace_lb_policy_refcount, initial_refcount),
combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
work_serializer_(std::move(args.work_serializer)),
interested_parties_(grpc_pollset_set_create()),
channel_control_helper_(std::move(args.channel_control_helper)) {}
LoadBalancingPolicy::~LoadBalancingPolicy() {
grpc_pollset_set_destroy(interested_parties_);
GRPC_COMBINER_UNREF(combiner_, "lb_policy");
}
void LoadBalancingPolicy::Orphan() {
@@ -99,29 +98,31 @@ LoadBalancingPolicy::PickResult LoadBalancingPolicy::QueuePicker::Pick(
// the time this function returns, the pick will already have
// been processed, and we'll be trying to re-process the same
// pick again, leading to a crash.
// 2. We are currently running in the data plane combiner, but we
// need to bounce into the control plane combiner to call
// 2. We are currently running in the data plane mutex, but we
// need to bounce into the control plane work_serializer to call
// ExitIdleLocked().
if (!exit_idle_called_) {
exit_idle_called_ = true;
// Ref held by closure.
parent_->Ref(DEBUG_LOCATION, "QueuePicker::CallExitIdle").release();
parent_->combiner()->Run(
GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(), nullptr),
GRPC_ERROR_NONE);
auto* parent = parent_->Ref().release(); // ref held by lambda.
ExecCtx::Run(DEBUG_LOCATION,
GRPC_CLOSURE_CREATE(
[](void* arg, grpc_error* /*error*/) {
auto* parent = static_cast<LoadBalancingPolicy*>(arg);
parent->work_serializer()->Run(
[parent]() {
parent->ExitIdleLocked();
parent->Unref();
},
DEBUG_LOCATION);
},
parent, nullptr),
GRPC_ERROR_NONE);
}
PickResult result;
result.type = PickResult::PICK_QUEUE;
return result;
}
void LoadBalancingPolicy::QueuePicker::CallExitIdle(void* arg,
grpc_error* /*error*/) {
LoadBalancingPolicy* parent = static_cast<LoadBalancingPolicy*>(arg);
parent->ExitIdleLocked();
parent->Unref(DEBUG_LOCATION, "QueuePicker::CallExitIdle");
}
//
// LoadBalancingPolicy::TransientFailurePicker
//

@@ -31,8 +31,8 @@
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/string_view.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/transport/connectivity_state.h"
namespace grpc_core {
@@ -72,7 +72,7 @@ extern DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
/// LoadBalancingPolicy API.
///
/// Note: All methods with a "Locked" suffix must be called from the
/// combiner passed to the constructor.
/// work_serializer passed to the constructor.
///
/// Any I/O done by the LB policy should be done under the pollset_set
/// returned by \a interested_parties().
@@ -242,7 +242,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// live in the LB policy object itself.
///
/// Currently, pickers are always accessed from within the
/// client_channel data plane combiner, so they do not have to be
/// client_channel data plane mutex, so they do not have to be
/// thread-safe.
class SubchannelPicker {
public:
@@ -309,12 +309,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// Args used to instantiate an LB policy.
struct Args {
/// The combiner under which all LB policy calls will be run.
/// Policy does NOT take ownership of the reference to the combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a smart pointer that does pass ownership
// of a reference.
Combiner* combiner = nullptr;
/// The work_serializer under which all LB policy calls will be run.
std::shared_ptr<WorkSerializer> work_serializer;
/// Channel control helper.
/// Note: LB policies MUST NOT call any method on the helper from
/// their constructor.
@@ -352,7 +348,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
grpc_pollset_set* interested_parties() const { return interested_parties_; }
// Note: This must be invoked while holding the combiner.
// Note: This must be invoked while holding the work_serializer.
void Orphan() override;
// A picker that returns PICK_QUEUE for all picks.
@@ -368,8 +364,6 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
PickResult Pick(PickArgs args) override;
private:
static void CallExitIdle(void* arg, grpc_error* error);
RefCountedPtr<LoadBalancingPolicy> parent_;
bool exit_idle_called_ = false;
};
@@ -387,7 +381,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
};
protected:
Combiner* combiner() const { return combiner_; }
std::shared_ptr<WorkSerializer> work_serializer() const {
return work_serializer_;
}
// Note: LB policies MUST NOT call any method on the helper from their
// constructor.
@@ -399,8 +395,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
virtual void ShutdownLocked() = 0;
private:
/// Combiner under which LB policy actions take place.
Combiner* combiner_;
/// Work Serializer under which LB policy actions take place.
std::shared_ptr<WorkSerializer> work_serializer_;
/// Owned pointer to interested parties in load balancing decisions.
grpc_pollset_set* interested_parties_;
/// Channel control helper.

@@ -251,7 +251,7 @@ OrphanablePtr<LoadBalancingPolicy> ChildPolicyHandler::CreateChildPolicy(
const char* child_policy_name, const grpc_channel_args& args) {
Helper* helper = new Helper(Ref(DEBUG_LOCATION, "Helper"));
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.work_serializer = work_serializer();
lb_policy_args.channel_control_helper =
std::unique_ptr<ChannelControlHelper>(helper);
lb_policy_args.args = &args;

@@ -91,7 +91,6 @@
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
@@ -179,11 +178,11 @@ class GrpcLb : public LoadBalancingPolicy {
static void OnBalancerMessageReceived(void* arg, grpc_error* error);
static void OnBalancerStatusReceived(void* arg, grpc_error* error);
static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error);
static void ClientLoadReportDoneLocked(void* arg, grpc_error* error);
static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
void MaybeSendClientLoadReportLocked(grpc_error* error);
void ClientLoadReportDoneLocked(grpc_error* error);
void OnInitialRequestSentLocked();
void OnBalancerMessageReceivedLocked();
void OnBalancerStatusReceivedLocked(grpc_error* error);
// The owning LB policy.
RefCountedPtr<LoadBalancingPolicy> grpclb_policy_;
@@ -248,16 +247,16 @@ class GrpcLb : public LoadBalancingPolicy {
// should not be dropped.
//
// Note: This is called from the picker, so it will be invoked in
// the channel's data plane combiner, NOT the control plane
// combiner. It should not be accessed by any other part of the LB
// the channel's data plane mutex, NOT the control plane
// work_serializer. It should not be accessed by any other part of the LB
// policy.
const char* ShouldDrop();
private:
std::vector<GrpcLbServer> serverlist_;
// Guarded by the channel's data plane combiner, NOT the control
// plane combiner. It should not be accessed by anything but the
// Guarded by the channel's data plane mutex, NOT the control
// plane work_serializer. It should not be accessed by anything but the
// picker via the ShouldDrop() method.
size_t drop_index_ = 0;
};
@@ -305,7 +304,7 @@ class GrpcLb : public LoadBalancingPolicy {
class StateWatcher : public AsyncConnectivityStateWatcherInterface {
public:
explicit StateWatcher(RefCountedPtr<GrpcLb> parent)
: AsyncConnectivityStateWatcherInterface(parent->combiner()),
: AsyncConnectivityStateWatcherInterface(parent->work_serializer()),
parent_(std::move(parent)) {}
~StateWatcher() { parent_.reset(DEBUG_LOCATION, "StateWatcher"); }
@@ -340,18 +339,19 @@ class GrpcLb : public LoadBalancingPolicy {
// Helper functions used in UpdateLocked().
void ProcessAddressesAndChannelArgsLocked(const ServerAddressList& addresses,
const grpc_channel_args& args);
void CancelBalancerChannelConnectivityWatchLocked();
// Methods for dealing with fallback state.
void MaybeEnterFallbackModeAfterStartup();
static void OnFallbackTimer(void* arg, grpc_error* error);
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void OnFallbackTimerLocked(grpc_error* error);
// Methods for dealing with the balancer call.
void StartBalancerCallLocked();
void StartBalancerCallRetryTimerLocked();
static void OnBalancerCallRetryTimer(void* arg, grpc_error* error);
static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
void OnBalancerCallRetryTimerLocked(grpc_error* error);
// Methods for dealing with the child policy.
grpc_channel_args* CreateChildPolicyArgsLocked(
@@ -739,6 +739,15 @@ GrpcLb::BalancerCallState::BalancerCallState(
// the polling entities from client_channel.
GPR_ASSERT(grpclb_policy()->server_name_ != nullptr);
GPR_ASSERT(grpclb_policy()->server_name_[0] != '\0');
// Closure Initialization
GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSent, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_,
OnBalancerMessageReceived, this, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_, OnBalancerStatusReceived,
this, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport,
this, grpc_schedule_on_exec_ctx);
const grpc_millis deadline =
grpclb_policy()->lb_call_timeout_ms_ == 0
? GRPC_MILLIS_INF_FUTURE
@@ -815,8 +824,6 @@ void GrpcLb::BalancerCallState::StartQuery() {
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent");
self.release();
GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSent, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -839,8 +846,6 @@ void GrpcLb::BalancerCallState::StartQuery() {
// with the callback.
self = Ref(DEBUG_LOCATION, "on_message_received");
self.release();
GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_,
OnBalancerMessageReceived, this, grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -857,8 +862,6 @@ void GrpcLb::BalancerCallState::StartQuery() {
// This callback signals the end of the LB call, so it relies on the initial
// ref instead of a new ref. When it's invoked, it's the initial ref that is
// unreffed.
GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_, OnBalancerStatusReceived,
this, grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -877,28 +880,27 @@ void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() {
void GrpcLb::BalancerCallState::MaybeSendClientLoadReport(void* arg,
grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
lb_calld->grpclb_policy()->combiner()->Run(
GRPC_CLOSURE_INIT(&lb_calld->client_load_report_closure_,
MaybeSendClientLoadReportLocked, lb_calld, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld, error]() { lb_calld->MaybeSendClientLoadReportLocked(error); },
DEBUG_LOCATION);
}
void GrpcLb::BalancerCallState::MaybeSendClientLoadReportLocked(
void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
lb_calld->client_load_report_timer_callback_pending_ = false;
if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) {
lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
grpc_error* error) {
client_load_report_timer_callback_pending_ = false;
if (error != GRPC_ERROR_NONE || this != grpclb_policy()->lb_calld_.get()) {
Unref(DEBUG_LOCATION, "client_load_report");
GRPC_ERROR_UNREF(error);
return;
}
// If we've already sent the initial request, then we can go ahead and send
// the load report. Otherwise, we need to wait until the initial request has
// been sent to send this (see OnInitialRequestSentLocked()).
if (lb_calld->send_message_payload_ == nullptr) {
lb_calld->SendClientLoadReportLocked();
if (send_message_payload_ == nullptr) {
SendClientLoadReportLocked();
} else {
lb_calld->client_load_report_is_due_ = true;
client_load_report_is_due_ = true;
}
}
@@ -957,110 +959,98 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() {
void GrpcLb::BalancerCallState::ClientLoadReportDone(void* arg,
grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
lb_calld->grpclb_policy()->combiner()->Run(
GRPC_CLOSURE_INIT(&lb_calld->client_load_report_closure_,
ClientLoadReportDoneLocked, lb_calld, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld, error]() { lb_calld->ClientLoadReportDoneLocked(error); },
DEBUG_LOCATION);
}
void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
lb_calld->send_message_payload_ = nullptr;
if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) {
lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(grpc_error* error) {
grpc_byte_buffer_destroy(send_message_payload_);
send_message_payload_ = nullptr;
if (error != GRPC_ERROR_NONE || this != grpclb_policy()->lb_calld_.get()) {
Unref(DEBUG_LOCATION, "client_load_report");
GRPC_ERROR_UNREF(error);
return;
}
lb_calld->ScheduleNextClientLoadReportLocked();
ScheduleNextClientLoadReportLocked();
}
void GrpcLb::BalancerCallState::OnInitialRequestSent(void* arg,
grpc_error* error) {
grpc_error* /*error*/) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
lb_calld->grpclb_policy()->combiner()->Run(
GRPC_CLOSURE_INIT(&lb_calld->lb_on_initial_request_sent_,
OnInitialRequestSentLocked, lb_calld, nullptr),
GRPC_ERROR_REF(error));
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld]() { lb_calld->OnInitialRequestSentLocked(); }, DEBUG_LOCATION);
}
void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(
void* arg, grpc_error* /*error*/) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
lb_calld->send_message_payload_ = nullptr;
void GrpcLb::BalancerCallState::OnInitialRequestSentLocked() {
grpc_byte_buffer_destroy(send_message_payload_);
send_message_payload_ = nullptr;
// If we attempted to send a client load report before the initial request was
// sent (and this lb_calld is still in use), send the load report now.
if (lb_calld->client_load_report_is_due_ &&
lb_calld == lb_calld->grpclb_policy()->lb_calld_.get()) {
lb_calld->SendClientLoadReportLocked();
lb_calld->client_load_report_is_due_ = false;
if (client_load_report_is_due_ && this == grpclb_policy()->lb_calld_.get()) {
SendClientLoadReportLocked();
client_load_report_is_due_ = false;
}
lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent");
Unref(DEBUG_LOCATION, "on_initial_request_sent");
}
void GrpcLb::BalancerCallState::OnBalancerMessageReceived(void* arg,
grpc_error* error) {
void GrpcLb::BalancerCallState::OnBalancerMessageReceived(
void* arg, grpc_error* /*error*/) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
lb_calld->grpclb_policy()->combiner()->Run(
GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_message_received_,
OnBalancerMessageReceivedLocked, lb_calld, nullptr),
GRPC_ERROR_REF(error));
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld]() { lb_calld->OnBalancerMessageReceivedLocked(); },
DEBUG_LOCATION);
}
void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
void* arg, grpc_error* /*error*/) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
// Null payload means the LB call was cancelled.
if (lb_calld != grpclb_policy->lb_calld_.get() ||
lb_calld->recv_message_payload_ == nullptr) {
lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
if (this != grpclb_policy()->lb_calld_.get() ||
recv_message_payload_ == nullptr) {
Unref(DEBUG_LOCATION, "on_message_received");
return;
}
grpc_byte_buffer_reader bbr;
grpc_byte_buffer_reader_init(&bbr, lb_calld->recv_message_payload_);
grpc_byte_buffer_reader_init(&bbr, recv_message_payload_);
grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(lb_calld->recv_message_payload_);
lb_calld->recv_message_payload_ = nullptr;
grpc_byte_buffer_destroy(recv_message_payload_);
recv_message_payload_ = nullptr;
GrpcLbResponse response;
upb::Arena arena;
if (!GrpcLbResponseParse(response_slice, arena.ptr(), &response) ||
(response.type == response.INITIAL && lb_calld->seen_initial_response_)) {
(response.type == response.INITIAL && seen_initial_response_)) {
char* response_slice_str =
grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_log(GPR_ERROR,
"[grpclb %p] lb_calld=%p: Invalid LB response received: '%s'. "
"Ignoring.",
grpclb_policy, lb_calld, response_slice_str);
grpclb_policy(), this, response_slice_str);
gpr_free(response_slice_str);
} else {
switch (response.type) {
case response.INITIAL: {
if (response.client_stats_report_interval != 0) {
lb_calld->client_stats_report_interval_ =
client_stats_report_interval_ =
GPR_MAX(GPR_MS_PER_SEC, response.client_stats_report_interval);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting interval = %" PRId64
" milliseconds",
grpclb_policy, lb_calld,
lb_calld->client_stats_report_interval_);
grpclb_policy(), this, client_stats_report_interval_);
}
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting NOT enabled",
grpclb_policy, lb_calld);
grpclb_policy(), this);
}
lb_calld->seen_initial_response_ = true;
seen_initial_response_ = true;
break;
}
case response.SERVERLIST: {
GPR_ASSERT(lb_calld->lb_call_ != nullptr);
GPR_ASSERT(lb_call_ != nullptr);
auto serverlist_wrapper =
MakeRefCounted<Serverlist>(std::move(response.serverlist));
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
@@ -1069,28 +1059,27 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
" servers received:\n%s",
grpclb_policy, lb_calld,
grpclb_policy(), this,
serverlist_wrapper->serverlist().size(),
serverlist_text.get());
}
lb_calld->seen_serverlist_ = true;
seen_serverlist_ = true;
// Start sending client load report only after we start using the
// serverlist returned from the current LB call.
if (lb_calld->client_stats_report_interval_ > 0 &&
lb_calld->client_stats_ == nullptr) {
lb_calld->client_stats_ = MakeRefCounted<GrpcLbClientStats>();
if (client_stats_report_interval_ > 0 && client_stats_ == nullptr) {
client_stats_ = MakeRefCounted<GrpcLbClientStats>();
// Ref held by callback.
lb_calld->Ref(DEBUG_LOCATION, "client_load_report").release();
lb_calld->ScheduleNextClientLoadReportLocked();
Ref(DEBUG_LOCATION, "client_load_report").release();
ScheduleNextClientLoadReportLocked();
}
// Check if the serverlist differs from the previous one.
if (grpclb_policy->serverlist_ != nullptr &&
*grpclb_policy->serverlist_ == *serverlist_wrapper) {
if (grpclb_policy()->serverlist_ != nullptr &&
*grpclb_policy()->serverlist_ == *serverlist_wrapper) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Incoming server list identical "
"to current, ignoring.",
grpclb_policy, lb_calld);
grpclb_policy(), this);
}
} else { // New serverlist.
// Dispose of the fallback.
@@ -1112,130 +1101,124 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// the grpclb implementation at this point, since we're deprecating
// it in favor of the xds policy. We will implement this the
// right way in the xds policy instead.
if (grpclb_policy->fallback_mode_) {
if (grpclb_policy()->fallback_mode_) {
gpr_log(GPR_INFO,
"[grpclb %p] Received response from balancer; exiting "
"fallback mode",
grpclb_policy);
grpclb_policy->fallback_mode_ = false;
grpclb_policy());
grpclb_policy()->fallback_mode_ = false;
}
if (grpclb_policy->fallback_at_startup_checks_pending_) {
grpclb_policy->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
grpclb_policy->CancelBalancerChannelConnectivityWatchLocked();
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
grpclb_policy()->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy()->lb_fallback_timer_);
grpclb_policy()->CancelBalancerChannelConnectivityWatchLocked();
}
// Update the serverlist in the GrpcLb instance. This serverlist
// instance will be destroyed either upon the next update or when the
// GrpcLb instance is destroyed.
grpclb_policy->serverlist_ = std::move(serverlist_wrapper);
grpclb_policy->CreateOrUpdateChildPolicyLocked();
grpclb_policy()->serverlist_ = std::move(serverlist_wrapper);
grpclb_policy()->CreateOrUpdateChildPolicyLocked();
}
break;
}
case response.FALLBACK: {
if (!grpclb_policy->fallback_mode_) {
if (!grpclb_policy()->fallback_mode_) {
gpr_log(GPR_INFO,
"[grpclb %p] Entering fallback mode as requested by balancer",
grpclb_policy);
if (grpclb_policy->fallback_at_startup_checks_pending_) {
grpclb_policy->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
grpclb_policy->CancelBalancerChannelConnectivityWatchLocked();
grpclb_policy());
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
grpclb_policy()->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy()->lb_fallback_timer_);
grpclb_policy()->CancelBalancerChannelConnectivityWatchLocked();
}
grpclb_policy->fallback_mode_ = true;
grpclb_policy->CreateOrUpdateChildPolicyLocked();
grpclb_policy()->fallback_mode_ = true;
grpclb_policy()->CreateOrUpdateChildPolicyLocked();
// Reset serverlist, so that if the balancer exits fallback
// mode by sending the same serverlist we were previously
// using, we don't incorrectly ignore it as a duplicate.
grpclb_policy->serverlist_.reset();
grpclb_policy()->serverlist_.reset();
}
break;
}
}
}
grpc_slice_unref_internal(response_slice);
if (!grpclb_policy->shutting_down_) {
if (!grpclb_policy()->shutting_down_) {
// Keep listening for serverlist updates.
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &lb_calld->recv_message_payload_;
op.data.recv_message.recv_message = &recv_message_payload_;
op.flags = 0;
op.reserved = nullptr;
// Reuse the "OnBalancerMessageReceivedLocked" ref taken in StartQuery().
GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_message_received_,
GrpcLb::BalancerCallState::OnBalancerMessageReceived,
lb_calld, grpc_schedule_on_exec_ctx);
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
lb_calld->lb_call_, &op, 1,
&lb_calld->lb_on_balancer_message_received_);
lb_call_, &op, 1, &lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
lb_calld->Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown");
Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown");
}
}
void GrpcLb::BalancerCallState::OnBalancerStatusReceived(void* arg,
grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
lb_calld->grpclb_policy()->combiner()->Run(
GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_status_received_,
OnBalancerStatusReceivedLocked, lb_calld, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // owned by lambda
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld, error]() { lb_calld->OnBalancerStatusReceivedLocked(error); },
DEBUG_LOCATION);
}
void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
GPR_ASSERT(lb_calld->lb_call_ != nullptr);
grpc_error* error) {
GPR_ASSERT(lb_call_ != nullptr);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
char* status_details =
grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
char* status_details = grpc_slice_to_c_string(lb_call_status_details_);
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Status from LB server received. "
"Status = %d, details = '%s', (lb_call: %p), error '%s'",
grpclb_policy, lb_calld, lb_calld->lb_call_status_, status_details,
lb_calld->lb_call_, grpc_error_string(error));
grpclb_policy(), this, lb_call_status_, status_details, lb_call_,
grpc_error_string(error));
gpr_free(status_details);
}
GRPC_ERROR_UNREF(error);
// If this lb_calld is still in use, this call ended because of a failure so
// we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required.
if (lb_calld == grpclb_policy->lb_calld_.get()) {
if (this == grpclb_policy()->lb_calld_.get()) {
// If the fallback-at-startup checks are pending, go into fallback mode
// immediately. This short-circuits the timeout for the fallback-at-startup
// case.
if (grpclb_policy->fallback_at_startup_checks_pending_) {
GPR_ASSERT(!lb_calld->seen_serverlist_);
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
GPR_ASSERT(!seen_serverlist_);
gpr_log(GPR_INFO,
"[grpclb %p] Balancer call finished without receiving "
"serverlist; entering fallback mode",
grpclb_policy);
grpclb_policy->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
grpclb_policy->CancelBalancerChannelConnectivityWatchLocked();
grpclb_policy->fallback_mode_ = true;
grpclb_policy->CreateOrUpdateChildPolicyLocked();
grpclb_policy());
grpclb_policy()->fallback_at_startup_checks_pending_ = false;
grpc_timer_cancel(&grpclb_policy()->lb_fallback_timer_);
grpclb_policy()->CancelBalancerChannelConnectivityWatchLocked();
grpclb_policy()->fallback_mode_ = true;
grpclb_policy()->CreateOrUpdateChildPolicyLocked();
} else {
// This handles the fallback-after-startup case.
grpclb_policy->MaybeEnterFallbackModeAfterStartup();
grpclb_policy()->MaybeEnterFallbackModeAfterStartup();
}
grpclb_policy->lb_calld_.reset();
GPR_ASSERT(!grpclb_policy->shutting_down_);
grpclb_policy->channel_control_helper()->RequestReresolution();
if (lb_calld->seen_initial_response_) {
grpclb_policy()->lb_calld_.reset();
GPR_ASSERT(!grpclb_policy()->shutting_down_);
grpclb_policy()->channel_control_helper()->RequestReresolution();
if (seen_initial_response_) {
// If we lose connection to the LB server, reset the backoff and restart
// the LB call immediately.
grpclb_policy->lb_call_backoff_.Reset();
grpclb_policy->StartBalancerCallLocked();
grpclb_policy()->lb_call_backoff_.Reset();
grpclb_policy()->StartBalancerCallLocked();
} else {
// If this LB call fails establishing any connection to the LB server,
// retry later.
grpclb_policy->StartBalancerCallRetryTimerLocked();
grpclb_policy()->StartBalancerCallRetryTimerLocked();
}
}
lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended");
Unref(DEBUG_LOCATION, "lb_call_ended");
}
//
@@ -1332,6 +1315,11 @@ GrpcLb::GrpcLb(Args args)
.set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Closure Initialization
GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimer, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimer, this,
grpc_schedule_on_exec_ctx);
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@@ -1416,8 +1404,6 @@ void GrpcLb::UpdateLocked(UpdateArgs args) {
// Start timer.
grpc_millis deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_;
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback
GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimer, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
// Start watching the channel's connectivity state. If the channel
// goes into state TRANSIENT_FAILURE before the timer fires, we go into
@@ -1529,33 +1515,30 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
self.release();
GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimer, this,
grpc_schedule_on_exec_ctx);
retry_timer_callback_pending_ = true;
grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_);
}
void GrpcLb::OnBalancerCallRetryTimer(void* arg, grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
grpclb_policy->combiner()->Run(
GRPC_CLOSURE_INIT(&grpclb_policy->lb_on_call_retry_,
&GrpcLb::OnBalancerCallRetryTimerLocked, grpclb_policy,
nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
grpclb_policy->work_serializer()->Run(
[grpclb_policy, error]() {
grpclb_policy->OnBalancerCallRetryTimerLocked(error);
},
DEBUG_LOCATION);
}
void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
grpclb_policy->retry_timer_callback_pending_ = false;
if (!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE &&
grpclb_policy->lb_calld_ == nullptr) {
void GrpcLb::OnBalancerCallRetryTimerLocked(grpc_error* error) {
retry_timer_callback_pending_ = false;
if (!shutting_down_ && error == GRPC_ERROR_NONE && lb_calld_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server",
grpclb_policy);
gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", this);
}
grpclb_policy->StartBalancerCallLocked();
StartBalancerCallLocked();
}
grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
GRPC_ERROR_UNREF(error);
}
//
@ -1582,28 +1565,28 @@ void GrpcLb::MaybeEnterFallbackModeAfterStartup() {
void GrpcLb::OnFallbackTimer(void* arg, grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
grpclb_policy->combiner()->Run(
GRPC_CLOSURE_INIT(&grpclb_policy->lb_on_fallback_,
&GrpcLb::OnFallbackTimerLocked, grpclb_policy, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
grpclb_policy->work_serializer()->Run(
[grpclb_policy, error]() { grpclb_policy->OnFallbackTimerLocked(error); },
DEBUG_LOCATION);
}
void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
void GrpcLb::OnFallbackTimerLocked(grpc_error* error) {
// If we receive a serverlist after the timer fires but before this callback
// actually runs, don't fall back.
if (grpclb_policy->fallback_at_startup_checks_pending_ &&
!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE) {
if (fallback_at_startup_checks_pending_ && !shutting_down_ &&
error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO,
"[grpclb %p] No response from balancer after fallback timeout; "
"entering fallback mode",
grpclb_policy);
grpclb_policy->fallback_at_startup_checks_pending_ = false;
grpclb_policy->CancelBalancerChannelConnectivityWatchLocked();
grpclb_policy->fallback_mode_ = true;
grpclb_policy->CreateOrUpdateChildPolicyLocked();
this);
fallback_at_startup_checks_pending_ = false;
CancelBalancerChannelConnectivityWatchLocked();
fallback_mode_ = true;
CreateOrUpdateChildPolicyLocked();
}
grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
Unref(DEBUG_LOCATION, "on_fallback_timer");
GRPC_ERROR_UNREF(error);
}
//
@ -1627,7 +1610,7 @@ grpc_channel_args* GrpcLb::CreateChildPolicyArgsLocked(
OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.work_serializer = work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper = absl::make_unique<Helper>(Ref());
OrphanablePtr<LoadBalancingPolicy> lb_policy =

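The grpclb hunks above all reduce to one recurring two-step shape: the raw timer callback still fires on the exec_ctx, but instead of re-initializing a grpc_closure and handing it to the combiner, it takes its own ref on the error and posts a lambda onto the policy's WorkSerializer, where the *Locked half runs as an ordinary instance method. A minimal sketch of that shape, using only calls that appear in this diff; MyPolicy, OnTimer, and the ref tags are illustrative names, not tree code:

// Exec_ctx-side half: hop onto the serializer.
void MyPolicy::OnTimer(void* arg, grpc_error* error) {
  MyPolicy* self = static_cast<MyPolicy*>(arg);
  GRPC_ERROR_REF(error);  // ref owned by lambda until OnTimerLocked runs
  self->work_serializer()->Run(
      [self, error]() { self->OnTimerLocked(error); }, DEBUG_LOCATION);
}
// Serializer-side half: an instance method, so no void* unwrapping.
void MyPolicy::OnTimerLocked(grpc_error* error) {
  timer_callback_pending_ = false;
  if (!shutting_down_ && error == GRPC_ERROR_NONE) {
    // ... serialized work ...
  }
  Unref(DEBUG_LOCATION, "on_timer");  // balances the ref taken when arming
  GRPC_ERROR_UNREF(error);            // balances the ref taken in OnTimer
}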
@ -31,8 +31,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@ -169,9 +169,9 @@ class PriorityLb : public LoadBalancingPolicy {
void StartFailoverTimerLocked();
static void OnFailoverTimer(void* arg, grpc_error* error);
static void OnFailoverTimerLocked(void* arg, grpc_error* error);
void OnFailoverTimerLocked(grpc_error* error);
static void OnDeactivationTimer(void* arg, grpc_error* error);
static void OnDeactivationTimerLocked(void* arg, grpc_error* error);
void OnDeactivationTimerLocked(grpc_error* error);
RefCountedPtr<PriorityLb> priority_policy_;
const std::string name_;
@ -189,7 +189,6 @@ class PriorityLb : public LoadBalancingPolicy {
// States of failover.
grpc_timer failover_timer_;
grpc_closure on_failover_timer_;
grpc_closure on_failover_timer_locked_;
bool failover_timer_callback_pending_ = false;
};
@ -492,8 +491,8 @@ PriorityLb::ChildPriority::ChildPriority(
}
GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_failover_timer_locked_, OnFailoverTimerLocked, this,
nullptr);
GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this,
grpc_schedule_on_exec_ctx);
// Start the failover timer.
StartFailoverTimerLocked();
}
@ -550,7 +549,7 @@ OrphanablePtr<LoadBalancingPolicy>
PriorityLb::ChildPriority::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = priority_policy_->combiner();
lb_policy_args.work_serializer = priority_policy_->work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
@ -631,26 +630,25 @@ void PriorityLb::ChildPriority::MaybeCancelFailoverTimerLocked() {
void PriorityLb::ChildPriority::OnFailoverTimer(void* arg, grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
self->priority_policy_->combiner()->Run(&self->on_failover_timer_locked_,
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
self->priority_policy_->work_serializer()->Run(
[self, error]() { self->OnFailoverTimerLocked(error); }, DEBUG_LOCATION);
}
void PriorityLb::ChildPriority::OnFailoverTimerLocked(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
if (error == GRPC_ERROR_NONE && self->failover_timer_callback_pending_ &&
!self->priority_policy_->shutting_down_) {
void PriorityLb::ChildPriority::OnFailoverTimerLocked(grpc_error* error) {
if (error == GRPC_ERROR_NONE && failover_timer_callback_pending_ &&
!priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
"reporting TRANSIENT_FAILURE",
self->priority_policy_.get(), self->name_.c_str(), self);
priority_policy_.get(), name_.c_str(), this);
}
self->failover_timer_callback_pending_ = false;
self->OnConnectivityStateUpdateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
nullptr);
failover_timer_callback_pending_ = false;
OnConnectivityStateUpdateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE, nullptr);
}
self->Unref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked");
Unref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked");
GRPC_ERROR_UNREF(error);
}
void PriorityLb::ChildPriority::DeactivateLocked() {
@ -666,8 +664,6 @@ void PriorityLb::ChildPriority::DeactivateLocked() {
MaybeCancelFailoverTimerLocked();
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "ChildPriority+timer").release();
GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&deactivation_timer_,
ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
&on_deactivation_timer_);
@ -688,27 +684,26 @@ void PriorityLb::ChildPriority::MaybeReactivateLocked() {
void PriorityLb::ChildPriority::OnDeactivationTimer(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
self->priority_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_deactivation_timer_,
OnDeactivationTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
self->priority_policy_->work_serializer()->Run(
[self, error]() { self->OnDeactivationTimerLocked(error); },
DEBUG_LOCATION);
}
void PriorityLb::ChildPriority::OnDeactivationTimerLocked(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
if (error == GRPC_ERROR_NONE && self->deactivation_timer_callback_pending_ &&
!self->priority_policy_->shutting_down_) {
void PriorityLb::ChildPriority::OnDeactivationTimerLocked(grpc_error* error) {
if (error == GRPC_ERROR_NONE && deactivation_timer_callback_pending_ &&
!priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
"deleting child",
self->priority_policy_.get(), self->name_.c_str(), self);
priority_policy_.get(), name_.c_str(), this);
}
self->deactivation_timer_callback_pending_ = false;
self->priority_policy_->DeleteChild(self);
deactivation_timer_callback_pending_ = false;
priority_policy_->DeleteChild(this);
}
self->Unref(DEBUG_LOCATION, "ChildPriority+timer");
Unref(DEBUG_LOCATION, "ChildPriority+timer");
GRPC_ERROR_UNREF(error);
}
//

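priority.cc also drops the separate on_failover_timer_locked_ closure: the one remaining grpc_closure is bound once in the constructor, and each arming of a timer takes a ref that the serialized callback later releases. A sketch of that arming pattern; MyChild, on_timer_, and timer_ are illustrative names, while kChildRetentionIntervalMs is the constant from the hunks above:

// Bind the iomgr closure once; serializer hops need no closure at all.
MyChild::MyChild(/* ... */) {
  GRPC_CLOSURE_INIT(&on_timer_, OnTimer, this, grpc_schedule_on_exec_ctx);
}
void MyChild::DeactivateLocked() {
  Ref(DEBUG_LOCATION, "MyChild+timer").release();  // released in OnTimerLocked
  timer_callback_pending_ = true;
  grpc_timer_init(&timer_,
                  ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
                  &on_timer_);
}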
@ -62,7 +62,7 @@ class MySubchannelList
};
*/
// All methods will be called from within the client_channel combiner.
// All methods will be called from within the client_channel work serializer.
namespace grpc_core {

@ -33,8 +33,8 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@ -163,7 +163,7 @@ class WeightedTargetLb : public LoadBalancingPolicy {
std::unique_ptr<SubchannelPicker> picker);
static void OnDelayedRemovalTimer(void* arg, grpc_error* error);
static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error);
void OnDelayedRemovalTimerLocked(grpc_error* error);
// The owning LB policy.
RefCountedPtr<WeightedTargetLb> weighted_target_policy_;
@ -392,6 +392,8 @@ WeightedTargetLb::WeightedChild::WeightedChild(
gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s",
weighted_target_policy_.get(), this, name_.c_str());
}
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
@ -430,7 +432,7 @@ OrphanablePtr<LoadBalancingPolicy>
WeightedTargetLb::WeightedChild::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = weighted_target_policy_->combiner();
lb_policy_args.work_serializer = weighted_target_policy_->work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
@ -536,8 +538,6 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() {
weight_ = 0;
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "WeightedChild+timer").release();
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
delayed_removal_timer_callback_pending_ = true;
grpc_timer_init(&delayed_removal_timer_,
ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
@ -547,22 +547,21 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() {
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg,
grpc_error* error) {
WeightedChild* self = static_cast<WeightedChild*>(arg);
self->weighted_target_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_,
OnDelayedRemovalTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
self->weighted_target_policy_->work_serializer()->Run(
[self, error]() { self->OnDelayedRemovalTimerLocked(error); },
DEBUG_LOCATION);
}
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked(
void* arg, grpc_error* error) {
WeightedChild* self = static_cast<WeightedChild*>(arg);
if (error == GRPC_ERROR_NONE &&
self->delayed_removal_timer_callback_pending_ && !self->shutdown_ &&
self->weight_ == 0) {
self->delayed_removal_timer_callback_pending_ = false;
self->weighted_target_policy_->targets_.erase(self->name_);
grpc_error* error) {
if (error == GRPC_ERROR_NONE && delayed_removal_timer_callback_pending_ &&
!shutdown_ && weight_ == 0) {
delayed_removal_timer_callback_pending_ = false;
weighted_target_policy_->targets_.erase(name_);
}
self->Unref(DEBUG_LOCATION, "WeightedChild+timer");
Unref(DEBUG_LOCATION, "WeightedChild+timer");
GRPC_ERROR_UNREF(error);
}
//

@ -168,7 +168,7 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
// Create child policy if not already present.
if (parent_->child_policy_ == nullptr) {
LoadBalancingPolicy::Args args;
args.combiner = parent_->combiner();
args.work_serializer = parent_->work_serializer();
args.args = parent_->args_;
args.channel_control_helper = absl::make_unique<Helper>(parent_->Ref());
parent_->child_policy_ =

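cds.cc shows the smallest instance of a change repeated in every policy in this PR: LoadBalancingPolicy::Args now carries a work_serializer field in place of a combiner, and because the serializer is a std::shared_ptr, handing it to a child is a plain copy rather than a GRPC_COMBINER_REF. The common wiring, lifted from the hunks above:

LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.work_serializer = work_serializer();  // shared_ptr copy
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
    absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));

Parent and child thus share a single serialized execution context, which is what lets the *Locked methods keep their locking discipline unchanged.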
@ -37,8 +37,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/uri/uri_parser.h"
#define GRPC_EDS_DEFAULT_FALLBACK_TIMEOUT 10000
@ -424,7 +424,7 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
if (xds_client_from_channel_ == nullptr) {
grpc_error* error = GRPC_ERROR_NONE;
xds_client_ = MakeOrphanable<XdsClient>(
combiner(), interested_parties(), GetEdsResourceName(),
work_serializer(), interested_parties(), GetEdsResourceName(),
nullptr /* service config watcher */, *args_, &error);
// TODO(roth): If we decide that we care about EDS-only mode, add
// proper error handling here.
@ -711,7 +711,7 @@ grpc_channel_args* EdsLb::CreateChildPolicyArgsLocked(
OrphanablePtr<LoadBalancingPolicy> EdsLb::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.work_serializer = work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));

@ -27,7 +27,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@ -176,7 +176,7 @@ LoadBalancingPolicy::PickResult LrsLb::LoadReportingPicker::Pick(
locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release();
result.recv_trailing_metadata_ready =
// Note: This callback does not run in either the control plane
// combiner or in the data plane mutex.
// work serializer or in the data plane mutex.
[locality_stats](grpc_error* error, MetadataInterface* /*metadata*/,
CallState* /*call_state*/) {
const bool call_failed = error != GRPC_ERROR_NONE;
@ -273,7 +273,7 @@ void LrsLb::MaybeUpdatePickerLocked() {
OrphanablePtr<LoadBalancingPolicy> LrsLb::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.work_serializer = work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));

@ -34,8 +34,8 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
#define GRPC_XDS_ROUTING_CHILD_RETENTION_INTERVAL_MS (15 * 60 * 1000)
@ -172,7 +172,7 @@ class XdsRoutingLb : public LoadBalancingPolicy {
const grpc_channel_args* args);
static void OnDelayedRemovalTimer(void* arg, grpc_error* error);
static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error);
void OnDelayedRemovalTimerLocked(grpc_error* error);
// The owning LB policy.
RefCountedPtr<XdsRoutingLb> xds_routing_policy_;
@ -400,6 +400,8 @@ XdsRoutingLb::XdsRoutingChild::XdsRoutingChild(
gpr_log(GPR_INFO, "[xds_routing_lb %p] created XdsRoutingChild %p for %s",
xds_routing_policy_.get(), this, name_.c_str());
}
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
}
XdsRoutingLb::XdsRoutingChild::~XdsRoutingChild() {
@ -436,7 +438,7 @@ OrphanablePtr<LoadBalancingPolicy>
XdsRoutingLb::XdsRoutingChild::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = xds_routing_policy_->combiner();
lb_policy_args.work_serializer = xds_routing_policy_->work_serializer();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
@ -501,8 +503,6 @@ void XdsRoutingLb::XdsRoutingChild::DeactivateLocked() {
// Set the child weight to 0 so that future pickers won't contain this child.
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "XdsRoutingChild+timer").release();
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(
&delayed_removal_timer_,
ExecCtx::Get()->Now() + GRPC_XDS_ROUTING_CHILD_RETENTION_INTERVAL_MS,
@ -513,20 +513,20 @@ void XdsRoutingLb::XdsRoutingChild::DeactivateLocked() {
void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimer(void* arg,
grpc_error* error) {
XdsRoutingChild* self = static_cast<XdsRoutingChild*>(arg);
self->xds_routing_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_,
OnDelayedRemovalTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // Ref owned by the lambda
self->xds_routing_policy_->work_serializer()->Run(
[self, error]() { self->OnDelayedRemovalTimerLocked(error); },
DEBUG_LOCATION);
}
void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked(
void* arg, grpc_error* error) {
XdsRoutingChild* self = static_cast<XdsRoutingChild*>(arg);
self->delayed_removal_timer_callback_pending_ = false;
if (error == GRPC_ERROR_NONE && !self->shutdown_) {
self->xds_routing_policy_->actions_.erase(self->name_);
grpc_error* error) {
delayed_removal_timer_callback_pending_ = false;
if (error == GRPC_ERROR_NONE && !shutdown_) {
xds_routing_policy_->actions_.erase(name_);
}
self->Unref(DEBUG_LOCATION, "XdsRoutingChild+timer");
Unref(DEBUG_LOCATION, "XdsRoutingChild+timer");
GRPC_ERROR_UNREF(error);
}
//

@ -38,7 +38,8 @@ class LocalSubchannelPool final : public SubchannelPoolInterface {
~LocalSubchannelPool() override;
// Implements interface methods.
// Thread-unsafe. Intended to be invoked within the client_channel combiner.
// Thread-unsafe. Intended to be invoked within the client_channel work
// serializer.
Subchannel* RegisterSubchannel(SubchannelKey* key,
Subchannel* constructed) override;
void UnregisterSubchannel(SubchannelKey* key) override;

@ -19,7 +19,6 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false,
"resolver_refcount");
@ -30,13 +29,11 @@ namespace grpc_core {
// Resolver
//
Resolver::Resolver(Combiner* combiner,
Resolver::Resolver(std::shared_ptr<WorkSerializer> work_serializer,
std::unique_ptr<ResultHandler> result_handler)
: InternallyRefCounted(&grpc_trace_resolver_refcount),
result_handler_(std::move(result_handler)),
combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {}
Resolver::~Resolver() { GRPC_COMBINER_UNREF(combiner_, "resolver"); }
work_serializer_(std::move(work_serializer)),
result_handler_(std::move(result_handler)) {}
//
// Resolver::Result

@ -27,8 +27,8 @@
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/work_serializer.h"
extern grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount;
@ -45,7 +45,7 @@ namespace grpc_core {
/// DNS).
///
/// Note: All methods with a "Locked" suffix must be called from the
/// combiner passed to the constructor.
/// work_serializer passed to the constructor.
class Resolver : public InternallyRefCounted<Resolver> {
public:
/// Results returned by the resolver.
@ -87,7 +87,7 @@ class Resolver : public InternallyRefCounted<Resolver> {
// Not copyable nor movable.
Resolver(const Resolver&) = delete;
Resolver& operator=(const Resolver&) = delete;
virtual ~Resolver();
virtual ~Resolver() = default;
/// Starts resolving.
virtual void StartLocked() = 0;
@ -115,30 +115,28 @@ class Resolver : public InternallyRefCounted<Resolver> {
/// implementations. At that point, this method can go away.
virtual void ResetBackoffLocked() {}
// Note: This must be invoked while holding the combiner.
// Note: This must be invoked while holding the work_serializer.
void Orphan() override {
ShutdownLocked();
Unref();
}
protected:
/// Does NOT take ownership of the reference to \a combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a RefCountedPtr<>, so that we always take
// ownership of a new ref.
explicit Resolver(Combiner* combiner,
std::unique_ptr<ResultHandler> result_handler);
Resolver(std::shared_ptr<WorkSerializer> work_serializer,
std::unique_ptr<ResultHandler> result_handler);
/// Shuts down the resolver.
virtual void ShutdownLocked() = 0;
Combiner* combiner() const { return combiner_; }
std::shared_ptr<WorkSerializer> work_serializer() const {
return work_serializer_;
}
ResultHandler* result_handler() const { return result_handler_.get(); }
private:
std::shared_ptr<WorkSerializer> work_serializer_;
std::unique_ptr<ResultHandler> result_handler_;
Combiner* combiner_;
};
} // namespace grpc_core

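With the base class owning a shared_ptr and defaulting its destructor, a resolver implementation now forwards the serializer by value through ResolverArgs. A sketch against the new interface; FixedResolver is illustrative, not in the tree:

class FixedResolver : public Resolver {
 public:
  explicit FixedResolver(ResolverArgs args)
      : Resolver(std::move(args.work_serializer),
                 std::move(args.result_handler)) {}
  void StartLocked() override {
    Result result;
    // ... populate result.addresses from the target URI ...
    result_handler()->ReturnResult(std::move(result));
  }
 private:
  void ShutdownLocked() override {}
};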
@ -41,11 +41,11 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/iomgr_custom.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/json/json.h"
#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
@ -79,8 +79,8 @@ class AresDnsResolver : public Resolver {
static void OnNextResolution(void* arg, grpc_error* error);
static void OnResolved(void* arg, grpc_error* error);
static void OnNextResolutionLocked(void* arg, grpc_error* error);
static void OnResolvedLocked(void* arg, grpc_error* error);
void OnNextResolutionLocked(grpc_error* error);
void OnResolvedLocked(grpc_error* error);
/// DNS server to use (if not system default)
char* dns_server_;
@ -92,7 +92,7 @@ class AresDnsResolver : public Resolver {
bool request_service_config_;
/// pollset_set to drive the name resolution process
grpc_pollset_set* interested_parties_;
/// closures used by the combiner
/// closures used by the work_serializer
grpc_closure on_next_resolution_;
grpc_closure on_resolved_;
/// are we currently resolving?
@ -123,7 +123,7 @@ class AresDnsResolver : public Resolver {
};
AresDnsResolver::AresDnsResolver(ResolverArgs args)
: Resolver(args.combiner, std::move(args.result_handler)),
: Resolver(std::move(args.work_serializer), std::move(args.result_handler)),
backoff_(
BackOff::Options()
.set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
@ -131,6 +131,10 @@ AresDnsResolver::AresDnsResolver(ResolverArgs args)
.set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
.set_jitter(GRPC_DNS_RECONNECT_JITTER)
.set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
// Closure Initialization
GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolution, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_resolved_, OnResolved, this, grpc_schedule_on_exec_ctx);
// Get name to resolve from URI path.
const char* path = args.uri->path;
if (path[0] == '/') ++path;
@ -204,26 +208,26 @@ void AresDnsResolver::ShutdownLocked() {
void AresDnsResolver::OnNextResolution(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
r->combiner()->Run(GRPC_CLOSURE_INIT(&r->on_next_resolution_,
OnNextResolutionLocked, r, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
r->work_serializer()->Run([r, error]() { r->OnNextResolutionLocked(error); },
DEBUG_LOCATION);
}
void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
void AresDnsResolver::OnNextResolutionLocked(grpc_error* error) {
GRPC_CARES_TRACE_LOG(
"resolver:%p re-resolution timer fired. error: %s. shutdown_initiated_: "
"%d",
r, grpc_error_string(error), r->shutdown_initiated_);
r->have_next_resolution_timer_ = false;
if (error == GRPC_ERROR_NONE && !r->shutdown_initiated_) {
if (!r->resolving_) {
this, grpc_error_string(error), shutdown_initiated_);
have_next_resolution_timer_ = false;
if (error == GRPC_ERROR_NONE && !shutdown_initiated_) {
if (!resolving_) {
GRPC_CARES_TRACE_LOG(
"resolver:%p start resolving due to re-resolution timer", r);
r->StartResolvingLocked();
"resolver:%p start resolving due to re-resolution timer", this);
StartResolvingLocked();
}
}
r->Unref(DEBUG_LOCATION, "next_resolution_timer");
Unref(DEBUG_LOCATION, "next_resolution_timer");
GRPC_ERROR_UNREF(error);
}
bool ValueInJsonArray(const Json::Array& array, const char* value) {
@ -316,81 +320,79 @@ std::string ChooseServiceConfig(char* service_config_choice_json,
void AresDnsResolver::OnResolved(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
r->combiner()->Run(
GRPC_CLOSURE_INIT(&r->on_resolved_, OnResolvedLocked, r, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
r->work_serializer()->Run([r, error]() { r->OnResolvedLocked(error); },
DEBUG_LOCATION);
}
void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
GPR_ASSERT(r->resolving_);
r->resolving_ = false;
gpr_free(r->pending_request_);
r->pending_request_ = nullptr;
if (r->shutdown_initiated_) {
r->Unref(DEBUG_LOCATION, "OnResolvedLocked() shutdown");
void AresDnsResolver::OnResolvedLocked(grpc_error* error) {
GPR_ASSERT(resolving_);
resolving_ = false;
gpr_free(pending_request_);
pending_request_ = nullptr;
if (shutdown_initiated_) {
Unref(DEBUG_LOCATION, "OnResolvedLocked() shutdown");
GRPC_ERROR_UNREF(error);
return;
}
if (r->addresses_ != nullptr || r->balancer_addresses_ != nullptr) {
if (addresses_ != nullptr || balancer_addresses_ != nullptr) {
Result result;
if (r->addresses_ != nullptr) {
result.addresses = std::move(*r->addresses_);
if (addresses_ != nullptr) {
result.addresses = std::move(*addresses_);
}
if (r->service_config_json_ != nullptr) {
if (service_config_json_ != nullptr) {
std::string service_config_string = ChooseServiceConfig(
r->service_config_json_, &result.service_config_error);
gpr_free(r->service_config_json_);
service_config_json_, &result.service_config_error);
gpr_free(service_config_json_);
if (result.service_config_error == GRPC_ERROR_NONE &&
!service_config_string.empty()) {
GRPC_CARES_TRACE_LOG("resolver:%p selected service config choice: %s",
r, service_config_string.c_str());
this, service_config_string.c_str());
result.service_config = ServiceConfig::Create(
service_config_string, &result.service_config_error);
}
}
InlinedVector<grpc_arg, 1> new_args;
if (r->balancer_addresses_ != nullptr) {
if (balancer_addresses_ != nullptr) {
new_args.push_back(
CreateGrpclbBalancerAddressesArg(r->balancer_addresses_.get()));
CreateGrpclbBalancerAddressesArg(balancer_addresses_.get()));
}
result.args = grpc_channel_args_copy_and_add(
r->channel_args_, new_args.data(), new_args.size());
r->result_handler()->ReturnResult(std::move(result));
r->addresses_.reset();
r->balancer_addresses_.reset();
result.args = grpc_channel_args_copy_and_add(channel_args_, new_args.data(),
new_args.size());
result_handler()->ReturnResult(std::move(result));
addresses_.reset();
balancer_addresses_.reset();
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
backoff_.Reset();
} else {
GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed: %s", r,
GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed: %s", this,
grpc_error_string(error));
r->result_handler()->ReturnError(grpc_error_set_int(
result_handler()->ReturnError(grpc_error_set_int(
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"DNS resolution failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
// Set retry timer.
grpc_millis next_try = r->backoff_.NextAttemptTime();
grpc_millis next_try = backoff_.NextAttemptTime();
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed (will retry): %s",
r, grpc_error_string(error));
GPR_ASSERT(!r->have_next_resolution_timer_);
r->have_next_resolution_timer_ = true;
this, grpc_error_string(error));
GPR_ASSERT(!have_next_resolution_timer_);
have_next_resolution_timer_ = true;
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.
r->Ref(DEBUG_LOCATION, "retry-timer").release();
Ref(DEBUG_LOCATION, "retry-timer").release();
if (timeout > 0) {
GRPC_CARES_TRACE_LOG("resolver:%p retrying in %" PRId64 " milliseconds",
r, timeout);
this, timeout);
} else {
GRPC_CARES_TRACE_LOG("resolver:%p retrying immediately", r);
GRPC_CARES_TRACE_LOG("resolver:%p retrying immediately", this);
}
GRPC_CLOSURE_INIT(&r->on_next_resolution_, OnNextResolution, r,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&r->next_resolution_timer_, next_try,
&r->on_next_resolution_);
grpc_timer_init(&next_resolution_timer_, next_try, &on_next_resolution_);
}
r->Unref(DEBUG_LOCATION, "dns-resolving");
Unref(DEBUG_LOCATION, "dns-resolving");
GRPC_ERROR_UNREF(error);
}
void AresDnsResolver::MaybeStartResolvingLocked() {
@ -414,8 +416,6 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.
Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release();
GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolution, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&next_resolution_timer_,
ExecCtx::Get()->Now() + ms_until_next_resolution,
&on_next_resolution_);
@ -433,13 +433,12 @@ void AresDnsResolver::StartResolvingLocked() {
GPR_ASSERT(!resolving_);
resolving_ = true;
service_config_json_ = nullptr;
GRPC_CLOSURE_INIT(&on_resolved_, OnResolved, this, grpc_schedule_on_exec_ctx);
pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &addresses_,
enable_srv_queries_ ? &balancer_addresses_ : nullptr,
request_service_config_ ? &service_config_json_ : nullptr,
query_timeout_ms_, combiner());
query_timeout_ms_, work_serializer());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
GRPC_CARES_TRACE_LOG("resolver:%p Started resolving. pending_request_:%p",
this, pending_request_);

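After this hunk the resolver's grpc_closure members exist only as iomgr entry points (the retry timer and the c-ares completion); every hop back under the lock is a lambda, which is why the per-arm GRPC_CLOSURE_INIT calls ahead of each grpc_timer_init disappear. Re-arming, as it now reads in the file:

// Closures were bound once in the constructor, so re-arming is one call.
Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release();
grpc_timer_init(&next_resolution_timer_,
                ExecCtx::Get()->Now() + ms_until_next_resolution,
                &on_next_resolution_);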
@ -31,7 +31,6 @@
#include <grpc/support/time.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
@ -66,8 +65,8 @@ struct grpc_ares_ev_driver {
/** refcount of the event driver */
gpr_refcount refs;
/** combiner to synchronize c-ares and I/O callbacks on */
grpc_core::Combiner* combiner;
/** work_serializer to synchronize c-ares and I/O callbacks on */
std::shared_ptr<grpc_core::WorkSerializer> work_serializer;
/** a list of grpc_fd that this event driver is currently using. */
fd_node* fds;
/** is this event driver currently working? */
@ -107,7 +106,6 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
GRPC_CARES_TRACE_LOG("request:%p destroy ev_driver %p", ev_driver->request,
ev_driver);
GPR_ASSERT(ev_driver->fds == nullptr);
GRPC_COMBINER_UNREF(ev_driver->combiner, "free ares event driver");
ares_destroy(ev_driver->channel);
grpc_ares_complete_request_locked(ev_driver->request);
delete ev_driver;
@ -133,21 +131,22 @@ static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) {
}
static void on_timeout(void* arg, grpc_error* error);
static void on_timeout_locked(void* arg, grpc_error* error);
static void on_timeout_locked(grpc_ares_ev_driver* arg, grpc_error* error);
static void on_ares_backup_poll_alarm(void* arg, grpc_error* error);
static void on_ares_backup_poll_alarm_locked(void* arg, grpc_error* error);
static void on_ares_backup_poll_alarm_locked(grpc_ares_ev_driver* arg,
grpc_error* error);
static void noop_inject_channel_config(ares_channel /*channel*/) {}
void (*grpc_ares_test_only_inject_config)(ares_channel channel) =
noop_inject_channel_config;
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
int query_timeout_ms,
grpc_core::Combiner* combiner,
grpc_ares_request* request) {
grpc_error* grpc_ares_ev_driver_create_locked(
grpc_ares_ev_driver** ev_driver, grpc_pollset_set* pollset_set,
int query_timeout_ms,
std::shared_ptr<grpc_core::WorkSerializer> work_serializer,
grpc_ares_request* request) {
*ev_driver = new grpc_ares_ev_driver();
ares_options opts;
memset(&opts, 0, sizeof(opts));
@ -164,7 +163,7 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
gpr_free(*ev_driver);
return err;
}
(*ev_driver)->combiner = GRPC_COMBINER_REF(combiner, "ares event driver");
(*ev_driver)->work_serializer = std::move(work_serializer);
gpr_ref_init(&(*ev_driver)->refs, 1);
(*ev_driver)->pollset_set = pollset_set;
(*ev_driver)->fds = nullptr;
@ -172,7 +171,7 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
(*ev_driver)->shutting_down = false;
(*ev_driver)->request = request;
(*ev_driver)->polled_fd_factory =
grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner);
grpc_core::NewGrpcPolledFdFactory((*ev_driver)->work_serializer);
(*ev_driver)
->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel);
(*ev_driver)->query_timeout_ms = query_timeout_ms;
@ -234,13 +233,12 @@ static grpc_millis calculate_next_ares_backup_poll_alarm_ms(
static void on_timeout(void* arg, grpc_error* error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
driver->combiner->Run(GRPC_CLOSURE_INIT(&driver->on_timeout_locked,
on_timeout_locked, driver, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
driver->work_serializer->Run(
[driver, error]() { on_timeout_locked(driver, error); }, DEBUG_LOCATION);
}
static void on_timeout_locked(void* arg, grpc_error* error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
static void on_timeout_locked(grpc_ares_ev_driver* driver, grpc_error* error) {
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p on_timeout_locked. driver->shutting_down=%d. "
"err=%s",
@ -249,14 +247,15 @@ static void on_timeout_locked(void* arg, grpc_error* error) {
grpc_ares_ev_driver_shutdown_locked(driver);
}
grpc_ares_ev_driver_unref(driver);
GRPC_ERROR_UNREF(error);
}
static void on_ares_backup_poll_alarm(void* arg, grpc_error* error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
driver->combiner->Run(
GRPC_CLOSURE_INIT(&driver->on_ares_backup_poll_alarm_locked,
on_ares_backup_poll_alarm_locked, driver, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error);
driver->work_serializer->Run(
[driver, error]() { on_ares_backup_poll_alarm_locked(driver, error); },
DEBUG_LOCATION);
}
/* In case of non-responsive DNS servers, dropped packets, etc., c-ares has
@ -267,8 +266,8 @@ static void on_ares_backup_poll_alarm(void* arg, grpc_error* error) {
* b) when some time has passed without fd events having happened
* For the latter, we use this backup poller. Also see
* https://github.com/grpc/grpc/pull/17688 description for more details. */
static void on_ares_backup_poll_alarm_locked(void* arg, grpc_error* error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
static void on_ares_backup_poll_alarm_locked(grpc_ares_ev_driver* driver,
grpc_error* error) {
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p on_ares_backup_poll_alarm_locked. "
"driver->shutting_down=%d. "
@ -301,10 +300,10 @@ static void on_ares_backup_poll_alarm_locked(void* arg, grpc_error* error) {
grpc_ares_notify_on_event_locked(driver);
}
grpc_ares_ev_driver_unref(driver);
GRPC_ERROR_UNREF(error);
}
static void on_readable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
static void on_readable_locked(fd_node* fdn, grpc_error* error) {
GPR_ASSERT(fdn->readable_registered);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
const ares_socket_t as = fdn->grpc_polled_fd->GetWrappedAresSocketLocked();
@ -326,17 +325,17 @@ static void on_readable_locked(void* arg, grpc_error* error) {
}
grpc_ares_notify_on_event_locked(ev_driver);
grpc_ares_ev_driver_unref(ev_driver);
GRPC_ERROR_UNREF(error);
}
static void on_readable(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
fdn->ev_driver->combiner->Run(
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_locked, fdn, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); /* ref owned by lambda */
fdn->ev_driver->work_serializer->Run(
[fdn, error]() { on_readable_locked(fdn, error); }, DEBUG_LOCATION);
}
static void on_writable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
static void on_writable_locked(fd_node* fdn, grpc_error* error) {
GPR_ASSERT(fdn->writable_registered);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
const ares_socket_t as = fdn->grpc_polled_fd->GetWrappedAresSocketLocked();
@ -356,13 +355,14 @@ static void on_writable_locked(void* arg, grpc_error* error) {
}
grpc_ares_notify_on_event_locked(ev_driver);
grpc_ares_ev_driver_unref(ev_driver);
GRPC_ERROR_UNREF(error);
}
static void on_writable(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
fdn->ev_driver->combiner->Run(
GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_locked, fdn, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); /* ref owned by lambda */
fdn->ev_driver->work_serializer->Run(
[fdn, error]() { on_writable_locked(fdn, error); }, DEBUG_LOCATION);
}
ares_channel* grpc_ares_ev_driver_get_channel_locked(
@ -387,7 +387,7 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
fdn = static_cast<fd_node*>(gpr_malloc(sizeof(fd_node)));
fdn->grpc_polled_fd =
ev_driver->polled_fd_factory->NewGrpcPolledFdLocked(
socks[i], ev_driver->pollset_set, ev_driver->combiner);
socks[i], ev_driver->pollset_set, ev_driver->work_serializer);
GRPC_CARES_TRACE_LOG("request:%p new fd: %s", ev_driver->request,
fdn->grpc_polled_fd->GetName());
fdn->ev_driver = ev_driver;

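In the event driver the ownership change is the quiet one: the GRPC_COMBINER_REF/GRPC_COMBINER_UNREF pairs vanish because a std::shared_ptr member now does the counting. A sketch of the create/destroy pairing, abbreviated from the hunks above:

grpc_error* grpc_ares_ev_driver_create_locked(
    grpc_ares_ev_driver** ev_driver, /* ... */
    std::shared_ptr<grpc_core::WorkSerializer> work_serializer,
    grpc_ares_request* request) {
  *ev_driver = new grpc_ares_ev_driver();
  (*ev_driver)->work_serializer = std::move(work_serializer);  // ref taken here
  // ...
  return GRPC_ERROR_NONE;
}
static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
  // ...
  delete ev_driver;  // member shared_ptr releases the serializer ref here
}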
@ -40,11 +40,11 @@ ares_channel* grpc_ares_ev_driver_get_channel_locked(
/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
created successfully. */
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
int query_timeout_ms,
grpc_core::Combiner* combiner,
grpc_ares_request* request);
grpc_error* grpc_ares_ev_driver_create_locked(
grpc_ares_ev_driver** ev_driver, grpc_pollset_set* pollset_set,
int query_timeout_ms,
std::shared_ptr<grpc_core::WorkSerializer> work_serializer,
grpc_ares_request* request);
/* Called back when all DNS lookups have completed. */
void grpc_ares_ev_driver_on_queries_complete_locked(
@ -90,12 +90,13 @@ class GrpcPolledFdFactory {
/* Creates a new wrapped fd for the current platform */
virtual GrpcPolledFd* NewGrpcPolledFdLocked(
ares_socket_t as, grpc_pollset_set* driver_pollset_set,
Combiner* combiner) = 0;
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) = 0;
/* Optionally configures the ares channel after creation */
virtual void ConfigureAresChannelLocked(ares_channel channel) = 0;
};
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(Combiner* combiner);
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
std::shared_ptr<grpc_core::WorkSerializer> work_serializer);
} // namespace grpc_core

@ -31,7 +31,7 @@
#include <grpc/support/time.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@ -41,19 +41,16 @@ void ares_uv_poll_close_cb(uv_handle_t* handle) { delete handle; }
class GrpcPolledFdLibuv : public GrpcPolledFd {
public:
GrpcPolledFdLibuv(ares_socket_t as, Combiner* combiner)
: as_(as), combiner_(combiner) {
GrpcPolledFdLibuv(ares_socket_t as,
std::shared_ptr<WorkSerializer> work_serializer)
: as_(as), work_serializer_(std::move(work_serializer)) {
gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, (intptr_t)as);
handle_ = new uv_poll_t();
uv_poll_init_socket(uv_default_loop(), handle_, as);
handle_->data = this;
GRPC_COMBINER_REF(combiner_, "libuv ares event driver");
}
~GrpcPolledFdLibuv() {
gpr_free(name_);
GRPC_COMBINER_UNREF(combiner_, "libuv ares event driver");
}
~GrpcPolledFdLibuv() { gpr_free(name_); }
void RegisterForOnReadableLocked(grpc_closure* read_closure) override {
GPR_ASSERT(read_closure_ == nullptr);
@ -109,7 +106,7 @@ class GrpcPolledFdLibuv : public GrpcPolledFd {
grpc_closure* read_closure_ = nullptr;
grpc_closure* write_closure_ = nullptr;
int poll_events_ = 0;
Combiner* combiner_;
std::shared_ptr<WorkSerializer> work_serializer_;
};
struct AresUvPollCbArg {
@ -121,14 +118,14 @@ struct AresUvPollCbArg {
int events;
};
static void ares_uv_poll_cb_locked(void* arg, grpc_error* error) {
std::unique_ptr<AresUvPollCbArg> arg_struct(
reinterpret_cast<AresUvPollCbArg*>(arg));
static void ares_uv_poll_cb_locked(AresUvPollCbArg* arg) {
std::unique_ptr<AresUvPollCbArg> arg_struct(arg);
uv_poll_t* handle = arg_struct->handle;
int status = arg_struct->status;
int events = arg_struct->events;
GrpcPolledFdLibuv* polled_fd =
reinterpret_cast<GrpcPolledFdLibuv*>(handle->data);
grpc_error* error = GRPC_ERROR_NONE;
if (status < 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("cares polling error");
error =
@ -155,24 +152,23 @@ void ares_uv_poll_cb(uv_poll_t* handle, int status, int events) {
GrpcPolledFdLibuv* polled_fd =
reinterpret_cast<GrpcPolledFdLibuv*>(handle->data);
AresUvPollCbArg* arg = new AresUvPollCbArg(handle, status, events);
polled_fd->combiner_->Run(
GRPC_CLOSURE_CREATE(ares_uv_poll_cb_locked, arg, nullptr),
GRPC_ERROR_NONE);
polled_fd->work_serializer_->Run([arg]() { ares_uv_poll_cb_locked(arg); },
DEBUG_LOCATION);
}
class GrpcPolledFdFactoryLibuv : public GrpcPolledFdFactory {
public:
GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as,
grpc_pollset_set* driver_pollset_set,
Combiner* combiner) override {
return new GrpcPolledFdLibuv(as, combiner);
GrpcPolledFd* NewGrpcPolledFdLocked(
ares_socket_t as, grpc_pollset_set* driver_pollset_set,
std::shared_ptr<WorkSerializer> work_serializer) override {
return new GrpcPolledFdLibuv(as, std::move(work_serializer));
}
void ConfigureAresChannelLocked(ares_channel channel) override {}
};
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* combiner) {
std::shared_ptr<WorkSerializer> work_serializer) {
return absl::make_unique<GrpcPolledFdFactoryLibuv>();
}

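The libuv driver is the one case where the serialized callback needs more than a pointer and an error: uv reports (status, events), so those are boxed in a heap-allocated AresUvPollCbArg whose raw pointer the lambda captures, and the serialized function immediately adopts it into a unique_ptr so it is freed on every exit path. The hand-off, as in the hunks above:

// uv thread: box the callback arguments and post them to the serializer.
AresUvPollCbArg* arg = new AresUvPollCbArg(handle, status, events);
polled_fd->work_serializer_->Run([arg]() { ares_uv_poll_cb_locked(arg); },
                                 DEBUG_LOCATION);
// Serializer context: adopt the box so it is deleted exactly once.
static void ares_uv_poll_cb_locked(AresUvPollCbArg* arg) {
  std::unique_ptr<AresUvPollCbArg> arg_struct(arg);
  // ...
}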
@ -88,9 +88,9 @@ class GrpcPolledFdPosix : public GrpcPolledFd {
class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory {
public:
GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as,
grpc_pollset_set* driver_pollset_set,
Combiner* /*combiner*/) override {
GrpcPolledFd* NewGrpcPolledFdLocked(
ares_socket_t as, grpc_pollset_set* driver_pollset_set,
std::shared_ptr<WorkSerializer> /*work_serializer*/) override {
return new GrpcPolledFdPosix(as, driver_pollset_set);
}
@ -98,7 +98,7 @@ class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory {
};
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* /*combiner*/) {
std::shared_ptr<WorkSerializer> work_serializer) { /* NOLINT */
return absl::make_unique<GrpcPolledFdFactoryPosix>();
}

@ -30,12 +30,12 @@
#include <string.h>
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/sockaddr_windows.h"
#include "src/core/lib/iomgr/socket_windows.h"
#include "src/core/lib/iomgr/tcp_windows.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@ -97,28 +97,31 @@ class GrpcPolledFdWindows {
WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY,
};
GrpcPolledFdWindows(ares_socket_t as, Combiner* combiner, int address_family,
int socket_type)
: read_buf_(grpc_empty_slice()),
GrpcPolledFdWindows(ares_socket_t as,
std::shared_ptr<WorkSerializer> work_serializer,
int address_family, int socket_type)
: work_serializer_(std::move(work_serializer)),
read_buf_(grpc_empty_slice()),
write_buf_(grpc_empty_slice()),
tcp_write_state_(WRITE_IDLE),
gotten_into_driver_list_(false),
address_family_(address_family),
socket_type_(socket_type) {
gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, as);
// Closure Initialization
GRPC_CLOSURE_INIT(&outer_read_closure_,
&GrpcPolledFdWindows::OnIocpReadable, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&outer_write_closure_,
&GrpcPolledFdWindows::OnIocpWriteable, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_tcp_connect_locked_,
&GrpcPolledFdWindows::OnTcpConnect, this,
grpc_schedule_on_exec_ctx);
winsocket_ = grpc_winsocket_create(as, name_);
combiner_ = GRPC_COMBINER_REF(combiner, name_);
GRPC_CLOSURE_INIT(&continue_register_for_on_readable_locked_,
&GrpcPolledFdWindows::ContinueRegisterForOnReadableLocked,
this, nullptr);
GRPC_CLOSURE_INIT(
&continue_register_for_on_writeable_locked_,
&GrpcPolledFdWindows::ContinueRegisterForOnWriteableLocked, this,
nullptr);
}
~GrpcPolledFdWindows() {
GRPC_COMBINER_UNREF(combiner_, name_);
grpc_slice_unref_internal(read_buf_);
grpc_slice_unref_internal(write_buf_);
GPR_ASSERT(read_closure_ == nullptr);
@ -145,23 +148,15 @@ class GrpcPolledFdWindows {
GPR_ASSERT(!read_buf_has_data_);
read_buf_ = GRPC_SLICE_MALLOC(4192);
if (connect_done_) {
combiner_->Run(&continue_register_for_on_readable_locked_,
GRPC_ERROR_NONE);
work_serializer_->Run([this]() { ContinueRegisterForOnReadableLocked(); },
DEBUG_LOCATION);
} else {
GPR_ASSERT(pending_continue_register_for_on_readable_locked_ == nullptr);
pending_continue_register_for_on_readable_locked_ =
&continue_register_for_on_readable_locked_;
GPR_ASSERT(!pending_continue_register_for_on_readable_locked_);
pending_continue_register_for_on_readable_locked_ = true;
}
}
static void ContinueRegisterForOnReadableLocked(void* arg,
grpc_error* unused_error) {
GrpcPolledFdWindows* grpc_polled_fd =
static_cast<GrpcPolledFdWindows*>(arg);
grpc_polled_fd->InnerContinueRegisterForOnReadableLocked(GRPC_ERROR_NONE);
}
void InnerContinueRegisterForOnReadableLocked(grpc_error* unused_error) {
void ContinueRegisterForOnReadableLocked() {
GRPC_CARES_TRACE_LOG(
"fd:|%s| InnerContinueRegisterForOnReadableLocked "
"wsa_connect_error_:%d",
@ -194,10 +189,7 @@ class GrpcPolledFdWindows {
return;
}
}
grpc_socket_notify_on_read(
winsocket_, GRPC_CLOSURE_INIT(&outer_read_closure_,
&GrpcPolledFdWindows::OnIocpReadable,
this, grpc_schedule_on_exec_ctx));
grpc_socket_notify_on_read(winsocket_, &outer_read_closure_);
}
void RegisterForOnWriteableLocked(grpc_closure* write_closure) {
@ -213,23 +205,15 @@ class GrpcPolledFdWindows {
GPR_ASSERT(write_closure_ == nullptr);
write_closure_ = write_closure;
if (connect_done_) {
combiner_->Run(&continue_register_for_on_writeable_locked_,
GRPC_ERROR_NONE);
work_serializer_->Run(
[this]() { ContinueRegisterForOnWriteableLocked(); }, DEBUG_LOCATION);
} else {
GPR_ASSERT(pending_continue_register_for_on_writeable_locked_ == nullptr);
pending_continue_register_for_on_writeable_locked_ =
&continue_register_for_on_writeable_locked_;
GPR_ASSERT(!pending_continue_register_for_on_writeable_locked_);
pending_continue_register_for_on_writeable_locked_ = true;
}
}
static void ContinueRegisterForOnWriteableLocked(void* arg,
grpc_error* unused_error) {
GrpcPolledFdWindows* grpc_polled_fd =
static_cast<GrpcPolledFdWindows*>(arg);
grpc_polled_fd->InnerContinueRegisterForOnWriteableLocked(GRPC_ERROR_NONE);
}
void InnerContinueRegisterForOnWriteableLocked(grpc_error* unused_error) {
void ContinueRegisterForOnWriteableLocked() {
GRPC_CARES_TRACE_LOG(
"fd:|%s| InnerContinueRegisterForOnWriteableLocked "
"wsa_connect_error_:%d",
@ -256,11 +240,7 @@ class GrpcPolledFdWindows {
ScheduleAndNullWriteClosure(
GRPC_WSA_ERROR(wsa_error_code, "WSASend (overlapped)"));
} else {
grpc_socket_notify_on_write(
winsocket_,
GRPC_CLOSURE_INIT(&outer_write_closure_,
&GrpcPolledFdWindows::OnIocpWriteable, this,
grpc_schedule_on_exec_ctx));
grpc_socket_notify_on_write(winsocket_, &outer_write_closure_);
}
break;
case WRITE_PENDING:
@ -440,24 +420,19 @@ class GrpcPolledFdWindows {
static void OnTcpConnect(void* arg, grpc_error* error) {
GrpcPolledFdWindows* grpc_polled_fd =
static_cast<GrpcPolledFdWindows*>(arg);
grpc_polled_fd->combiner_->Run(
GRPC_CLOSURE_INIT(&grpc_polled_fd->on_tcp_connect_locked_,
&GrpcPolledFdWindows::OnTcpConnectLocked,
grpc_polled_fd, nullptr),
GRPC_ERROR_REF(error));
}
static void OnTcpConnectLocked(void* arg, grpc_error* error) {
GrpcPolledFdWindows* grpc_polled_fd =
static_cast<GrpcPolledFdWindows*>(arg);
grpc_polled_fd->InnerOnTcpConnectLocked(error);
GRPC_ERROR_REF(error); // ref owned by lambda
grpc_polled_fd->work_serializer_->Run(
[grpc_polled_fd, error]() {
grpc_polled_fd->OnTcpConnectLocked(error);
},
DEBUG_LOCATION);
}
void InnerOnTcpConnectLocked(grpc_error* error) {
void OnTcpConnectLocked(grpc_error* error) {
GRPC_CARES_TRACE_LOG(
"fd:%s InnerOnTcpConnectLocked error:|%s| "
"pending_register_for_readable:%" PRIdPTR
" pending_register_for_writeable:%" PRIdPTR,
"pending_register_for_readable:%d"
" pending_register_for_writeable:%d",
GetName(), grpc_error_string(error),
pending_continue_register_for_on_readable_locked_,
pending_continue_register_for_on_writeable_locked_);
@ -486,14 +461,15 @@ class GrpcPolledFdWindows {
// this fd to abort.
wsa_connect_error_ = WSA_OPERATION_ABORTED;
}
if (pending_continue_register_for_on_readable_locked_ != nullptr) {
combiner_->Run(pending_continue_register_for_on_readable_locked_,
GRPC_ERROR_NONE);
if (pending_continue_register_for_on_readable_locked_) {
work_serializer_->Run([this]() { ContinueRegisterForOnReadableLocked(); },
DEBUG_LOCATION);
}
if (pending_continue_register_for_on_writeable_locked_ != nullptr) {
combiner_->Run(pending_continue_register_for_on_writeable_locked_,
GRPC_ERROR_NONE);
if (pending_continue_register_for_on_writeable_locked_) {
work_serializer_->Run(
[this]() { ContinueRegisterForOnWriteableLocked(); }, DEBUG_LOCATION);
}
GRPC_ERROR_UNREF(error);
}
int Connect(WSAErrorContext* wsa_error_ctx, const struct sockaddr* target,
@ -593,25 +569,16 @@ class GrpcPolledFdWindows {
return -1;
}
}
GRPC_CLOSURE_INIT(&on_tcp_connect_locked_,
&GrpcPolledFdWindows::OnTcpConnect, this,
grpc_schedule_on_exec_ctx);
grpc_socket_notify_on_write(winsocket_, &on_tcp_connect_locked_);
return out;
}
static void OnIocpReadable(void* arg, grpc_error* error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
polled_fd->combiner_->Run(
GRPC_CLOSURE_INIT(&polled_fd->outer_read_closure_,
&GrpcPolledFdWindows::OnIocpReadableLocked, polled_fd,
nullptr),
GRPC_ERROR_REF(error));
}
static void OnIocpReadableLocked(void* arg, grpc_error* error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
polled_fd->OnIocpReadableInner(error);
GRPC_ERROR_REF(error); // ref owned by lambda
polled_fd->work_serializer_->Run(
[polled_fd, error]() { polled_fd->OnIocpReadableLocked(error); },
DEBUG_LOCATION);
}
// TODO(apolcyn): improve this error handling to be less conservative.
@ -619,7 +586,7 @@ class GrpcPolledFdWindows {
// c-ares reads from this socket later, but it shouldn't necessarily cancel
// the entire resolution attempt. Doing so will allow the "inject broken
// nameserver list" test to pass on Windows.
void OnIocpReadableInner(grpc_error* error) {
void OnIocpReadableLocked(grpc_error* error) {
if (error == GRPC_ERROR_NONE) {
if (winsocket_->read_info.wsa_error != 0) {
/* WSAEMSGSIZE would be due to receiving more data
@ -627,7 +594,6 @@ class GrpcPolledFdWindows {
* the connection is TCP and read the leftovers
* in subsequent c-ares reads. */
if (winsocket_->read_info.wsa_error != WSAEMSGSIZE) {
GRPC_ERROR_UNREF(error);
error = GRPC_WSA_ERROR(winsocket_->read_info.wsa_error,
"OnIocpReadableInner");
GRPC_CARES_TRACE_LOG(
@ -654,24 +620,17 @@ class GrpcPolledFdWindows {
static void OnIocpWriteable(void* arg, grpc_error* error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
polled_fd->combiner_->Run(
GRPC_CLOSURE_INIT(&polled_fd->outer_write_closure_,
&GrpcPolledFdWindows::OnIocpWriteableLocked,
polled_fd, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // error owned by lambda
polled_fd->work_serializer_->Run(
[polled_fd, error]() { polled_fd->OnIocpWriteableLocked(error); },
DEBUG_LOCATION);
}
static void OnIocpWriteableLocked(void* arg, grpc_error* error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
polled_fd->OnIocpWriteableInner(error);
}
void OnIocpWriteableInner(grpc_error* error) {
void OnIocpWriteableLocked(grpc_error* error) {
GRPC_CARES_TRACE_LOG("OnIocpWriteableInner. fd:|%s|", GetName());
GPR_ASSERT(socket_type_ == SOCK_STREAM);
if (error == GRPC_ERROR_NONE) {
if (winsocket_->write_info.wsa_error != 0) {
GRPC_ERROR_UNREF(error);
error = GRPC_WSA_ERROR(winsocket_->write_info.wsa_error,
"OnIocpWriteableInner");
GRPC_CARES_TRACE_LOG(
@ -698,7 +657,7 @@ class GrpcPolledFdWindows {
bool gotten_into_driver_list() const { return gotten_into_driver_list_; }
void set_gotten_into_driver_list() { gotten_into_driver_list_ = true; }
Combiner* combiner_;
std::shared_ptr<WorkSerializer> work_serializer_;
char recv_from_source_addr_[200];
ares_socklen_t recv_from_source_addr_len_;
grpc_slice read_buf_;
@ -721,10 +680,8 @@ class GrpcPolledFdWindows {
// We don't run register_for_{readable,writeable} logic until
// a socket is connected. In the interim, we queue readable/writeable
// registrations with the following state.
grpc_closure continue_register_for_on_readable_locked_;
grpc_closure continue_register_for_on_writeable_locked_;
grpc_closure* pending_continue_register_for_on_readable_locked_ = nullptr;
grpc_closure* pending_continue_register_for_on_writeable_locked_ = nullptr;
bool pending_continue_register_for_on_readable_locked_ = false;
bool pending_continue_register_for_on_writeable_locked_ = false;
};
struct SockToPolledFdEntry {
@ -742,14 +699,10 @@ struct SockToPolledFdEntry {
* with a GrpcPolledFdWindows factory and event driver */
class SockToPolledFdMap {
public:
SockToPolledFdMap(Combiner* combiner) {
combiner_ = GRPC_COMBINER_REF(combiner, "sock to polled fd map");
}
explicit SockToPolledFdMap(std::shared_ptr<WorkSerializer> work_serializer)
: work_serializer_(std::move(work_serializer)) {}
~SockToPolledFdMap() {
GPR_ASSERT(head_ == nullptr);
GRPC_COMBINER_UNREF(combiner_, "sock to polled fd map");
}
~SockToPolledFdMap() { GPR_ASSERT(head_ == nullptr); }
void AddNewSocket(SOCKET s, GrpcPolledFdWindows* polled_fd) {
SockToPolledFdEntry* new_node = new SockToPolledFdEntry(s, polled_fd);
@ -805,7 +758,7 @@ class SockToPolledFdMap {
}
grpc_tcp_set_non_block(s);
GrpcPolledFdWindows* polled_fd =
new GrpcPolledFdWindows(s, map->combiner_, af, type);
new GrpcPolledFdWindows(s, map->work_serializer_, af, type);
GRPC_CARES_TRACE_LOG(
"fd:|%s| created with params af:%d type:%d protocol:%d",
polled_fd->GetName(), af, type, protocol);
@ -861,7 +814,7 @@ class SockToPolledFdMap {
private:
SockToPolledFdEntry* head_ = nullptr;
Combiner* combiner_;
std::shared_ptr<WorkSerializer> work_serializer_;
};
const struct ares_socket_functions custom_ares_sock_funcs = {
@ -877,7 +830,7 @@ const struct ares_socket_functions custom_ares_sock_funcs = {
so that c-ares can close it via usual socket teardown. */
class GrpcPolledFdWindowsWrapper : public GrpcPolledFd {
public:
GrpcPolledFdWindowsWrapper(GrpcPolledFdWindows* wrapped)
explicit GrpcPolledFdWindowsWrapper(GrpcPolledFdWindows* wrapped)
: wrapped_(wrapped) {}
~GrpcPolledFdWindowsWrapper() {}
@ -910,12 +863,13 @@ class GrpcPolledFdWindowsWrapper : public GrpcPolledFd {
class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory {
public:
GrpcPolledFdFactoryWindows(Combiner* combiner)
: sock_to_polled_fd_map_(combiner) {}
explicit GrpcPolledFdFactoryWindows(
std::shared_ptr<WorkSerializer> work_serializer)
: sock_to_polled_fd_map_(std::move(work_serializer)) {}
GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as,
grpc_pollset_set* driver_pollset_set,
Combiner* combiner) override {
GrpcPolledFd* NewGrpcPolledFdLocked(
ares_socket_t as, grpc_pollset_set* driver_pollset_set,
std::shared_ptr<WorkSerializer> work_serializer) override {
GrpcPolledFdWindows* polled_fd = sock_to_polled_fd_map_.LookupPolledFd(as);
// Set a flag so that the virtual socket "close" method knows it
// doesn't need to call ShutdownLocked, since now the driver will.
@ -933,8 +887,9 @@ class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory {
};
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* combiner) {
return absl::make_unique<GrpcPolledFdFactoryWindows>(combiner);
std::shared_ptr<WorkSerializer> work_serializer) {
return absl::make_unique<GrpcPolledFdFactoryWindows>(
std::move(work_serializer));
}
} // namespace grpc_core

@ -38,7 +38,6 @@
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
@ -360,7 +359,8 @@ done:
void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
grpc_ares_request* r, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
int query_timeout_ms, grpc_core::Combiner* combiner) {
int query_timeout_ms,
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
ares_channel* channel = nullptr;
@ -383,7 +383,8 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
port.reset(gpr_strdup(default_port));
}
error = grpc_ares_ev_driver_create_locked(&r->ev_driver, interested_parties,
query_timeout_ms, combiner, r);
query_timeout_ms,
std::move(work_serializer), r);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
channel = grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
// If dns_server is specified, use it.
@ -602,7 +603,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) {
grpc_ares_request* r =
static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
r->ev_driver = nullptr;
@ -637,7 +638,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
// Look up name using c-ares lib.
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
r, dns_server, name, default_port, interested_parties, query_timeout_ms,
combiner);
std::move(work_serializer));
return r;
}
@ -647,7 +648,8 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) =
grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {
GPR_ASSERT(r != nullptr);
@ -687,8 +689,8 @@ void grpc_ares_cleanup(void) {}
*/
typedef struct grpc_resolve_address_ares_request {
/* combiner that queries and related callbacks run under */
grpc_core::Combiner* combiner;
/* work_serializer that queries and related callbacks run under */
std::shared_ptr<grpc_core::WorkSerializer> work_serializer;
/** the pointer to receive the resolved addresses */
grpc_resolved_addresses** addrs_out;
/** currently resolving addresses */
@ -708,9 +710,8 @@ typedef struct grpc_resolve_address_ares_request {
grpc_ares_request* ares_request = nullptr;
} grpc_resolve_address_ares_request;
static void on_dns_lookup_done_locked(void* arg, grpc_error* error) {
grpc_resolve_address_ares_request* r =
static_cast<grpc_resolve_address_ares_request*>(arg);
static void on_dns_lookup_done_locked(grpc_resolve_address_ares_request* r,
grpc_error* error) {
gpr_free(r->ares_request);
grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->addresses == nullptr || r->addresses->empty()) {
@ -727,22 +728,19 @@ static void on_dns_lookup_done_locked(void* arg, grpc_error* error) {
sizeof(grpc_resolved_address));
}
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_resolve_address_done,
GRPC_ERROR_REF(error));
GRPC_COMBINER_UNREF(r->combiner, "on_dns_lookup_done_cb");
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_resolve_address_done, error);
delete r;
}
static void on_dns_lookup_done(void* arg, grpc_error* error) {
grpc_resolve_address_ares_request* r =
static_cast<grpc_resolve_address_ares_request*>(arg);
r->combiner->Run(GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked,
on_dns_lookup_done_locked, r, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
r->work_serializer->Run([r, error]() { on_dns_lookup_done_locked(r, error); },
DEBUG_LOCATION);
}
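// Error-ownership note for the two callbacks above: on_dns_lookup_done()
// takes one ref for the lambda, and on_dns_lookup_done_locked() then hands
// that same ref to ExecCtx::Run() along with on_resolve_address_done, so no
// explicit GRPC_ERROR_UNREF is needed on this path.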
static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
void* arg, grpc_error* /*unused_error*/) {
static void grpc_resolve_address_invoke_dns_lookup_ares_locked(void* arg) {
grpc_resolve_address_ares_request* r =
static_cast<grpc_resolve_address_ares_request*>(arg);
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked, on_dns_lookup_done, r,
@ -751,7 +749,7 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
&r->on_dns_lookup_done_locked, &r->addresses,
nullptr /* balancer_addresses */, nullptr /* service_config_json */,
GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, r->combiner);
GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, r->work_serializer);
}
static void grpc_resolve_address_ares_impl(const char* name,
@ -761,16 +759,15 @@ static void grpc_resolve_address_ares_impl(const char* name,
grpc_resolved_addresses** addrs) {
grpc_resolve_address_ares_request* r =
new grpc_resolve_address_ares_request();
r->combiner = grpc_combiner_create();
r->work_serializer = std::make_shared<grpc_core::WorkSerializer>();
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
r->name = name;
r->default_port = default_port;
r->interested_parties = interested_parties;
r->combiner->Run(
GRPC_CLOSURE_CREATE(grpc_resolve_address_invoke_dns_lookup_ares_locked, r,
nullptr),
GRPC_ERROR_NONE);
r->work_serializer->Run(
[r]() { grpc_resolve_address_invoke_dns_lookup_ares_locked(r); },
DEBUG_LOCATION);
}
void (*grpc_resolve_address_ares)(

@ -25,6 +25,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/work_serializer.h"
#define GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS 120000
@ -66,7 +67,7 @@ extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner);
std::shared_ptr<grpc_core::WorkSerializer> work_serializer);
/* Cancel the pending grpc_ares_request \a request */
extern void (*grpc_cancel_ares_request_locked)(grpc_ares_request* request);

@ -32,7 +32,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) {
return NULL;
}
@ -42,7 +42,8 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
std::shared_ptr<grpc_core::WorkSerializer> work_serializer) =
grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {}

@ -33,9 +33,9 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
@ -67,9 +67,9 @@ class NativeDnsResolver : public Resolver {
void StartResolvingLocked();
static void OnNextResolution(void* arg, grpc_error* error);
static void OnNextResolutionLocked(void* arg, grpc_error* error);
void OnNextResolutionLocked(grpc_error* error);
static void OnResolved(void* arg, grpc_error* error);
static void OnResolvedLocked(void* arg, grpc_error* error);
void OnResolvedLocked(grpc_error* error);
/// name to resolve
char* name_to_resolve_ = nullptr;
@ -97,7 +97,7 @@ class NativeDnsResolver : public Resolver {
};
NativeDnsResolver::NativeDnsResolver(ResolverArgs args)
: Resolver(args.combiner, std::move(args.result_handler)),
: Resolver(std::move(args.work_serializer), std::move(args.result_handler)),
backoff_(
BackOff::Options()
.set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
@ -149,79 +149,76 @@ void NativeDnsResolver::ShutdownLocked() {
void NativeDnsResolver::OnNextResolution(void* arg, grpc_error* error) {
NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
r->combiner()->Run(
GRPC_CLOSURE_INIT(&r->on_next_resolution_,
NativeDnsResolver::OnNextResolutionLocked, r, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
r->work_serializer()->Run([r, error]() { r->OnNextResolutionLocked(error); },
DEBUG_LOCATION);
}
void NativeDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
r->have_next_resolution_timer_ = false;
if (error == GRPC_ERROR_NONE && !r->resolving_) {
r->StartResolvingLocked();
void NativeDnsResolver::OnNextResolutionLocked(grpc_error* error) {
have_next_resolution_timer_ = false;
if (error == GRPC_ERROR_NONE && !resolving_) {
StartResolvingLocked();
}
r->Unref(DEBUG_LOCATION, "retry-timer");
Unref(DEBUG_LOCATION, "retry-timer");
GRPC_ERROR_UNREF(error);
}
void NativeDnsResolver::OnResolved(void* arg, grpc_error* error) {
NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
r->combiner()->Run(
GRPC_CLOSURE_INIT(&r->on_resolved_, NativeDnsResolver::OnResolvedLocked,
r, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // owned by lambda
r->work_serializer()->Run([r, error]() { r->OnResolvedLocked(error); },
DEBUG_LOCATION);
}
void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
GPR_ASSERT(r->resolving_);
r->resolving_ = false;
if (r->shutdown_) {
r->Unref(DEBUG_LOCATION, "dns-resolving");
void NativeDnsResolver::OnResolvedLocked(grpc_error* error) {
GPR_ASSERT(resolving_);
resolving_ = false;
if (shutdown_) {
Unref(DEBUG_LOCATION, "dns-resolving");
GRPC_ERROR_UNREF(error);
return;
}
if (r->addresses_ != nullptr) {
if (addresses_ != nullptr) {
Result result;
for (size_t i = 0; i < r->addresses_->naddrs; ++i) {
result.addresses.emplace_back(&r->addresses_->addrs[i].addr,
r->addresses_->addrs[i].len,
for (size_t i = 0; i < addresses_->naddrs; ++i) {
result.addresses.emplace_back(&addresses_->addrs[i].addr,
addresses_->addrs[i].len,
nullptr /* args */);
}
grpc_resolved_addresses_destroy(r->addresses_);
result.args = grpc_channel_args_copy(r->channel_args_);
r->result_handler()->ReturnResult(std::move(result));
grpc_resolved_addresses_destroy(addresses_);
result.args = grpc_channel_args_copy(channel_args_);
result_handler()->ReturnResult(std::move(result));
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
backoff_.Reset();
} else {
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
// Return transient error.
r->result_handler()->ReturnError(grpc_error_set_int(
result_handler()->ReturnError(grpc_error_set_int(
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"DNS resolution failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
// Set up for retry.
grpc_millis next_try = r->backoff_.NextAttemptTime();
grpc_millis next_try = backoff_.NextAttemptTime();
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
GPR_ASSERT(!r->have_next_resolution_timer_);
r->have_next_resolution_timer_ = true;
GPR_ASSERT(!have_next_resolution_timer_);
have_next_resolution_timer_ = true;
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.
r->Ref(DEBUG_LOCATION, "next_resolution_timer").release();
Ref(DEBUG_LOCATION, "next_resolution_timer").release();
if (timeout > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
GRPC_CLOSURE_INIT(&r->on_next_resolution_,
NativeDnsResolver::OnNextResolution, r,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&r->next_resolution_timer_, next_try,
&r->on_next_resolution_);
GRPC_CLOSURE_INIT(&on_next_resolution_, NativeDnsResolver::OnNextResolution,
this, grpc_schedule_on_exec_ctx);
grpc_timer_init(&next_resolution_timer_, next_try, &on_next_resolution_);
}
r->Unref(DEBUG_LOCATION, "dns-resolving");
Unref(DEBUG_LOCATION, "dns-resolving");
GRPC_ERROR_UNREF(error);
}
void NativeDnsResolver::MaybeStartResolvingLocked() {

@ -35,9 +35,9 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@ -57,20 +57,15 @@ class FakeResolver : public Resolver {
private:
friend class FakeResolverResponseGenerator;
friend class FakeResolverResponseSetter;
virtual ~FakeResolver();
void ShutdownLocked() override {
shutdown_ = true;
if (response_generator_ != nullptr) {
response_generator_->SetFakeResolver(nullptr);
response_generator_.reset();
}
}
void ShutdownLocked() override;
void MaybeSendResultLocked();
static void ReturnReresolutionResult(void* arg, grpc_error* error);
void ReturnReresolutionResult();
// passed-in parameters
grpc_channel_args* channel_args_ = nullptr;
@ -90,12 +85,11 @@ class FakeResolver : public Resolver {
// if true, return failure
bool return_failure_ = false;
// pending re-resolution
grpc_closure reresolution_closure_;
bool reresolution_closure_pending_ = false;
};
FakeResolver::FakeResolver(ResolverArgs args)
: Resolver(args.combiner, std::move(args.result_handler)),
: Resolver(std::move(args.work_serializer), std::move(args.result_handler)),
response_generator_(
FakeResolverResponseGenerator::GetFromArgs(args.args)) {
// Channels sharing the same subchannels may have different resolver response
@ -127,13 +121,20 @@ void FakeResolver::RequestReresolutionLocked() {
if (!reresolution_closure_pending_) {
reresolution_closure_pending_ = true;
Ref().release(); // ref held by closure
GRPC_CLOSURE_INIT(&reresolution_closure_, ReturnReresolutionResult, this,
nullptr);
combiner()->Run(&reresolution_closure_, GRPC_ERROR_NONE);
work_serializer()->Run([this]() { ReturnReresolutionResult(); },
DEBUG_LOCATION);
}
}
}
void FakeResolver::ShutdownLocked() {
shutdown_ = true;
if (response_generator_ != nullptr) {
response_generator_->SetFakeResolver(nullptr);
response_generator_.reset();
}
}
void FakeResolver::MaybeSendResultLocked() {
if (!started_ || shutdown_) return;
if (return_failure_) {
@ -159,11 +160,59 @@ void FakeResolver::MaybeSendResultLocked() {
}
}
void FakeResolver::ReturnReresolutionResult(void* arg, grpc_error* /*error*/) {
FakeResolver* self = static_cast<FakeResolver*>(arg);
self->reresolution_closure_pending_ = false;
self->MaybeSendResultLocked();
self->Unref();
void FakeResolver::ReturnReresolutionResult() {
reresolution_closure_pending_ = false;
MaybeSendResultLocked();
Unref();
}
class FakeResolverResponseSetter {
public:
explicit FakeResolverResponseSetter(RefCountedPtr<FakeResolver> resolver,
Resolver::Result result,
bool has_result = false,
bool immediate = true)
: resolver_(std::move(resolver)),
result_(std::move(result)),
has_result_(has_result),
immediate_(immediate) {}
void SetResponseLocked();
void SetReresolutionResponseLocked();
void SetFailureLocked();
private:
RefCountedPtr<FakeResolver> resolver_;
Resolver::Result result_;
bool has_result_;
bool immediate_;
};
// Deletes object when done
void FakeResolverResponseSetter::SetReresolutionResponseLocked() {
if (!resolver_->shutdown_) {
resolver_->reresolution_result_ = std::move(result_);
resolver_->has_reresolution_result_ = has_result_;
}
delete this;
}
// Deletes object when done
void FakeResolverResponseSetter::SetResponseLocked() {
if (!resolver_->shutdown_) {
resolver_->next_result_ = std::move(result_);
resolver_->has_next_result_ = true;
resolver_->MaybeSendResultLocked();
}
delete this;
}
// Deletes object when done
void FakeResolverResponseSetter::SetFailureLocked() {
if (!resolver_->shutdown_) {
resolver_->return_failure_ = true;
if (immediate_) resolver_->MaybeSendResultLocked();
}
delete this;
}
//
@ -174,26 +223,6 @@ FakeResolverResponseGenerator::FakeResolverResponseGenerator() {}
FakeResolverResponseGenerator::~FakeResolverResponseGenerator() {}
struct SetResponseClosureArg {
grpc_closure set_response_closure;
RefCountedPtr<FakeResolver> resolver;
Resolver::Result result;
bool has_result = false;
bool immediate = true;
};
void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
grpc_error* /*error*/) {
SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
auto& resolver = closure_arg->resolver;
if (!resolver->shutdown_) {
resolver->next_result_ = std::move(closure_arg->result);
resolver->has_next_result_ = true;
resolver->MaybeSendResultLocked();
}
delete closure_arg;
}
void FakeResolverResponseGenerator::SetResponse(Resolver::Result result) {
RefCountedPtr<FakeResolver> resolver;
{
@ -205,24 +234,10 @@ void FakeResolverResponseGenerator::SetResponse(Resolver::Result result) {
}
resolver = resolver_->Ref();
}
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = std::move(resolver);
closure_arg->result = std::move(result);
closure_arg->resolver->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
closure_arg, nullptr),
GRPC_ERROR_NONE);
}
void FakeResolverResponseGenerator::SetReresolutionResponseLocked(
void* arg, grpc_error* /*error*/) {
SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
auto& resolver = closure_arg->resolver;
if (!resolver->shutdown_) {
resolver->reresolution_result_ = std::move(closure_arg->result);
resolver->has_reresolution_result_ = closure_arg->has_result;
}
delete closure_arg;
FakeResolverResponseSetter* arg =
new FakeResolverResponseSetter(resolver, std::move(result));
resolver->work_serializer()->Run([arg]() { arg->SetResponseLocked(); },
DEBUG_LOCATION);
}
void FakeResolverResponseGenerator::SetReresolutionResponse(
@ -233,14 +248,10 @@ void FakeResolverResponseGenerator::SetReresolutionResponse(
GPR_ASSERT(resolver_ != nullptr);
resolver = resolver_->Ref();
}
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = std::move(resolver);
closure_arg->result = std::move(result);
closure_arg->has_result = true;
closure_arg->resolver->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
SetReresolutionResponseLocked, closure_arg, nullptr),
GRPC_ERROR_NONE);
FakeResolverResponseSetter* arg = new FakeResolverResponseSetter(
resolver, std::move(result), true /* has_result */);
resolver->work_serializer()->Run(
[arg]() { arg->SetReresolutionResponseLocked(); }, DEBUG_LOCATION);
}
void FakeResolverResponseGenerator::UnsetReresolutionResponse() {
@ -250,23 +261,10 @@ void FakeResolverResponseGenerator::UnsetReresolutionResponse() {
GPR_ASSERT(resolver_ != nullptr);
resolver = resolver_->Ref();
}
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = std::move(resolver);
closure_arg->resolver->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
SetReresolutionResponseLocked, closure_arg, nullptr),
GRPC_ERROR_NONE);
}
void FakeResolverResponseGenerator::SetFailureLocked(void* arg,
grpc_error* /*error*/) {
SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
auto& resolver = closure_arg->resolver;
if (!resolver->shutdown_) {
resolver->return_failure_ = true;
if (closure_arg->immediate) resolver->MaybeSendResultLocked();
}
delete closure_arg;
FakeResolverResponseSetter* arg =
new FakeResolverResponseSetter(resolver, Resolver::Result());
resolver->work_serializer()->Run(
[arg]() { arg->SetReresolutionResponseLocked(); }, DEBUG_LOCATION);
}
void FakeResolverResponseGenerator::SetFailure() {
@ -276,12 +274,10 @@ void FakeResolverResponseGenerator::SetFailure() {
GPR_ASSERT(resolver_ != nullptr);
resolver = resolver_->Ref();
}
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = std::move(resolver);
closure_arg->resolver->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
closure_arg, nullptr),
GRPC_ERROR_NONE);
FakeResolverResponseSetter* arg =
new FakeResolverResponseSetter(resolver, Resolver::Result());
resolver->work_serializer()->Run([arg]() { arg->SetFailureLocked(); },
DEBUG_LOCATION);
}
void FakeResolverResponseGenerator::SetFailureOnReresolution() {
@ -291,13 +287,11 @@ void FakeResolverResponseGenerator::SetFailureOnReresolution() {
GPR_ASSERT(resolver_ != nullptr);
resolver = resolver_->Ref();
}
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = std::move(resolver);
closure_arg->immediate = false;
closure_arg->resolver->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
closure_arg, nullptr),
GRPC_ERROR_NONE);
FakeResolverResponseSetter* arg = new FakeResolverResponseSetter(
resolver, Resolver::Result(), false /* has_result */,
false /* immediate */);
resolver->work_serializer()->Run([arg]() { arg->SetFailureLocked(); },
DEBUG_LOCATION);
}
void FakeResolverResponseGenerator::SetFakeResolver(
@ -306,13 +300,10 @@ void FakeResolverResponseGenerator::SetFakeResolver(
resolver_ = std::move(resolver);
if (resolver_ == nullptr) return;
if (has_result_) {
SetResponseClosureArg* closure_arg = new SetResponseClosureArg();
closure_arg->resolver = resolver_->Ref();
closure_arg->result = std::move(result_);
resolver_->combiner()->Run(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
closure_arg, nullptr),
GRPC_ERROR_NONE);
FakeResolverResponseSetter* arg =
new FakeResolverResponseSetter(resolver_, std::move(result_));
resolver_->work_serializer()->Run([arg]() { arg->SetResponseLocked(); },
DEBUG_LOCATION);
has_result_ = false;
}
}

@ -80,10 +80,6 @@ class FakeResolverResponseGenerator
// Set the corresponding FakeResolver to this generator.
void SetFakeResolver(RefCountedPtr<FakeResolver> resolver);
static void SetResponseLocked(void* arg, grpc_error* error);
static void SetReresolutionResponseLocked(void* arg, grpc_error* error);
static void SetFailureLocked(void* arg, grpc_error* error);
// Mutex protecting the members below.
Mutex mu_;
RefCountedPtr<FakeResolver> resolver_;

@ -31,9 +31,9 @@
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@ -57,7 +57,7 @@ class SockaddrResolver : public Resolver {
SockaddrResolver::SockaddrResolver(ServerAddressList addresses,
ResolverArgs args)
: Resolver(args.combiner, std::move(args.result_handler)),
: Resolver(std::move(args.work_serializer), std::move(args.result_handler)),
addresses_(std::move(addresses)),
channel_args_(grpc_channel_args_copy(args.args)) {}

@ -35,7 +35,8 @@ namespace {
class XdsResolver : public Resolver {
public:
explicit XdsResolver(ResolverArgs args)
: Resolver(args.combiner, std::move(args.result_handler)),
: Resolver(std::move(args.work_serializer),
std::move(args.result_handler)),
args_(grpc_channel_args_copy(args.args)),
interested_parties_(args.pollset_set) {
char* path = args.uri->path;
@ -112,7 +113,7 @@ void XdsResolver::ServiceConfigWatcher::OnError(grpc_error* error) {
void XdsResolver::StartLocked() {
grpc_error* error = GRPC_ERROR_NONE;
xds_client_ = MakeOrphanable<XdsClient>(
combiner(), interested_parties_, server_name_,
work_serializer(), interested_parties_, server_name_,
absl::make_unique<ServiceConfigWatcher>(Ref()), *args_, &error);
if (error != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR,

@ -38,8 +38,8 @@ struct ResolverArgs {
const grpc_channel_args* args = nullptr;
/// Used to drive I/O in the name resolution process.
grpc_pollset_set* pollset_set = nullptr;
/// The combiner under which all resolver calls will be run.
Combiner* combiner = nullptr;
/// The work_serializer under which all resolver calls will be run.
std::shared_ptr<WorkSerializer> work_serializer;
/// The result handler to be used by the resolver.
std::unique_ptr<Resolver::ResultHandler> result_handler;
};

@ -145,7 +145,8 @@ bool ResolverRegistry::IsValidTarget(const char* target) {
OrphanablePtr<Resolver> ResolverRegistry::CreateResolver(
const char* target, const grpc_channel_args* args,
grpc_pollset_set* pollset_set, Combiner* combiner,
grpc_pollset_set* pollset_set,
std::shared_ptr<WorkSerializer> work_serializer,
std::unique_ptr<Resolver::ResultHandler> result_handler) {
GPR_ASSERT(g_state != nullptr);
grpc_uri* uri = nullptr;
@ -156,7 +157,7 @@ OrphanablePtr<Resolver> ResolverRegistry::CreateResolver(
resolver_args.uri = uri;
resolver_args.args = args;
resolver_args.pollset_set = pollset_set;
resolver_args.combiner = combiner;
resolver_args.work_serializer = std::move(work_serializer);
resolver_args.result_handler = std::move(result_handler);
OrphanablePtr<Resolver> resolver =
factory == nullptr ? nullptr

@ -61,15 +61,16 @@ class ResolverRegistry {
/// prepends default_prefix to target and tries again.
/// If a resolver factory is found, uses it to instantiate a resolver and
/// returns it; otherwise, returns nullptr.
/// \a args, \a pollset_set, and \a combiner are passed to the factory's
/// \a CreateResolver() method.
/// \a args are the channel args to be included in resolver results.
/// \a pollset_set is used to drive I/O in the name resolution process.
/// \a combiner is the combiner under which all resolver calls will be run.
/// \a result_handler is used to return results from the resolver.
/// \a args, \a pollset_set, and \a work_serializer are passed to the
/// factory's \a CreateResolver() method. \a args are the channel args to be
/// included in resolver results. \a pollset_set is used to drive I/O in the
/// name resolution process. \a work_serializer is the work_serializer under
/// which all resolver calls will be run. \a result_handler is used to return
/// results from the resolver.
static OrphanablePtr<Resolver> CreateResolver(
const char* target, const grpc_channel_args* args,
grpc_pollset_set* pollset_set, Combiner* combiner,
grpc_pollset_set* pollset_set,
std::shared_ptr<WorkSerializer> work_serializer,
std::unique_ptr<Resolver::ResultHandler> result_handler);
/// Returns the default authority to pass from a client for \a target.

@ -50,7 +50,6 @@
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
@ -152,7 +151,7 @@ ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
process_resolver_result_user_data_(process_resolver_result_user_data) {
GPR_ASSERT(process_resolver_result != nullptr);
resolver_ = ResolverRegistry::CreateResolver(
target_uri_.get(), args.args, interested_parties(), combiner(),
target_uri_.get(), args.args, interested_parties(), work_serializer(),
absl::make_unique<ResolverResultHandler>(Ref()));
// Since the validity of args was already checked when creating the channel,
// CreateResolver() must return a non-null result.
@ -247,7 +246,7 @@ OrphanablePtr<LoadBalancingPolicy>
ResolvingLoadBalancingPolicy::CreateLbPolicyLocked(
const grpc_channel_args& args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.work_serializer = work_serializer();
lb_policy_args.channel_control_helper =
absl::make_unique<ResolvingControlHelper>(Ref());
lb_policy_args.args = &args;

@ -362,12 +362,44 @@ class Subchannel::ConnectedSubchannelStateWatcher
Subchannel* subchannel_;
};
// Asynchronously notifies the \a watcher of a change in the connectivity
// state of \a subchannel to the current \a state. Deletes itself when done.
class Subchannel::AsyncWatcherNotifierLocked {
public:
AsyncWatcherNotifierLocked(
RefCountedPtr<Subchannel::ConnectivityStateWatcherInterface> watcher,
Subchannel* subchannel, grpc_connectivity_state state)
: watcher_(std::move(watcher)) {
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
if (state == GRPC_CHANNEL_READY) {
connected_subchannel = subchannel->connected_subchannel_;
}
watcher_->PushConnectivityStateChange(
{state, std::move(connected_subchannel)});
ExecCtx::Run(
DEBUG_LOCATION,
GRPC_CLOSURE_INIT(&closure_,
[](void* arg, grpc_error* /*error*/) {
auto* self =
static_cast<AsyncWatcherNotifierLocked*>(arg);
self->watcher_->OnConnectivityStateChange();
delete self;
},
this, nullptr),
GRPC_ERROR_NONE);
}
private:
RefCountedPtr<Subchannel::ConnectivityStateWatcherInterface> watcher_;
grpc_closure closure_;
};
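// Note on ordering: the state_change is pushed into the watcher's queue
// synchronously (while still holding the subchannel's mutex) and only the
// OnConnectivityStateChange() callback is deferred to the ExecCtx. Queue
// order is therefore fixed at notification time, and each deferred callback
// pops exactly one entry, which keeps notifications ordered even though
// delivery is asynchronous.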
//
// Subchannel::ConnectivityStateWatcherList
//
void Subchannel::ConnectivityStateWatcherList::AddWatcherLocked(
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
watchers_.insert(std::make_pair(watcher.get(), std::move(watcher)));
}
@ -379,19 +411,7 @@ void Subchannel::ConnectivityStateWatcherList::RemoveWatcherLocked(
void Subchannel::ConnectivityStateWatcherList::NotifyLocked(
Subchannel* subchannel, grpc_connectivity_state state) {
for (const auto& p : watchers_) {
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
if (state == GRPC_CHANNEL_READY) {
connected_subchannel = subchannel->connected_subchannel_;
}
// TODO(roth): In principle, it seems wrong to send this notification
// to the watcher while holding the subchannel's mutex, since it could
// lead to a deadlock if the watcher calls back into the subchannel
// before returning back to us. In practice, this doesn't happen,
// because the LB policy code that watches subchannels always bounces
// the notification into the client_channel control-plane combiner
// before processing it. But if we ever have any other callers here,
// we will probably need to change this.
p.second->OnConnectivityStateChange(state, std::move(connected_subchannel));
new AsyncWatcherNotifierLocked(p.second, subchannel, state);
}
}
@ -428,14 +448,9 @@ class Subchannel::HealthWatcherMap::HealthWatcher
void AddWatcherLocked(
grpc_connectivity_state initial_state,
OrphanablePtr<Subchannel::ConnectivityStateWatcherInterface> watcher) {
RefCountedPtr<Subchannel::ConnectivityStateWatcherInterface> watcher) {
if (state_ != initial_state) {
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
if (state_ == GRPC_CHANNEL_READY) {
connected_subchannel = subchannel_->connected_subchannel_;
}
watcher->OnConnectivityStateChange(state_,
std::move(connected_subchannel));
new AsyncWatcherNotifierLocked(watcher, subchannel_, state_);
}
watcher_list_.AddWatcherLocked(std::move(watcher));
}
@ -503,7 +518,7 @@ class Subchannel::HealthWatcherMap::HealthWatcher
void Subchannel::HealthWatcherMap::AddWatcherLocked(
Subchannel* subchannel, grpc_connectivity_state initial_state,
grpc_core::UniquePtr<char> health_check_service_name,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
// If the health check service name is not already present in the map,
// add it.
auto it = map_.find(health_check_service_name.get());
@ -613,6 +628,21 @@ BackOff::Options ParseArgsForBackoffValues(
} // namespace
void Subchannel::ConnectivityStateWatcherInterface::PushConnectivityStateChange(
ConnectivityStateChange state_change) {
MutexLock lock(&mu_);
connectivity_state_queue_.push_back(std::move(state_change));
}
Subchannel::ConnectivityStateWatcherInterface::ConnectivityStateChange
Subchannel::ConnectivityStateWatcherInterface::PopConnectivityStateChange() {
MutexLock lock(&mu_);
GPR_ASSERT(!connectivity_state_queue_.empty());
ConnectivityStateChange state_change = connectivity_state_queue_.front();
connectivity_state_queue_.pop_front();
return state_change;
}
Subchannel::Subchannel(SubchannelKey* key,
OrphanablePtr<SubchannelConnector> connector,
const grpc_channel_args* args)
@ -788,7 +818,7 @@ grpc_connectivity_state Subchannel::CheckConnectivityState(
void Subchannel::WatchConnectivityState(
grpc_connectivity_state initial_state,
grpc_core::UniquePtr<char> health_check_service_name,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
MutexLock lock(&mu_);
grpc_pollset_set* interested_parties = watcher->interested_parties();
if (interested_parties != nullptr) {
@ -796,7 +826,7 @@ void Subchannel::WatchConnectivityState(
}
if (health_check_service_name == nullptr) {
if (state_ != initial_state) {
watcher->OnConnectivityStateChange(state_, connected_subchannel_);
new AsyncWatcherNotifierLocked(watcher, this, state_);
}
watcher_list_.AddWatcherLocked(std::move(watcher));
} else {

@ -21,6 +21,8 @@
#include <grpc/support/port_platform.h>
#include <deque>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/connector.h"
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
@ -176,24 +178,44 @@ class SubchannelCall {
class Subchannel {
public:
class ConnectivityStateWatcherInterface
: public InternallyRefCounted<ConnectivityStateWatcherInterface> {
: public RefCounted<ConnectivityStateWatcherInterface> {
public:
struct ConnectivityStateChange {
grpc_connectivity_state state;
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
};
virtual ~ConnectivityStateWatcherInterface() = default;
// Will be invoked whenever the subchannel's connectivity state
// changes. There will be only one invocation of this method on a
// given watcher instance at any given time.
//
// Implementations should call PopConnectivityStateChange to get the next
// connectivity state change.
virtual void OnConnectivityStateChange() = 0;
virtual grpc_pollset_set* interested_parties() = 0;
// Enqueues connectivity state change notifications.
// When the state changes to READY, connected_subchannel will
// contain a ref to the connected subchannel. When it changes from
// READY to some other state, the implementation must release its
// ref to the connected subchannel.
virtual void OnConnectivityStateChange(
grpc_connectivity_state new_state,
RefCountedPtr<ConnectedSubchannel> connected_subchannel) // NOLINT
= 0;
// TODO(yashkt): This is currently needed to send the state updates in the
// right order when asynchronously notifying. This will no longer be
// necessary when we have access to EventManager.
void PushConnectivityStateChange(ConnectivityStateChange state_change);
virtual grpc_pollset_set* interested_parties() = 0;
// Dequeues connectivity state change notifications.
ConnectivityStateChange PopConnectivityStateChange();
private:
// Keeps track of the updates that the watcher instance must be notified of.
// TODO(yashkt): This is currently needed to send the state updates in the
// right order when asynchronously notifying. This will no longer be
// necessary when we have access to EventManager.
std::deque<ConnectivityStateChange> connectivity_state_queue_;
Mutex mu_; // protects the queue
};
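// A minimal watcher sketch against this interface (illustrative only; real
// watchers live in the client channel and health-checking code). The
// subchannel pushes a ConnectivityStateChange before scheduling
// OnConnectivityStateChange(), so each callback pops exactly one entry:
//
//   class SketchWatcher : public Subchannel::ConnectivityStateWatcherInterface {
//    public:
//     void OnConnectivityStateChange() override {
//       ConnectivityStateChange change = PopConnectivityStateChange();
//       // Typically hops into the owner's WorkSerializer before acting on
//       // change.state and change.connected_subchannel.
//     }
//     grpc_pollset_set* interested_parties() override { return nullptr; }
//   };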
// The ctor and dtor are not intended to be used directly.
@ -243,7 +265,7 @@ class Subchannel {
void WatchConnectivityState(
grpc_connectivity_state initial_state,
grpc_core::UniquePtr<char> health_check_service_name,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher);
RefCountedPtr<ConnectivityStateWatcherInterface> watcher);
// Cancels a connectivity state watch.
// If the watcher has already been destroyed, this is a no-op.
@ -280,7 +302,7 @@ class Subchannel {
~ConnectivityStateWatcherList() { Clear(); }
void AddWatcherLocked(
OrphanablePtr<ConnectivityStateWatcherInterface> watcher);
RefCountedPtr<ConnectivityStateWatcherInterface> watcher);
void RemoveWatcherLocked(ConnectivityStateWatcherInterface* watcher);
// Notifies all watchers in the list about a change to state.
@ -294,7 +316,7 @@ class Subchannel {
// TODO(roth): Once we can use C++-14 heterogeneous lookups, this can
// be a set instead of a map.
std::map<ConnectivityStateWatcherInterface*,
OrphanablePtr<ConnectivityStateWatcherInterface>>
RefCountedPtr<ConnectivityStateWatcherInterface>>
watchers_;
};
@ -312,7 +334,7 @@ class Subchannel {
void AddWatcherLocked(
Subchannel* subchannel, grpc_connectivity_state initial_state,
grpc_core::UniquePtr<char> health_check_service_name,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher);
RefCountedPtr<ConnectivityStateWatcherInterface> watcher);
void RemoveWatcherLocked(const char* health_check_service_name,
ConnectivityStateWatcherInterface* watcher);
@ -332,6 +354,8 @@ class Subchannel {
class ConnectedSubchannelStateWatcher;
class AsyncWatcherNotifierLocked;
// Sets the subchannel's connectivity state to \a state.
void SetConnectivityStateLocked(grpc_connectivity_state state);

@ -49,10 +49,10 @@
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@ -96,7 +96,7 @@ class XdsClient::ChannelState::RetryableCall
void StartNewCallLocked();
void StartRetryTimerLocked();
static void OnRetryTimer(void* arg, grpc_error* error);
static void OnRetryTimerLocked(void* arg, grpc_error* error);
void OnRetryTimerLocked(grpc_error* error);
// The wrapped xds call that talks to the xds server. It's instantiated
// every time we start a new call. It's null during call retry backoff.
@ -170,51 +170,48 @@ class XdsClient::ChannelState::AdsCallState
private:
static void OnTimer(void* arg, grpc_error* error) {
ResourceState* self = static_cast<ResourceState*>(arg);
self->ads_calld_->xds_client()->combiner_->Run(
GRPC_CLOSURE_INIT(&self->timer_callback_, OnTimerLocked, self,
nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
self->ads_calld_->xds_client()->work_serializer_->Run(
[self, error]() { self->OnTimerLocked(error); }, DEBUG_LOCATION);
}
static void OnTimerLocked(void* arg, grpc_error* error) {
ResourceState* self = static_cast<ResourceState*>(arg);
if (error == GRPC_ERROR_NONE && self->timer_pending_) {
self->timer_pending_ = false;
void OnTimerLocked(grpc_error* error) {
if (error == GRPC_ERROR_NONE && timer_pending_) {
timer_pending_ = false;
char* msg;
gpr_asprintf(
&msg,
"timeout obtaining resource {type=%s name=%s} from xds server",
self->type_url_.c_str(), self->name_.c_str());
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
type_url_.c_str(), name_.c_str());
grpc_error* watcher_error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
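// Note: a distinct name avoids shadowing the timer's |error|, whose ref is
// released at the bottom of this method.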
gpr_free(msg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] %s",
self->ads_calld_->xds_client(), grpc_error_string(error));
gpr_log(GPR_INFO, "[xds_client %p] %s", ads_calld_->xds_client(),
grpc_error_string(watcher_error));
}
if (self->type_url_ == XdsApi::kLdsTypeUrl ||
self->type_url_ == XdsApi::kRdsTypeUrl) {
self->ads_calld_->xds_client()->service_config_watcher_->OnError(
error);
} else if (self->type_url_ == XdsApi::kCdsTypeUrl) {
ClusterState& state =
self->ads_calld_->xds_client()->cluster_map_[self->name_];
if (type_url_ == XdsApi::kLdsTypeUrl ||
type_url_ == XdsApi::kRdsTypeUrl) {
ads_calld_->xds_client()->service_config_watcher_->OnError(
watcher_error);
} else if (type_url_ == XdsApi::kCdsTypeUrl) {
ClusterState& state = ads_calld_->xds_client()->cluster_map_[name_];
for (const auto& p : state.watchers) {
p.first->OnError(GRPC_ERROR_REF(error));
p.first->OnError(GRPC_ERROR_REF(watcher_error));
}
GRPC_ERROR_UNREF(error);
} else if (self->type_url_ == XdsApi::kEdsTypeUrl) {
EndpointState& state =
self->ads_calld_->xds_client()->endpoint_map_[self->name_];
GRPC_ERROR_UNREF(watcher_error);
} else if (type_url_ == XdsApi::kEdsTypeUrl) {
EndpointState& state = ads_calld_->xds_client()->endpoint_map_[name_];
for (const auto& p : state.watchers) {
p.first->OnError(GRPC_ERROR_REF(error));
p.first->OnError(GRPC_ERROR_REF(watcher_error));
}
GRPC_ERROR_UNREF(error);
GRPC_ERROR_UNREF(watcher_error);
} else {
GPR_UNREACHABLE_CODE(return );
}
}
self->ads_calld_.reset();
self->Unref();
ads_calld_.reset();
Unref();
GRPC_ERROR_UNREF(error);
}
const std::string type_url_;
@ -248,11 +245,11 @@ class XdsClient::ChannelState::AdsCallState
void AcceptEdsUpdate(XdsApi::EdsUpdateMap eds_update_map);
static void OnRequestSent(void* arg, grpc_error* error);
static void OnRequestSentLocked(void* arg, grpc_error* error);
void OnRequestSentLocked(grpc_error* error);
static void OnResponseReceived(void* arg, grpc_error* error);
static void OnResponseReceivedLocked(void* arg, grpc_error* error);
void OnResponseReceivedLocked();
static void OnStatusReceived(void* arg, grpc_error* error);
static void OnStatusReceivedLocked(void* arg, grpc_error* error);
void OnStatusReceivedLocked(grpc_error* error);
bool IsCurrentCallOnChannel() const;
@ -315,6 +312,10 @@ class XdsClient::ChannelState::LrsCallState
public:
Reporter(RefCountedPtr<LrsCallState> parent, grpc_millis report_interval)
: parent_(std::move(parent)), report_interval_(report_interval) {
GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_report_done_, OnReportDone, this,
grpc_schedule_on_exec_ctx);
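// These closures are initialized once here; ScheduleNextReportLocked() and
// SendReportLocked() now reuse them without re-running GRPC_CLOSURE_INIT.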
ScheduleNextReportLocked();
}
@ -323,10 +324,10 @@ class XdsClient::ChannelState::LrsCallState
private:
void ScheduleNextReportLocked();
static void OnNextReportTimer(void* arg, grpc_error* error);
static void OnNextReportTimerLocked(void* arg, grpc_error* error);
void OnNextReportTimerLocked(grpc_error* error);
void SendReportLocked();
static void OnReportDone(void* arg, grpc_error* error);
static void OnReportDoneLocked(void* arg, grpc_error* error);
void OnReportDoneLocked(grpc_error* error);
bool IsCurrentReporterOnCall() const {
return this == parent_->reporter_.get();
@ -346,11 +347,11 @@ class XdsClient::ChannelState::LrsCallState
};
static void OnInitialRequestSent(void* arg, grpc_error* error);
void OnInitialRequestSentLocked();
static void OnResponseReceived(void* arg, grpc_error* error);
void OnResponseReceivedLocked();
static void OnStatusReceived(void* arg, grpc_error* error);
static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
static void OnResponseReceivedLocked(void* arg, grpc_error* error);
static void OnStatusReceivedLocked(void* arg, grpc_error* error);
void OnStatusReceivedLocked(grpc_error* error);
bool IsCurrentCallOnChannel() const;
@ -392,7 +393,8 @@ class XdsClient::ChannelState::StateWatcher
: public AsyncConnectivityStateWatcherInterface {
public:
explicit StateWatcher(RefCountedPtr<ChannelState> parent)
: AsyncConnectivityStateWatcherInterface(parent->xds_client()->combiner_),
: AsyncConnectivityStateWatcherInterface(
parent->xds_client()->work_serializer_),
parent_(std::move(parent)) {}
private:
@ -581,6 +583,9 @@ XdsClient::ChannelState::RetryableCall<T>::RetryableCall(
.set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
.set_jitter(GRPC_XDS_RECONNECT_JITTER)
.set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
// Closure Initialization
GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this,
grpc_schedule_on_exec_ctx);
StartNewCallLocked();
}
@ -634,8 +639,6 @@ void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
chand()->xds_client(), chand(), timeout);
}
this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release();
GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
retry_timer_callback_pending_ = true;
}
@ -644,27 +647,26 @@ template <typename T>
void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimer(
void* arg, grpc_error* error) {
RetryableCall* calld = static_cast<RetryableCall*>(arg);
calld->chand_->xds_client()->combiner_->Run(
GRPC_CLOSURE_INIT(&calld->on_retry_timer_, OnRetryTimerLocked, calld,
nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
calld->chand_->xds_client()->work_serializer_->Run(
[calld, error]() { calld->OnRetryTimerLocked(error); }, DEBUG_LOCATION);
}
template <typename T>
void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimerLocked(
void* arg, grpc_error* error) {
RetryableCall* calld = static_cast<RetryableCall*>(arg);
calld->retry_timer_callback_pending_ = false;
if (!calld->shutting_down_ && error == GRPC_ERROR_NONE) {
grpc_error* error) {
retry_timer_callback_pending_ = false;
if (!shutting_down_ && error == GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(
GPR_INFO,
"[xds_client %p] Retry timer fires (chand: %p, retryable call: %p)",
calld->chand()->xds_client(), calld->chand(), calld);
chand()->xds_client(), chand(), this);
}
calld->StartNewCallLocked();
StartNewCallLocked();
}
calld->Unref(DEBUG_LOCATION, "RetryableCall+retry_timer_done");
this->Unref(DEBUG_LOCATION, "RetryableCall+retry_timer_done");
GRPC_ERROR_UNREF(error);
}
//
@ -1164,19 +1166,18 @@ void XdsClient::ChannelState::AdsCallState::AcceptEdsUpdate(
void XdsClient::ChannelState::AdsCallState::OnRequestSent(void* arg,
grpc_error* error) {
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
ads_calld->xds_client()->combiner_->Run(
GRPC_CLOSURE_INIT(&ads_calld->on_request_sent_, OnRequestSentLocked,
ads_calld, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
ads_calld->xds_client()->work_serializer_->Run(
[ads_calld, error]() { ads_calld->OnRequestSentLocked(error); },
DEBUG_LOCATION);
}
void XdsClient::ChannelState::AdsCallState::OnRequestSentLocked(
void* arg, grpc_error* error) {
AdsCallState* self = static_cast<AdsCallState*>(arg);
if (self->IsCurrentCallOnChannel() && error == GRPC_ERROR_NONE) {
grpc_error* error) {
if (IsCurrentCallOnChannel() && error == GRPC_ERROR_NONE) {
// Clean up the sent message.
grpc_byte_buffer_destroy(self->send_message_payload_);
self->send_message_payload_ = nullptr;
grpc_byte_buffer_destroy(send_message_payload_);
send_message_payload_ = nullptr;
// Continue to send another pending message if any.
// TODO(roth): The current code to handle buffered messages has the
// advantage of sending only the most recent list of resource names for
@ -1186,41 +1187,36 @@ void XdsClient::ChannelState::AdsCallState::OnRequestSentLocked(
// order of resource types. We need to fix this if we are seeing some
// resource type(s) starved due to frequent requests of other resource
// type(s).
auto it = self->buffered_requests_.begin();
if (it != self->buffered_requests_.end()) {
self->SendMessageLocked(*it);
self->buffered_requests_.erase(it);
auto it = buffered_requests_.begin();
if (it != buffered_requests_.end()) {
SendMessageLocked(*it);
buffered_requests_.erase(it);
}
}
self->Unref(DEBUG_LOCATION, "ADS+OnRequestSentLocked");
Unref(DEBUG_LOCATION, "ADS+OnRequestSentLocked");
GRPC_ERROR_UNREF(error);
}
void XdsClient::ChannelState::AdsCallState::OnResponseReceived(
void* arg, grpc_error* error) {
void* arg, grpc_error* /* error */) {
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
ads_calld->xds_client()->combiner_->Run(
GRPC_CLOSURE_INIT(&ads_calld->on_response_received_,
OnResponseReceivedLocked, ads_calld, nullptr),
GRPC_ERROR_REF(error));
ads_calld->xds_client()->work_serializer_->Run(
[ads_calld]() { ads_calld->OnResponseReceivedLocked(); }, DEBUG_LOCATION);
}
void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
void* arg, grpc_error* /*error*/) {
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
XdsClient* xds_client = ads_calld->xds_client();
void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked() {
// Empty payload means the call was cancelled.
if (!ads_calld->IsCurrentCallOnChannel() ||
ads_calld->recv_message_payload_ == nullptr) {
ads_calld->Unref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked");
if (!IsCurrentCallOnChannel() || recv_message_payload_ == nullptr) {
Unref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked");
return;
}
// Read the response.
grpc_byte_buffer_reader bbr;
grpc_byte_buffer_reader_init(&bbr, ads_calld->recv_message_payload_);
grpc_byte_buffer_reader_init(&bbr, recv_message_payload_);
grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(ads_calld->recv_message_payload_);
ads_calld->recv_message_payload_ = nullptr;
grpc_byte_buffer_destroy(recv_message_payload_);
recv_message_payload_ = nullptr;
// TODO(juanlishen): When we convert this to use the xds protocol, the
// balancer will send us a fallback timeout such that we should go into
// fallback mode if we have lost contact with the balancer after a certain
@ -1238,24 +1234,24 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
std::string nonce;
std::string type_url;
// Note that ParseAdsResponse() also validates the response.
grpc_error* parse_error = xds_client->api_.ParseAdsResponse(
response_slice, xds_client->server_name_,
(xds_client->lds_result_.has_value()
? xds_client->lds_result_->route_config_name
grpc_error* parse_error = xds_client()->api_.ParseAdsResponse(
response_slice, xds_client()->server_name_,
(xds_client()->lds_result_.has_value()
? xds_client()->lds_result_->route_config_name
: ""),
xds_client->xds_routing_enabled_, ads_calld->ClusterNamesForRequest(),
ads_calld->EdsServiceNamesForRequest(), &lds_update, &rds_update,
&cds_update_map, &eds_update_map, &version, &nonce, &type_url);
xds_client()->xds_routing_enabled_, ClusterNamesForRequest(),
EdsServiceNamesForRequest(), &lds_update, &rds_update, &cds_update_map,
&eds_update_map, &version, &nonce, &type_url);
grpc_slice_unref_internal(response_slice);
if (type_url.empty()) {
// Ignore unparsable response.
gpr_log(GPR_ERROR,
"[xds_client %p] Error parsing ADS response (%s) -- ignoring",
xds_client, grpc_error_string(parse_error));
xds_client(), grpc_error_string(parse_error));
GRPC_ERROR_UNREF(parse_error);
} else {
// Update nonce.
auto& state = ads_calld->state_map_[type_url];
auto& state = state_map_[type_url];
state.nonce = std::move(nonce);
// NACK or ACK the response.
if (parse_error != GRPC_ERROR_NONE) {
@ -1265,85 +1261,80 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
gpr_log(GPR_ERROR,
"[xds_client %p] ADS response invalid for resource type %s "
"version %s, will NACK: nonce=%s error=%s",
xds_client, type_url.c_str(), version.c_str(),
xds_client(), type_url.c_str(), version.c_str(),
state.nonce.c_str(), grpc_error_string(parse_error));
ads_calld->SendMessageLocked(type_url);
SendMessageLocked(type_url);
} else {
ads_calld->seen_response_ = true;
seen_response_ = true;
// Accept the ADS response according to the type_url.
if (type_url == XdsApi::kLdsTypeUrl) {
ads_calld->AcceptLdsUpdate(std::move(lds_update));
AcceptLdsUpdate(std::move(lds_update));
} else if (type_url == XdsApi::kRdsTypeUrl) {
ads_calld->AcceptRdsUpdate(std::move(rds_update));
AcceptRdsUpdate(std::move(rds_update));
} else if (type_url == XdsApi::kCdsTypeUrl) {
ads_calld->AcceptCdsUpdate(std::move(cds_update_map));
AcceptCdsUpdate(std::move(cds_update_map));
} else if (type_url == XdsApi::kEdsTypeUrl) {
ads_calld->AcceptEdsUpdate(std::move(eds_update_map));
AcceptEdsUpdate(std::move(eds_update_map));
}
state.version = std::move(version);
// ACK the update.
ads_calld->SendMessageLocked(type_url);
SendMessageLocked(type_url);
// Start load reporting if needed.
auto& lrs_call = ads_calld->chand()->lrs_calld_;
auto& lrs_call = chand()->lrs_calld_;
if (lrs_call != nullptr) {
LrsCallState* lrs_calld = lrs_call->calld();
if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
}
}
}
if (xds_client->shutting_down_) {
ads_calld->Unref(DEBUG_LOCATION,
"ADS+OnResponseReceivedLocked+xds_shutdown");
if (xds_client()->shutting_down_) {
Unref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked+xds_shutdown");
return;
}
// Keep listening for updates.
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &ads_calld->recv_message_payload_;
op.data.recv_message.recv_message = &recv_message_payload_;
op.flags = 0;
op.reserved = nullptr;
GPR_ASSERT(ads_calld->call_ != nullptr);
GPR_ASSERT(call_ != nullptr);
// Reuse the "ADS+OnResponseReceivedLocked" ref taken in ctor.
GRPC_CLOSURE_INIT(&ads_calld->on_response_received_, OnResponseReceived,
ads_calld, grpc_schedule_on_exec_ctx);
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
ads_calld->call_, &op, 1, &ads_calld->on_response_received_);
const grpc_call_error call_error =
grpc_call_start_batch_and_execute(call_, &op, 1, &on_response_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
void XdsClient::ChannelState::AdsCallState::OnStatusReceived(
void* arg, grpc_error* error) {
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
ads_calld->xds_client()->combiner_->Run(
GRPC_CLOSURE_INIT(&ads_calld->on_status_received_, OnStatusReceivedLocked,
ads_calld, nullptr),
GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error); // ref owned by lambda
ads_calld->xds_client()->work_serializer_->Run(
[ads_calld, error]() { ads_calld->OnStatusReceivedLocked(error); },
DEBUG_LOCATION);
}
void XdsClient::ChannelState::AdsCallState::OnStatusReceivedLocked(
-   void* arg, grpc_error* error) {
- AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
- ChannelState* chand = ads_calld->chand();
- XdsClient* xds_client = ads_calld->xds_client();
+   grpc_error* error) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
-   char* status_details = grpc_slice_to_c_string(ads_calld->status_details_);
+   char* status_details = grpc_slice_to_c_string(status_details_);
    gpr_log(GPR_INFO,
            "[xds_client %p] ADS call status received. Status = %d, details "
            "= '%s', (chand: %p, ads_calld: %p, call: %p), error '%s'",
-           xds_client, ads_calld->status_code_, status_details, chand,
-           ads_calld, ads_calld->call_, grpc_error_string(error));
+           xds_client(), status_code_, status_details, chand(), this, call_,
+           grpc_error_string(error));
    gpr_free(status_details);
  }
  // Ignore status from a stale call.
- if (ads_calld->IsCurrentCallOnChannel()) {
+ if (IsCurrentCallOnChannel()) {
    // Try to restart the call.
-   ads_calld->parent_->OnCallFinishedLocked();
+   parent_->OnCallFinishedLocked();
    // Send error to all watchers.
-   xds_client->NotifyOnError(
+   xds_client()->NotifyOnError(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("xds call failed"));
  }
- ads_calld->Unref(DEBUG_LOCATION, "ADS+OnStatusReceivedLocked");
+ Unref(DEBUG_LOCATION, "ADS+OnStatusReceivedLocked");
  GRPC_ERROR_UNREF(error);
}
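The change above is the core transformation of this PR, repeated throughout the file: a C-style closure (`GRPC_CLOSURE_INIT` plus `combiner_->Run(closure, error)`) becomes a C++ lambda posted to the channel's `WorkSerializer`, and the `grpc_error` reference that the closure machinery used to carry is now taken explicitly before the hop, because the lambda owns it until the `*Locked` method unrefs it. A minimal sketch of the pattern with stand-in types (the serializer and error type below are illustrative approximations, not gRPC's actual internals):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <mutex>

    // Stand-in for grpc_core::WorkSerializer: runs one callback at a time.
    // (The real class also queues work submitted while another thread owns it.)
    class ToyWorkSerializer {
     public:
      void Run(std::function<void()> cb) {
        std::lock_guard<std::mutex> lock(mu_);
        cb();
      }
     private:
      std::mutex mu_;
    };

    // Stand-in for grpc_error with manual ref counting.
    struct ToyError {
      int refs = 1;
      ToyError* Ref() { ++refs; return this; }
      void Unref() { if (--refs == 0) delete this; }
    };

    class ToyCallState {
     public:
      explicit ToyCallState(std::shared_ptr<ToyWorkSerializer> ws)
          : work_serializer_(std::move(ws)) {}
      // Entry point; may be invoked from an arbitrary thread.
      void OnStatusReceived(ToyError* error) {
        error->Ref();  // ref owned by lambda, released in the Locked method
        work_serializer_->Run([this, error]() { OnStatusReceivedLocked(error); });
      }
     private:
      // Runs only inside the serializer; members need no extra locking here.
      void OnStatusReceivedLocked(ToyError* error) {
        std::cout << "status handled under serializer\n";
        error->Unref();  // release the lambda's ref
      }
      std::shared_ptr<ToyWorkSerializer> work_serializer_;
    };

    int main() {
      auto ws = std::make_shared<ToyWorkSerializer>();
      ToyCallState call(ws);
      ToyError* error = new ToyError;  // caller holds one ref
      call.OnStatusReceived(error);
      error->Unref();  // caller drops its ref; freed once both refs are gone
    }

A side effect of this shape is that the `*Locked` methods become ordinary member functions, so the `static_cast<...>(arg)` boilerplate and the explicit `self->`/`ads_calld->` prefixes disappear, as the diff shows.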
bool XdsClient::ChannelState::AdsCallState::IsCurrentCallOnChannel() const {
@@ -1388,8 +1379,6 @@ void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() {
void XdsClient::ChannelState::LrsCallState::Reporter::
    ScheduleNextReportLocked() {
  const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
- GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this,
-                   grpc_schedule_on_exec_ctx);
  grpc_timer_init(&next_report_timer_, next_report_time,
                  &on_next_report_timer_);
  next_report_timer_callback_pending_ = true;
@@ -1398,21 +1387,21 @@ void XdsClient::ChannelState::LrsCallState::Reporter::
void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimer(
    void* arg, grpc_error* error) {
  Reporter* self = static_cast<Reporter*>(arg);
- self->xds_client()->combiner_->Run(
-     GRPC_CLOSURE_INIT(&self->on_next_report_timer_, OnNextReportTimerLocked,
-                       self, nullptr),
-     GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error);  // ref owned by lambda
+ self->xds_client()->work_serializer_->Run(
+     [self, error]() { self->OnNextReportTimerLocked(error); },
+     DEBUG_LOCATION);
}
void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimerLocked(
-   void* arg, grpc_error* error) {
- Reporter* self = static_cast<Reporter*>(arg);
- self->next_report_timer_callback_pending_ = false;
- if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
-   self->Unref(DEBUG_LOCATION, "Reporter+timer");
-   return;
+   grpc_error* error) {
+ next_report_timer_callback_pending_ = false;
+ if (error != GRPC_ERROR_NONE || !IsCurrentReporterOnCall()) {
+   Unref(DEBUG_LOCATION, "Reporter+timer");
+ } else {
+   SendReportLocked();
  }
- self->SendReportLocked();
+ GRPC_ERROR_UNREF(error);
}
namespace {
@@ -1456,8 +1445,6 @@ void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = parent_->send_message_payload_;
- GRPC_CLOSURE_INIT(&on_report_done_, OnReportDone, this,
-                   grpc_schedule_on_exec_ctx);
  grpc_call_error call_error = grpc_call_start_batch_and_execute(
      parent_->call_, &op, 1, &on_report_done_);
  if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
@@ -1471,33 +1458,32 @@ void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDone(
    void* arg, grpc_error* error) {
  Reporter* self = static_cast<Reporter*>(arg);
- self->xds_client()->combiner_->Run(
-     GRPC_CLOSURE_INIT(&self->on_report_done_, OnReportDoneLocked, self,
-                       nullptr),
-     GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error);  // ref owned by lambda
+ self->xds_client()->work_serializer_->Run(
+     [self, error]() { self->OnReportDoneLocked(error); }, DEBUG_LOCATION);
}
void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked(
-   void* arg, grpc_error* error) {
- Reporter* self = static_cast<Reporter*>(arg);
- grpc_byte_buffer_destroy(self->parent_->send_message_payload_);
- self->parent_->send_message_payload_ = nullptr;
+   grpc_error* error) {
+ grpc_byte_buffer_destroy(parent_->send_message_payload_);
+ parent_->send_message_payload_ = nullptr;
  // If there are no more registered stats to report, cancel the call.
- if (self->xds_client()->load_report_map_.empty()) {
-   self->parent_->chand()->StopLrsCall();
-   self->Unref(DEBUG_LOCATION, "Reporter+report_done+no_more_reporters");
+ if (xds_client()->load_report_map_.empty()) {
+   parent_->chand()->StopLrsCall();
+   Unref(DEBUG_LOCATION, "Reporter+report_done+no_more_reporters");
    return;
  }
- if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
+ if (error != GRPC_ERROR_NONE || !IsCurrentReporterOnCall()) {
    // If this reporter is no longer the current one on the call, the reason
    // might be that it was orphaned for a new one due to config update.
-   if (!self->IsCurrentReporterOnCall()) {
-     self->parent_->MaybeStartReportingLocked();
+   if (!IsCurrentReporterOnCall()) {
+     parent_->MaybeStartReportingLocked();
    }
-   self->Unref(DEBUG_LOCATION, "Reporter+report_done");
-   return;
+   Unref(DEBUG_LOCATION, "Reporter+report_done");
+ } else {
+   ScheduleNextReportLocked();
  }
- self->ScheduleNextReportLocked();
+ GRPC_ERROR_UNREF(error);
}
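Note the second change folded into these timer and report-done handlers: the old early-return shape (`Unref(...); return;` followed by an unconditional call) gives way to if/else, so the function can end with a single `GRPC_ERROR_UNREF(error)` for the reference the lambda now owns. The shape, reduced to a compilable sketch with illustrative types (not gRPC's):

    #include <cstdio>

    // Illustrative error type with manual ref counting.
    struct ToyError { int refs = 1; };
    void ToyErrorUnref(ToyError* e) {
      if (e != nullptr && --e->refs == 0) std::printf("error freed\n");
    }

    // Both branches fall through to the single unref at the bottom.
    void OnTimerLocked(ToyError* error, bool reporter_is_current) {
      if (error != nullptr || !reporter_is_current) {
        std::printf("dropping reporter ref\n");  // stands in for Unref(...)
      } else {
        std::printf("sending report\n");  // stands in for SendReportLocked()
      }
      ToyErrorUnref(error);  // exactly once, on the paths that reach here
    }

    int main() {
      ToyError e;
      OnTimerLocked(&e, /*reporter_is_current=*/false);
      OnTimerLocked(nullptr, /*reporter_is_current=*/true);
    }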
//
@@ -1643,75 +1629,66 @@ void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
}
void XdsClient::ChannelState::LrsCallState::OnInitialRequestSent(
-   void* arg, grpc_error* error) {
+   void* arg, grpc_error* /*error*/) {
  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- lrs_calld->xds_client()->combiner_->Run(
-     GRPC_CLOSURE_INIT(&lrs_calld->on_initial_request_sent_,
-                       OnInitialRequestSentLocked, lrs_calld, nullptr),
-     GRPC_ERROR_REF(error));
+ lrs_calld->xds_client()->work_serializer_->Run(
+     [lrs_calld]() { lrs_calld->OnInitialRequestSentLocked(); },
+     DEBUG_LOCATION);
}
-void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
-   void* arg, grpc_error* /*error*/) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked() {
  // Clear the send_message_payload_.
- grpc_byte_buffer_destroy(lrs_calld->send_message_payload_);
- lrs_calld->send_message_payload_ = nullptr;
- lrs_calld->MaybeStartReportingLocked();
- lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
+ grpc_byte_buffer_destroy(send_message_payload_);
+ send_message_payload_ = nullptr;
+ MaybeStartReportingLocked();
+ Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
}
void XdsClient::ChannelState::LrsCallState::OnResponseReceived(
-   void* arg, grpc_error* error) {
+   void* arg, grpc_error* /*error*/) {
  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- lrs_calld->xds_client()->combiner_->Run(
-     GRPC_CLOSURE_INIT(&lrs_calld->on_response_received_,
-                       OnResponseReceivedLocked, lrs_calld, nullptr),
-     GRPC_ERROR_REF(error));
+ lrs_calld->xds_client()->work_serializer_->Run(
+     [lrs_calld]() { lrs_calld->OnResponseReceivedLocked(); }, DEBUG_LOCATION);
}
-void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
-   void* arg, grpc_error* /*error*/) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- XdsClient* xds_client = lrs_calld->xds_client();
+void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() {
  // Empty payload means the call was cancelled.
- if (!lrs_calld->IsCurrentCallOnChannel() ||
-     lrs_calld->recv_message_payload_ == nullptr) {
-   lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked");
+ if (!IsCurrentCallOnChannel() || recv_message_payload_ == nullptr) {
+   Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked");
    return;
  }
  // Read the response.
  grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lrs_calld->recv_message_payload_);
+ grpc_byte_buffer_reader_init(&bbr, recv_message_payload_);
  grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
  grpc_byte_buffer_reader_destroy(&bbr);
- grpc_byte_buffer_destroy(lrs_calld->recv_message_payload_);
- lrs_calld->recv_message_payload_ = nullptr;
+ grpc_byte_buffer_destroy(recv_message_payload_);
+ recv_message_payload_ = nullptr;
  // This anonymous lambda is a hack to avoid the usage of goto.
  [&]() {
    // Parse the response.
    std::set<std::string> new_cluster_names;
    grpc_millis new_load_reporting_interval;
-   grpc_error* parse_error = xds_client->api_.ParseLrsResponse(
+   grpc_error* parse_error = xds_client()->api_.ParseLrsResponse(
        response_slice, &new_cluster_names, &new_load_reporting_interval);
    if (parse_error != GRPC_ERROR_NONE) {
      gpr_log(GPR_ERROR,
              "[xds_client %p] LRS response parsing failed. error=%s",
-             xds_client, grpc_error_string(parse_error));
+             xds_client(), grpc_error_string(parse_error));
      GRPC_ERROR_UNREF(parse_error);
      return;
    }
-   lrs_calld->seen_response_ = true;
+   seen_response_ = true;
    if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
      gpr_log(GPR_INFO,
              "[xds_client %p] LRS response received, %" PRIuPTR
              " cluster names, load_report_interval=%" PRId64 "ms",
-             xds_client, new_cluster_names.size(),
+             xds_client(), new_cluster_names.size(),
              new_load_reporting_interval);
      size_t i = 0;
      for (const auto& name : new_cluster_names) {
        gpr_log(GPR_INFO, "[xds_client %p] cluster_name %" PRIuPTR ": %s",
-               xds_client, i++, name.c_str());
+               xds_client(), i++, name.c_str());
      }
    }
    if (new_load_reporting_interval <
@@ -1722,81 +1699,76 @@ void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
        gpr_log(GPR_INFO,
                "[xds_client %p] Increased load_report_interval to minimum "
                "value %dms",
-               xds_client, GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
+               xds_client(), GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
      }
    }
    // Ignore identical update.
-   if (lrs_calld->cluster_names_ == new_cluster_names &&
-       lrs_calld->load_reporting_interval_ == new_load_reporting_interval) {
+   if (cluster_names_ == new_cluster_names &&
+       load_reporting_interval_ == new_load_reporting_interval) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
        gpr_log(GPR_INFO,
                "[xds_client %p] Incoming LRS response identical to current, "
                "ignoring.",
-               xds_client);
+               xds_client());
      }
      return;
    }
    // Stop current load reporting (if any) to adopt the new config.
-   lrs_calld->reporter_.reset();
+   reporter_.reset();
    // Record the new config.
-   lrs_calld->cluster_names_ = std::move(new_cluster_names);
-   lrs_calld->load_reporting_interval_ = new_load_reporting_interval;
+   cluster_names_ = std::move(new_cluster_names);
+   load_reporting_interval_ = new_load_reporting_interval;
    // Try starting sending load report.
-   lrs_calld->MaybeStartReportingLocked();
+   MaybeStartReportingLocked();
  }();
  grpc_slice_unref_internal(response_slice);
- if (xds_client->shutting_down_) {
-   lrs_calld->Unref(DEBUG_LOCATION,
-                    "LRS+OnResponseReceivedLocked+xds_shutdown");
+ if (xds_client()->shutting_down_) {
+   Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked+xds_shutdown");
    return;
  }
  // Keep listening for LRS config updates.
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_RECV_MESSAGE;
- op.data.recv_message.recv_message = &lrs_calld->recv_message_payload_;
+ op.data.recv_message.recv_message = &recv_message_payload_;
  op.flags = 0;
  op.reserved = nullptr;
- GPR_ASSERT(lrs_calld->call_ != nullptr);
+ GPR_ASSERT(call_ != nullptr);
  // Reuse the "OnResponseReceivedLocked" ref taken in ctor.
- GRPC_CLOSURE_INIT(&lrs_calld->on_response_received_, OnResponseReceived,
-                   lrs_calld, grpc_schedule_on_exec_ctx);
- const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-     lrs_calld->call_, &op, 1, &lrs_calld->on_response_received_);
+ const grpc_call_error call_error =
+     grpc_call_start_batch_and_execute(call_, &op, 1, &on_response_received_);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
}
void XdsClient::ChannelState::LrsCallState::OnStatusReceived(
    void* arg, grpc_error* error) {
  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- lrs_calld->xds_client()->combiner_->Run(
-     GRPC_CLOSURE_INIT(&lrs_calld->on_status_received_, OnStatusReceivedLocked,
-                       lrs_calld, nullptr),
-     GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error);  // ref owned by lambda
+ lrs_calld->xds_client()->work_serializer_->Run(
+     [lrs_calld, error]() { lrs_calld->OnStatusReceivedLocked(error); },
+     DEBUG_LOCATION);
}
void XdsClient::ChannelState::LrsCallState::OnStatusReceivedLocked(
-   void* arg, grpc_error* error) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- XdsClient* xds_client = lrs_calld->xds_client();
- ChannelState* chand = lrs_calld->chand();
- GPR_ASSERT(lrs_calld->call_ != nullptr);
+   grpc_error* error) {
+ GPR_ASSERT(call_ != nullptr);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
-   char* status_details = grpc_slice_to_c_string(lrs_calld->status_details_);
+   char* status_details = grpc_slice_to_c_string(status_details_);
    gpr_log(GPR_INFO,
            "[xds_client %p] LRS call status received. Status = %d, details "
            "= '%s', (chand: %p, calld: %p, call: %p), error '%s'",
-           xds_client, lrs_calld->status_code_, status_details, chand,
-           lrs_calld, lrs_calld->call_, grpc_error_string(error));
+           xds_client(), status_code_, status_details, chand(), this, call_,
+           grpc_error_string(error));
    gpr_free(status_details);
  }
  // Ignore status from a stale call.
- if (lrs_calld->IsCurrentCallOnChannel()) {
-   GPR_ASSERT(!xds_client->shutting_down_);
+ if (IsCurrentCallOnChannel()) {
+   GPR_ASSERT(!xds_client()->shutting_down_);
    // Try to restart the call.
-   lrs_calld->parent_->OnCallFinishedLocked();
+   parent_->OnCallFinishedLocked();
  }
- lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnStatusReceivedLocked");
+ Unref(DEBUG_LOCATION, "LRS+OnStatusReceivedLocked");
  GRPC_ERROR_UNREF(error);
}
bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const {
@@ -1825,14 +1797,15 @@ bool GetXdsRoutingEnabled(const grpc_channel_args& args) {
}  // namespace
-XdsClient::XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties,
+XdsClient::XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
+                     grpc_pollset_set* interested_parties,
                     StringView server_name,
                     std::unique_ptr<ServiceConfigWatcherInterface> watcher,
                     const grpc_channel_args& channel_args, grpc_error** error)
    : InternallyRefCounted<XdsClient>(&grpc_xds_client_trace),
      request_timeout_(GetRequestTimeout(channel_args)),
      xds_routing_enabled_(GetXdsRoutingEnabled(channel_args)),
-     combiner_(GRPC_COMBINER_REF(combiner, "xds_client")),
+     work_serializer_(std::move(work_serializer)),
      interested_parties_(interested_parties),
      bootstrap_(
          XdsBootstrap::ReadFromFile(this, &grpc_xds_client_trace, error)),
@@ -1871,7 +1844,6 @@ XdsClient::~XdsClient() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
    gpr_log(GPR_INFO, "[xds_client %p] destroying xds client", this);
  }
- GRPC_COMBINER_UNREF(combiner_, "xds_client");
}
void XdsClient::Orphan() {

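The constructor change above also swaps the ownership model: the old code paired a manual `GRPC_COMBINER_REF` in the initializer list with a `GRPC_COMBINER_UNREF` in `~XdsClient()`, while the new code holds a `std::shared_ptr<WorkSerializer>` whose destructor releases the reference automatically. A small sketch of the resulting ownership, using stand-in types rather than gRPC's:

    #include <cstdio>
    #include <memory>

    // Stand-in for WorkSerializer; only its lifetime matters here.
    struct ToySerializer {
      ~ToySerializer() { std::printf("serializer destroyed\n"); }
    };

    // Shared ownership: no hand-written unref in the destructor.
    class ToyXdsClient {
     public:
      explicit ToyXdsClient(std::shared_ptr<ToySerializer> ws)
          : work_serializer_(std::move(ws)) {}
      // ~ToyXdsClient() needs no explicit release; ~shared_ptr does it.
     private:
      std::shared_ptr<ToySerializer> work_serializer_;
    };

    int main() {
      auto ws = std::make_shared<ToySerializer>();
      {
        ToyXdsClient client(ws);
      }  // the client's reference is released here
      // the serializer itself is destroyed when `ws` goes out of scope
    }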
@@ -33,7 +33,7 @@
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/string_view.h"
-#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@@ -74,8 +74,8 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
  // If *error is not GRPC_ERROR_NONE after construction, then there was
  // an error initializing the client.
- XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties,
-           StringView server_name,
+ XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
+           grpc_pollset_set* interested_parties, StringView server_name,
            std::unique_ptr<ServiceConfigWatcherInterface> watcher,
            const grpc_channel_args& channel_args, grpc_error** error);
  ~XdsClient();
@@ -243,7 +243,7 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
  const bool xds_routing_enabled_;
- Combiner* combiner_;
+ std::shared_ptr<WorkSerializer> work_serializer_;
  grpc_pollset_set* interested_parties_;
  std::unique_ptr<XdsBootstrap> bootstrap_;

@@ -196,8 +196,8 @@ class XdsClusterLocalityStats : public RefCounted<XdsClusterLocalityStats> {
  // Protects backend_metrics_. A mutex is necessary because the length of
  // backend_metrics_ can be accessed by both the callback intercepting the
- // call's recv_trailing_metadata (not from the control plane combiner) and
- // the load reporting thread (from the control plane combiner).
+ // call's recv_trailing_metadata (not from the control plane work serializer)
+ // and the load reporting thread (from the control plane work serializer).
  Mutex backend_metrics_mu_;
  std::map<std::string, BackendMetric> backend_metrics_;
};

@@ -58,12 +58,13 @@ const char* ConnectivityStateName(grpc_connectivity_state state) {
class AsyncConnectivityStateWatcherInterface::Notifier {
 public:
  Notifier(RefCountedPtr<AsyncConnectivityStateWatcherInterface> watcher,
-          grpc_connectivity_state state, Combiner* combiner)
+          grpc_connectivity_state state,
+          const std::shared_ptr<WorkSerializer>& work_serializer)
      : watcher_(std::move(watcher)), state_(state) {
-   if (combiner != nullptr) {
-     combiner->Run(
-         GRPC_CLOSURE_INIT(&closure_, SendNotification, this, nullptr),
-         GRPC_ERROR_NONE);
+   if (work_serializer != nullptr) {
+     work_serializer->Run(
+         [this]() { SendNotification(this, GRPC_ERROR_NONE); },
+         DEBUG_LOCATION);
    } else {
      GRPC_CLOSURE_INIT(&closure_, SendNotification, this,
                        grpc_schedule_on_exec_ctx);
@@ -89,7 +90,7 @@ class AsyncConnectivityStateWatcherInterface::Notifier {
void AsyncConnectivityStateWatcherInterface::Notify(
    grpc_connectivity_state state) {
- new Notifier(Ref(), state, combiner_);  // Deletes itself when done.
+ new Notifier(Ref(), state, work_serializer_);  // Deletes itself when done.
}
//

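The `Notifier` above dispatches on whether a work serializer was supplied: if so, the notification hops onto it; otherwise it stays on the legacy closure path scheduled on the `ExecCtx`. The branch, reduced to a compilable sketch (the serializer below is a stand-in, and the direct call stands in for ExecCtx scheduling):

    #include <cstdio>
    #include <functional>
    #include <memory>

    // Stand-in for WorkSerializer; the real one serializes callbacks.
    class ToySerializer {
     public:
      void Run(std::function<void()> cb) { cb(); }
    };

    void Notify(const std::shared_ptr<ToySerializer>& work_serializer,
                std::function<void()> send_notification) {
      if (work_serializer != nullptr) {
        // Run the notification under the serializer.
        work_serializer->Run(std::move(send_notification));
      } else {
        // Stand-in for scheduling the closure on the ExecCtx.
        send_notification();
      }
    }

    int main() {
      Notify(std::make_shared<ToySerializer>(),
             [] { std::puts("notified under serializer"); });
      Notify(nullptr, [] { std::puts("notified via exec-ctx path"); });
    }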
@@ -29,6 +29,7 @@
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@@ -70,14 +71,15 @@ class AsyncConnectivityStateWatcherInterface
  // If \a combiner is nullptr, then the notification will be scheduled on the
  // ExecCtx.
- explicit AsyncConnectivityStateWatcherInterface(Combiner* combiner = nullptr)
-     : combiner_(combiner) {}
+ explicit AsyncConnectivityStateWatcherInterface(
+     std::shared_ptr<WorkSerializer> work_serializer = nullptr)
+     : work_serializer_(std::move(work_serializer)) {}
  // Invoked asynchronously when Notify() is called.
  virtual void OnConnectivityStateChange(grpc_connectivity_state new_state) = 0;
 private:
- Combiner* combiner_;
+ std::shared_ptr<WorkSerializer> work_serializer_;
};
// Tracks connectivity state. Maintains a list of watchers that are

@@ -26,14 +26,14 @@
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/util/test_config.h"
static gpr_mu g_mu;
static bool g_fail_resolution = true;
-static grpc_core::Combiner* g_combiner;
+static std::shared_ptr<grpc_core::WorkSerializer>* g_work_serializer;
static void my_resolve_address(const char* addr, const char* /*default_port*/,
                               grpc_pollset_set* /*interested_parties*/,
@@ -66,7 +66,7 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
    std::unique_ptr<grpc_core::ServerAddressList>* addresses,
    std::unique_ptr<grpc_core::ServerAddressList>* /*balancer_addresses*/,
    char** /*service_config_json*/, int /*query_timeout_ms*/,
-   grpc_core::Combiner* /*combiner*/) {
+   std::shared_ptr<grpc_core::WorkSerializer> /*combiner*/) {  // NOLINT
  gpr_mu_lock(&g_mu);
  GPR_ASSERT(0 == strcmp("test", addr));
  grpc_error* error = GRPC_ERROR_NONE;
@@ -99,7 +99,7 @@ static grpc_core::OrphanablePtr<grpc_core::Resolver> create_resolver(
  GPR_ASSERT(uri);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = std::move(result_handler);
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -161,7 +161,8 @@ int main(int argc, char** argv) {
  grpc_init();
  gpr_mu_init(&g_mu);
- g_combiner = grpc_combiner_create();
+ auto work_serializer = std::make_shared<grpc_core::WorkSerializer>();
+ g_work_serializer = &work_serializer;
  grpc_set_resolver_impl(&test_resolver);
  grpc_dns_lookup_ares_locked = my_dns_lookup_ares_locked;
  grpc_cancel_ares_request_locked = my_cancel_ares_request_locked;
@@ -186,8 +187,6 @@ int main(int argc, char** argv) {
  GPR_ASSERT(wait_loop(30, &output2.ev));
  GPR_ASSERT(!output2.result.addresses.empty());
  GPR_ASSERT(output2.error == GRPC_ERROR_NONE);
- GRPC_COMBINER_UNREF(g_combiner, "test");
}
grpc_shutdown();

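The test migrations that follow all use one recipe: the global `Combiner*` (created once, manually unref'd at shutdown) becomes a global pointer to a `std::shared_ptr<WorkSerializer>` that lives on `main()`'s stack, so helpers copy the shared_ptr on demand and the explicit teardown block disappears. The recipe in isolation, with a stand-in serializer type:

    #include <cstdio>
    #include <memory>

    struct ToySerializer {};  // stand-in for grpc_core::WorkSerializer

    // Pointer to a shared_ptr owned by main(), mirroring g_work_serializer.
    static std::shared_ptr<ToySerializer>* g_work_serializer;

    static void create_resolver_like_helper() {
      // Copy the shared_ptr, as in `args.work_serializer = *g_work_serializer;`.
      std::shared_ptr<ToySerializer> work_serializer = *g_work_serializer;
      std::printf("use_count=%ld\n",
                  static_cast<long>(work_serializer.use_count()));
    }

    int main() {
      auto work_serializer = std::make_shared<ToySerializer>();
      g_work_serializer = &work_serializer;
      create_resolver_like_helper();
      // No GRPC_COMBINER_UNREF equivalent: the last reference is
      // released when work_serializer goes out of scope here.
    }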
@@ -26,8 +26,8 @@
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/memory.h"
-#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/util/test_config.h"
constexpr int kMinResolutionPeriodMs = 1000;
@@ -37,7 +37,7 @@ constexpr int kMinResolutionPeriodForCheckMs = 900;
extern grpc_address_resolver_vtable* grpc_resolve_address_impl;
static grpc_address_resolver_vtable* default_resolve_address;
-static grpc_core::Combiner* g_combiner;
+static std::shared_ptr<grpc_core::WorkSerializer>* g_work_serializer;
static grpc_ares_request* (*g_default_dns_lookup_ares_locked)(
    const char* dns_server, const char* name, const char* default_port,
@@ -45,7 +45,7 @@ static grpc_ares_request* (*g_default_dns_lookup_ares_locked)(
    std::unique_ptr<grpc_core::ServerAddressList>* addresses,
    std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
    char** service_config_json, int query_timeout_ms,
-   grpc_core::Combiner* combiner);
+   std::shared_ptr<grpc_core::WorkSerializer> work_serializer);
// Counter incremented by test_resolve_address_impl indicating the number of
// times a system-level resolution has happened.
@@ -97,11 +97,11 @@ static grpc_ares_request* test_dns_lookup_ares_locked(
    std::unique_ptr<grpc_core::ServerAddressList>* addresses,
    std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
    char** service_config_json, int query_timeout_ms,
-   grpc_core::Combiner* combiner) {
+   std::shared_ptr<grpc_core::WorkSerializer> work_serializer) {
  grpc_ares_request* result = g_default_dns_lookup_ares_locked(
      dns_server, name, default_port, g_iomgr_args.pollset_set, on_done,
      addresses, balancer_addresses, service_config_json, query_timeout_ms,
-     combiner);
+     std::move(work_serializer));
  ++g_resolution_count;
  static grpc_millis last_resolution_time = 0;
  grpc_millis now =
@@ -274,7 +274,7 @@ static void on_first_resolution(OnResolutionCallbackArg* cb_arg) {
  gpr_mu_unlock(g_iomgr_args.mu);
}
-static void start_test_under_combiner(void* arg, grpc_error* /*error*/) {
+static void start_test_under_work_serializer(void* arg) {
  OnResolutionCallbackArg* res_cb_arg =
      static_cast<OnResolutionCallbackArg*>(arg);
  res_cb_arg->result_handler = new ResultHandler();
@@ -286,7 +286,7 @@ static void start_test_under_combiner(void* arg, grpc_error* /*error*/) {
  GPR_ASSERT(uri != nullptr);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = std::unique_ptr<grpc_core::Resolver::ResultHandler>(
      res_cb_arg->result_handler);
  g_resolution_count = 0;
@@ -310,9 +310,9 @@ static void test_cooldown() {
  OnResolutionCallbackArg* res_cb_arg = new OnResolutionCallbackArg();
  res_cb_arg->uri_str = "dns:127.0.0.1";
- g_combiner->Run(
-     GRPC_CLOSURE_CREATE(start_test_under_combiner, res_cb_arg, nullptr),
-     GRPC_ERROR_NONE);
+ (*g_work_serializer)
+     ->Run([res_cb_arg]() { start_test_under_work_serializer(res_cb_arg); },
+           DEBUG_LOCATION);
  grpc_core::ExecCtx::Get()->Flush();
  poll_pollset_until_request_done(&g_iomgr_args);
  iomgr_args_finish(&g_iomgr_args);
@@ -322,7 +322,8 @@ int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
- g_combiner = grpc_combiner_create();
+ auto work_serializer = std::make_shared<grpc_core::WorkSerializer>();
+ g_work_serializer = &work_serializer;
  g_default_dns_lookup_ares_locked = grpc_dns_lookup_ares_locked;
  grpc_dns_lookup_ares_locked = test_dns_lookup_ares_locked;
@@ -330,10 +331,7 @@ int main(int argc, char** argv) {
  grpc_set_resolver_impl(&test_resolver);
  test_cooldown();
- {
-   grpc_core::ExecCtx exec_ctx;
-   GRPC_COMBINER_UNREF(g_combiner, "test");
- }
  grpc_shutdown_blocking();
  GPR_ASSERT(g_all_callbacks_invoked);
  return 0;

@@ -25,10 +25,10 @@
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/memory.h"
-#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/util/test_config.h"
-static grpc_core::Combiner* g_combiner;
+static std::shared_ptr<grpc_core::WorkSerializer>* g_work_serializer;
class TestResultHandler : public grpc_core::Resolver::ResultHandler {
  void ReturnResult(grpc_core::Resolver::Result /*result*/) override {}
@@ -44,7 +44,7 @@ static void test_succeeds(grpc_core::ResolverFactory* factory,
  GPR_ASSERT(uri);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = absl::make_unique<TestResultHandler>();
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -61,7 +61,7 @@ static void test_fails(grpc_core::ResolverFactory* factory,
  GPR_ASSERT(uri);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = absl::make_unique<TestResultHandler>();
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -73,7 +73,8 @@ int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
- g_combiner = grpc_combiner_create();
+ auto work_serializer = std::make_shared<grpc_core::WorkSerializer>();
+ g_work_serializer = &work_serializer;
  grpc_core::ResolverFactory* dns =
      grpc_core::ResolverRegistry::LookupResolverFactory("dns");
@@ -89,10 +90,6 @@ int main(int argc, char** argv) {
  } else {
    test_succeeds(dns, "dns://8.8.8.8/8.8.8.8:8888");
  }
- {
-   grpc_core::ExecCtx exec_ctx;
-   GRPC_COMBINER_UNREF(g_combiner, "test");
- }
  grpc_shutdown();
  return 0;

@@ -28,7 +28,7 @@
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "test/core/util/test_config.h"
@@ -63,7 +63,7 @@ class ResultHandler : public grpc_core::Resolver::ResultHandler {
};
static grpc_core::OrphanablePtr<grpc_core::Resolver> build_fake_resolver(
-   grpc_core::Combiner* combiner,
+   std::shared_ptr<grpc_core::WorkSerializer> work_serializer,
    grpc_core::FakeResolverResponseGenerator* response_generator,
    std::unique_ptr<grpc_core::Resolver::ResultHandler> result_handler) {
  grpc_core::ResolverFactory* factory =
@@ -74,7 +74,7 @@ static grpc_core::OrphanablePtr<grpc_core::Resolver> build_fake_resolver(
  grpc_channel_args channel_args = {1, &generator_arg};
  grpc_core::ResolverArgs args;
  args.args = &channel_args;
- args.combiner = combiner;
+ args.work_serializer = std::move(work_serializer);
  args.result_handler = std::move(result_handler);
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -107,14 +107,15 @@ static grpc_core::Resolver::Result create_new_resolver_result() {
static void test_fake_resolver() {
  grpc_core::ExecCtx exec_ctx;
- grpc_core::Combiner* combiner = grpc_combiner_create();
+ std::shared_ptr<grpc_core::WorkSerializer> work_serializer =
+     std::make_shared<grpc_core::WorkSerializer>();
  // Create resolver.
  ResultHandler* result_handler = new ResultHandler();
  grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
      response_generator =
          grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver = build_fake_resolver(
-     combiner, response_generator.get(),
+     work_serializer, response_generator.get(),
      std::unique_ptr<grpc_core::Resolver::ResultHandler>(result_handler));
  GPR_ASSERT(resolver.get() != nullptr);
  resolver->StartLocked();
@@ -195,7 +196,6 @@ static void test_fake_resolver() {
                 nullptr);
  // Clean up.
  resolver.reset();
- GRPC_COMBINER_UNREF(combiner, "test_fake_resolver");
}
int main(int argc, char** argv) {

@@ -24,11 +24,11 @@
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/util/test_config.h"
-static grpc_core::Combiner* g_combiner;
+static std::shared_ptr<grpc_core::WorkSerializer>* g_work_serializer;
class ResultHandler : public grpc_core::Resolver::ResultHandler {
 public:
@@ -46,7 +46,7 @@ static void test_succeeds(grpc_core::ResolverFactory* factory,
  GPR_ASSERT(uri);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = absl::make_unique<ResultHandler>();
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -67,7 +67,7 @@ static void test_fails(grpc_core::ResolverFactory* factory,
  GPR_ASSERT(uri);
  grpc_core::ResolverArgs args;
  args.uri = uri;
- args.combiner = g_combiner;
+ args.work_serializer = *g_work_serializer;
  args.result_handler = absl::make_unique<ResultHandler>();
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver =
      factory->CreateResolver(std::move(args));
@@ -79,7 +79,8 @@ int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
- g_combiner = grpc_combiner_create();
+ auto work_serializer = std::make_shared<grpc_core::WorkSerializer>();
+ g_work_serializer = &work_serializer;
  grpc_core::ResolverFactory* ipv4 =
      grpc_core::ResolverRegistry::LookupResolverFactory("ipv4");
@@ -100,10 +101,6 @@ int main(int argc, char** argv) {
  test_fails(ipv6, "ipv6:[::]:123456");
  test_fails(ipv6, "ipv6:www.google.com");
- {
-   grpc_core::ExecCtx exec_ctx;
-   GRPC_COMBINER_UNREF(g_combiner, "test");
- }
  grpc_shutdown();
  return 0;

@@ -50,7 +50,7 @@ static grpc_ares_request* (*iomgr_dns_lookup_ares_locked)(
    std::unique_ptr<grpc_core::ServerAddressList>* addresses,
    std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
    char** service_config_json, int query_timeout_ms,
-   grpc_core::Combiner* combiner);
+   std::shared_ptr<grpc_core::WorkSerializer> combiner);
static void (*iomgr_cancel_ares_request_locked)(grpc_ares_request* request);
@@ -108,11 +108,12 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
    std::unique_ptr<grpc_core::ServerAddressList>* addresses,
    std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
    char** service_config_json, int query_timeout_ms,
-   grpc_core::Combiner* combiner) {
+   std::shared_ptr<grpc_core::WorkSerializer> work_serializer) {
  if (0 != strcmp(addr, "test")) {
    return iomgr_dns_lookup_ares_locked(
        dns_server, addr, default_port, interested_parties, on_done, addresses,
-       balancer_addresses, service_config_json, query_timeout_ms, combiner);
+       balancer_addresses, service_config_json, query_timeout_ms,
+       std::move(work_serializer));
  }
  grpc_error* error = GRPC_ERROR_NONE;

@@ -55,7 +55,7 @@ class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
      const std::string& delegate_policy_name, intptr_t initial_refcount = 1)
      : LoadBalancingPolicy(std::move(args), initial_refcount) {
    Args delegate_args;
-   delegate_args.combiner = combiner();
+   delegate_args.work_serializer = work_serializer();
    delegate_args.channel_control_helper = std::move(delegating_helper);
    delegate_args.args = args.args;
    delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(

@@ -225,10 +225,8 @@ class CountedService : public ServiceType {
    response_count_ = 0;
  }
- protected:
-  grpc_core::Mutex mu_;
 private:
+  grpc_core::Mutex mu_;
  size_t request_count_ = 0;
  size_t response_count_ = 0;
};
@@ -286,7 +284,6 @@ class BackendServiceImpl
    clients_.insert(client);
  }
-  grpc_core::Mutex mu_;
  grpc_core::Mutex clients_mu_;
  std::set<grpc::string> clients_;
};

@@ -36,9 +36,9 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/thd.h"
-#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/util/cmdline.h"
#include "test/core/util/port.h"
@@ -81,7 +81,7 @@ struct ArgsStruct {
  gpr_mu* mu;
  grpc_pollset* pollset;
  grpc_pollset_set* pollset_set;
- grpc_core::Combiner* lock;
+ std::shared_ptr<grpc_core::WorkSerializer> lock;
  grpc_channel_args* channel_args;
};
@@ -90,7 +90,7 @@ void ArgsInit(ArgsStruct* args) {
  grpc_pollset_init(args->pollset, &args->mu);
  args->pollset_set = grpc_pollset_set_create();
  grpc_pollset_set_add_pollset(args->pollset_set, args->pollset);
- args->lock = grpc_combiner_create();
+ args->lock = std::make_shared<grpc_core::WorkSerializer>();
  gpr_atm_rel_store(&args->done_atm, 0);
  args->channel_args = nullptr;
}
@@ -109,7 +109,6 @@ void ArgsFinish(ArgsStruct* args) {
  grpc_core::ExecCtx::Get()->Flush();
  grpc_pollset_destroy(args->pollset);
  gpr_free(args->pollset);
- GRPC_COMBINER_UNREF(args->lock, nullptr);
}
void PollPollsetUntilRequestDone(ArgsStruct* args) {

@@ -50,12 +50,12 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/orphanable.h"
-#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils.h"
+#include "src/core/lib/iomgr/work_serializer.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -193,7 +193,7 @@ struct ArgsStruct {
  gpr_mu* mu;
  grpc_pollset* pollset;
  grpc_pollset_set* pollset_set;
- grpc_core::Combiner* lock;
+ std::shared_ptr<grpc_core::WorkSerializer> lock;
  grpc_channel_args* channel_args;
  vector<GrpcLBAddress> expected_addrs;
  std::string expected_service_config_string;
@@ -207,7 +207,7 @@ void ArgsInit(ArgsStruct* args) {
  grpc_pollset_init(args->pollset, &args->mu);
  args->pollset_set = grpc_pollset_set_create();
  grpc_pollset_set_add_pollset(args->pollset_set, args->pollset);
- args->lock = grpc_combiner_create();
+ args->lock = std::make_shared<grpc_core::WorkSerializer>();
  gpr_atm_rel_store(&args->done_atm, 0);
  args->channel_args = nullptr;
}
@@ -227,7 +227,6 @@ void ArgsFinish(ArgsStruct* args) {
  grpc_core::ExecCtx::Get()->Flush();
  grpc_pollset_destroy(args->pollset);
  gpr_free(args->pollset);
- GRPC_COMBINER_UNREF(args->lock, nullptr);
}
gpr_timespec NSecondDeadline(int seconds) {
@@ -550,10 +549,7 @@ void InjectBrokenNameServerList(ares_channel channel) {
  GPR_ASSERT(ares_set_servers_ports(channel, dns_server_addrs) == ARES_SUCCESS);
}
-void StartResolvingLocked(void* arg, grpc_error* /*unused*/) {
- grpc_core::Resolver* r = static_cast<grpc_core::Resolver*>(arg);
- r->StartLocked();
-}
+void StartResolvingLocked(grpc_core::Resolver* r) { r->StartLocked(); }
void RunResolvesRelevantRecordsTest(
    std::unique_ptr<grpc_core::Resolver::ResultHandler> (*CreateResultHandler)(
@@ -632,9 +628,9 @@ void RunResolvesRelevantRecordsTest(
    CreateResultHandler(&args));
  grpc_channel_args_destroy(resolver_args);
  gpr_free(whole_uri);
- args.lock->Run(
-     GRPC_CLOSURE_CREATE(StartResolvingLocked, resolver.get(), nullptr),
-     GRPC_ERROR_NONE);
+ auto* resolver_ptr = resolver.get();
+ args.lock->Run([resolver_ptr]() { StartResolvingLocked(resolver_ptr); },
+                DEBUG_LOCATION);
  grpc_core::ExecCtx::Get()->Flush();
  PollPollsetUntilRequestDone(&args);
  ArgsFinish(&args);

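The last hunk shows the closure-to-lambda conversion at its simplest: a `void* arg` callback plus cast becomes a typed function, and the call site captures the raw pointer in a lambda handed to `Run`. One caveat the test relies on: the captured pointer must outlive the queued lambda (here the resolver is kept alive until the pollset loop finishes). Reduced to a sketch with stand-in types, not gRPC's:

    #include <cstdio>
    #include <functional>

    // Stand-in resolver; only StartLocked() matters for the pattern.
    struct ToyResolver {
      void StartLocked() { std::puts("resolution started"); }
    };

    void StartResolvingLocked(ToyResolver* r) { r->StartLocked(); }

    int main() {
      ToyResolver resolver;
      ToyResolver* resolver_ptr = &resolver;
      // Equivalent of: args.lock->Run([resolver_ptr]() { ... }, DEBUG_LOCATION);
      std::function<void()> queued = [resolver_ptr]() {
        StartResolvingLocked(resolver_ptr);
      };
      queued();  // the serializer would invoke this; resolver must still be alive
    }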