From 780d41224c9e4c3198bfc805faf3fec19e25d929 Mon Sep 17 00:00:00 2001
From: Yash Tibrewal
Date: Fri, 18 Oct 2019 15:07:54 -0700
Subject: [PATCH] Squashed commit of the following:

commit 1547cb209ad4f6899bf10c06c34b814783fd3924
Author: Yash Tibrewal
Date:   Fri Oct 18 13:12:55 2019 -0700

    Revert some other GRPC_CLOSURE_RUN till other issues are fixed

commit 3edeee7ce9a06eec8901dfe01ea45fb6f5505dbc
Merge: 22b343e4fb e8f78e7a5d
Author: Yash Tibrewal
Date:   Fri Oct 18 12:26:26 2019 -0700

    Merge branch 'master' into combinernew

commit 22b343e4fb823e625646864454fe6ea3f9f371de
Author: Yash Tibrewal
Date:   Fri Oct 18 12:22:34 2019 -0700

    Change some TCP posix closures to GRPC_CLOSURE_RUN

commit 19e60dfe8f6426236c618779f569df7dfed51597
Merge: 153bdcbc97 feae38d3ab
Author: Yash Tibrewal
Date:   Thu Oct 17 11:56:46 2019 -0700

    Merge branch 'master' into combinernew

commit 153bdcbc974126dfd123b48e9bfb60e89dc3890e
Author: Yash Tibrewal
Date:   Thu Oct 17 11:41:14 2019 -0700

    Proxy fixture fix

commit c6da80bcce6fdc9bec62a6144d775382f165ba93
Merge: 6a32264cdf 98abc22f4c
Author: Yash Tibrewal
Date:   Fri Oct 11 17:05:18 2019 -0700

    Merge branch 'master' into combinernew

commit 6a32264cdf35c4b833748bdc64efd553435ce87c
Author: Yash Tibrewal
Date:   Fri Oct 11 17:01:55 2019 -0700

    Reviewer comments

commit 6bbd3a1c3c6c23ae4401020f1981ac003524aa7d
Author: Yash Tibrewal
Date:   Thu Oct 10 11:55:43 2019 -0700

    Fallback cleanup

commit aaa04526a2acf225825e218174f07a845af34546
Author: Yash Tibrewal
Date:   Thu Oct 10 11:24:18 2019 -0700

    Clean up

commit 4266be13d554136af02d50e5f92dd95a914f9839
Author: Yash Tibrewal
Date:   Thu Oct 10 11:20:05 2019 -0700

    Make sure start_ping is called before finish_ping for bdp and keepalive

commit 14107957aa5a17bd0a46b019cffdab41ff60b0f2
Author: Yash Tibrewal
Date:   Wed Oct 9 18:56:07 2019 -0700

    chttp2 fixes

commit 5643aa6cb388a508d45cd9aa6a9b65d06a009c7e
Author: Yash Tibrewal
Date:   Wed Oct 9 18:25:19 2019 -0700

    Remove closure list scheduling from combiners

commit c59644943084ca7c2f0b67cc4a4679901454f207
Author: Yash Tibrewal
Date:   Wed Oct 9 17:35:54 2019 -0700

    ares windows fix

commit 9f933903b969d290acd8fb52e57c37d820f16f34
Author: Yash Tibrewal
Date:   Wed Oct 9 17:23:11 2019 -0700

    More fixes

commit 3c3a7d0e9b365b086c2bec9b87c600dae05c4689
Author: Yash Tibrewal
Date:   Wed Oct 9 16:08:07 2019 -0700

    Fix errors

commit 56539cc448a225c4936e712d29cd9a1022320aae
Author: Yash Tibrewal
Date:   Wed Oct 9 15:22:28 2019 -0700

    Everything compiles

commit 714ec01e4b61972c32fd1102d9e46da9e91d70fb
Author: Yash Tibrewal
Date:   Wed Oct 9 13:44:18 2019 -0700

    src compiles

commit 54dcbd170d08e91b20a83cc0c9e23d01bc1e05a7
Author: Yash Tibrewal
Date:   Wed Oct 9 13:16:08 2019 -0700

    chttp2_transport changes

commit 7a3388b077d92b741679e834f22f4f4731394c66
Author: Yash Tibrewal
Date:   Tue Oct 8 18:33:55 2019 -0700

    resource quota and lb policy

commit 714e4c849fea25b2650b04c05fc9f9116c9659ca
Author: Yash Tibrewal
Date:   Tue Oct 8 17:23:04 2019 -0700

    Further

commit 1d17ad7d444a018dd7a0bab3c2351d25502d8a16
Author: Yash Tibrewal
Date:   Tue Oct 8 13:34:52 2019 -0700

    ares ev driver windows changes

commit 3110c062c5e57eb8241db7699c6638cdb68a5a88
Author: Yash Tibrewal
Date:   Tue Oct 8 12:47:37 2019 -0700

    ares dns changes

commit 0e10bc17eac160189c390e54f9c91bbc666ff697
Author: Yash Tibrewal
Date:   Tue Oct 8 12:24:45 2019 -0700

    Add dns_resolver changes

commit 4a71a911e8249dfbcf4a2c1397e3b0691265fe0b
Author: Yash Tibrewal
Date:   Tue Oct 8 12:08:10 2019 -0700

    Add fake_resolver changes

commit 8610a64ec9cc3bae10a5816e2a4800e0755bbb71
Author: Yash Tibrewal
Date:   Mon Oct 7 19:31:13 2019 -0700

    Remaining one from xds_client

commit 5f22055d0d3d177b8e3256e371b828abd3b88641
Author: Yash Tibrewal
Date:   Mon Oct 7 18:47:28 2019 -0700

    One left from xds_client.cc

commit 4b1223f8753c103760cf9a15660a6786859a607f
Author: Yash Tibrewal
Date:   Mon Oct 7 17:17:12 2019 -0700

    modifications for xds.cc

commit a17bbbd840f56cbc608b463fb04ebabcc8152c01
Author: Yash Tibrewal
Date:   Mon Oct 7 13:06:25 2019 -0700

    grpclb.cc changes

commit 3a33ed4762616397281a7d0e60e07d92439438f3
Merge: 11058748fd 3d363368ca
Author: Yash Tibrewal
Date:   Mon Oct 7 11:24:11 2019 -0700

    Merge branch 'combinernew' of github.com:yashykt/grpc into combinernew

commit 3d363368ca0a0779182afeda0eb3279dc951d757
Author: Yash Tibrewal
Date:   Mon Oct 7 11:18:00 2019 -0700

    New combiner
---
 .../filters/client_channel/client_channel.cc  |  52 ++--
 .../ext/filters/client_channel/lb_policy.cc   |   5 +-
 .../ext/filters/client_channel/lb_policy.h    |   6 +-
 .../client_channel/lb_policy/grpclb/grpclb.cc | 126 ++++++--
 .../client_channel/lb_policy/xds/xds.cc       |  70 ++++-
 .../ext/filters/client_channel/resolver.cc    |   3 +-
 .../ext/filters/client_channel/resolver.h     |   6 +-
 .../resolver/dns/c_ares/dns_resolver_ares.cc  |  26 +-
 .../dns/c_ares/grpc_ares_ev_driver.cc         |  56 +++-
 .../resolver/dns/c_ares/grpc_ares_ev_driver.h |   6 +-
 .../dns/c_ares/grpc_ares_ev_driver_libuv.cc   |  13 +-
 .../dns/c_ares/grpc_ares_ev_driver_posix.cc   |   4 +-
 .../dns/c_ares/grpc_ares_ev_driver_windows.cc |  87 ++++--
 .../resolver/dns/c_ares/grpc_ares_wrapper.cc  |  24 +-
 .../resolver/dns/c_ares/grpc_ares_wrapper.h   |   2 +-
 .../dns/c_ares/grpc_ares_wrapper_fallback.cc  |   4 +-
 .../resolver/dns/native/dns_resolver.cc       |  31 +-
 .../resolver/fake/fake_resolver.cc            |  48 ++-
 .../filters/client_channel/resolver_factory.h |   2 +-
 .../client_channel/resolver_registry.cc       |   2 +-
 .../client_channel/resolver_registry.h        |   2 +-
 .../filters/client_channel/xds/xds_client.cc  | 127 ++++++--
 .../filters/client_channel/xds/xds_client.h   |   4 +-
 .../chttp2/transport/chttp2_transport.cc      | 290 +++++++++++++-----
 .../chttp2/transport/hpack_parser.cc          |   5 +-
 .../ext/transport/chttp2/transport/internal.h |   8 +-
 .../ext/transport/chttp2/transport/writing.cc |   3 +
 src/core/lib/iomgr/combiner.cc                | 116 +++----
 src/core/lib/iomgr/combiner.h                 |  39 ++-
 src/core/lib/iomgr/exec_ctx.h                 |   5 +-
 src/core/lib/iomgr/resource_quota.cc          |  46 ++-
 src/core/lib/iomgr/tcp_posix.cc               |   2 +-
 src/core/lib/transport/connectivity_state.cc  |  16 +-
 src/core/lib/transport/connectivity_state.h   |   9 +-
 .../dns_resolver_connectivity_test.cc         |   4 +-
 .../resolvers/dns_resolver_cooldown_test.cc   |  12 +-
 .../resolvers/dns_resolver_test.cc            |   2 +-
 .../resolvers/fake_resolver_test.cc           |   4 +-
 .../resolvers/sockaddr_resolver_test.cc       |   2 +-
 .../end2end/fixtures/http_proxy_fixture.cc    | 123 ++++++--
 test/core/end2end/fuzzers/api_fuzzer.cc       |   2 +-
 test/core/end2end/goaway_server_test.cc       |   4 +-
 test/core/iomgr/combiner_test.cc              |  33 +-
 test/cpp/microbenchmarks/bm_closure.cc        | 100 ++----
 test/cpp/naming/cancel_ares_query_test.cc     |   2 +-
 test/cpp/naming/resolver_component_test.cc    |   8 +-
 46 files changed, 1007 insertions(+), 534 deletions(-)

diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 6168000c6cc..033816a0b43 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -282,7 +282,7 @@ class ChannelData {
   //
   // Fields used in the control plane. Guarded by combiner.
   //
-  grpc_combiner* combiner_;
+  Combiner* combiner_;
   grpc_pollset_set* interested_parties_;
   RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
   OrphanablePtr<ResolvingLoadBalancingPolicy> resolving_lb_policy_;
@@ -1044,10 +1044,10 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
         : parent_(std::move(parent)),
           state_(new_state),
           connected_subchannel_(std::move(connected_subchannel)) {
-      GRPC_CLOSURE_INIT(
-          &closure_, ApplyUpdateInControlPlaneCombiner, this,
-          grpc_combiner_scheduler(parent_->parent_->chand_->combiner_));
-      GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
+      parent_->parent_->chand_->combiner_->Run(
+          GRPC_CLOSURE_INIT(&closure_, ApplyUpdateInControlPlaneCombiner,
+                            this, nullptr),
+          GRPC_ERROR_NONE);
     }

    private:
@@ -1140,9 +1140,8 @@ ChannelData::ExternalConnectivityWatcher::ExternalConnectivityWatcher(
   grpc_polling_entity_add_to_pollset_set(&pollent_,
                                          chand_->interested_parties_);
   GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ExternalConnectivityWatcher");
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(&add_closure_, AddWatcherLocked, this,
-                        grpc_combiner_scheduler(chand_->combiner_)),
+  chand_->combiner_->Run(
+      GRPC_CLOSURE_INIT(&add_closure_, AddWatcherLocked, this, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -1169,9 +1168,8 @@ void ChannelData::ExternalConnectivityWatcher::Notify(
   // Not needed in state SHUTDOWN, because the tracker will
   // automatically remove all watchers in that case.
   if (state != GRPC_CHANNEL_SHUTDOWN) {
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this,
-                          grpc_combiner_scheduler(chand_->combiner_)),
+    chand_->combiner_->Run(
+        GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this, nullptr),
         GRPC_ERROR_NONE);
   }
 }
@@ -1184,9 +1182,8 @@ void ChannelData::ExternalConnectivityWatcher::Cancel() {
   }
   GRPC_CLOSURE_SCHED(on_complete_, GRPC_ERROR_CANCELLED);
   // Hop back into the combiner to clean up.
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this,
-                        grpc_combiner_scheduler(chand_->combiner_)),
+  chand_->combiner_->Run(
+      GRPC_CLOSURE_INIT(&remove_closure_, RemoveWatcherLocked, this, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -1223,9 +1220,11 @@ class ChannelData::ConnectivityWatcherAdder {
         initial_state_(initial_state),
         watcher_(std::move(watcher)) {
     GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherAdder");
-    GRPC_CLOSURE_INIT(&closure_, &ConnectivityWatcherAdder::AddWatcherLocked,
-                      this, grpc_combiner_scheduler(chand_->combiner_));
-    GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
+    chand_->combiner_->Run(
+        GRPC_CLOSURE_INIT(&closure_,
+                          &ConnectivityWatcherAdder::AddWatcherLocked, this,
+                          nullptr),
+        GRPC_ERROR_NONE);
   }

  private:
@@ -1255,10 +1254,11 @@ class ChannelData::ConnectivityWatcherRemover {
                              AsyncConnectivityStateWatcherInterface* watcher)
       : chand_(chand), watcher_(watcher) {
     GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherRemover");
-    GRPC_CLOSURE_INIT(&closure_,
-                      &ConnectivityWatcherRemover::RemoveWatcherLocked, this,
-                      grpc_combiner_scheduler(chand_->combiner_));
-    GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
+    chand_->combiner_->Run(
+        GRPC_CLOSURE_INIT(&closure_,
+                          &ConnectivityWatcherRemover::RemoveWatcherLocked,
+                          this, nullptr),
+        GRPC_ERROR_NONE);
   }

  private:
@@ -1883,10 +1883,9 @@ void ChannelData::StartTransportOp(grpc_channel_element* elem,
   // Pop into control plane combiner for remaining ops.
   op->handler_private.extra_arg = elem;
   GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "start_transport_op");
-  GRPC_CLOSURE_SCHED(
+  chand->combiner_->Run(
       GRPC_CLOSURE_INIT(&op->handler_private.closure,
-                        ChannelData::StartTransportOpLocked, op,
-                        grpc_combiner_scheduler(chand->combiner_)),
+                        ChannelData::StartTransportOpLocked, op, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -1953,9 +1952,8 @@ grpc_connectivity_state ChannelData::CheckConnectivityState(
   grpc_connectivity_state out = state_tracker_.state();
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
     GRPC_CHANNEL_STACK_REF(owning_stack_, "TryToConnect");
-    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(TryToConnectLocked, this,
-                                           grpc_combiner_scheduler(combiner_)),
-                       GRPC_ERROR_NONE);
+    combiner_->Run(GRPC_CLOSURE_CREATE(TryToConnectLocked, this, nullptr),
+                   GRPC_ERROR_NONE);
   }
   return out;
 }
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 41a7d2d07be..d601b9f31b4 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -105,9 +105,8 @@ LoadBalancingPolicy::PickResult LoadBalancingPolicy::QueuePicker::Pick(
   if (!exit_idle_called_) {
     exit_idle_called_ = true;
     parent_->Ref().release();  // ref held by closure.
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(),
-                            grpc_combiner_scheduler(parent_->combiner())),
+    parent_->combiner()->Run(
+        GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(), nullptr),
         GRPC_ERROR_NONE);
   }
   PickResult result;
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 3b7c604d4fa..1a5ea06615c 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -313,7 +313,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     // TODO(roth): Once we have a C++-like interface for combiners, this
     // API should change to take a smart pointer that does pass ownership
     // of a reference.
-    grpc_combiner* combiner = nullptr;
+    Combiner* combiner = nullptr;
     /// Channel control helper.
     /// Note: LB policies MUST NOT call any method on the helper from
     /// their constructor.
@@ -383,7 +383,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
   };

  protected:
-  grpc_combiner* combiner() const { return combiner_; }
+  Combiner* combiner() const { return combiner_; }

   // Note: LB policies MUST NOT call any method on the helper from their
   // constructor.
@@ -396,7 +396,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {

  private:
   /// Combiner under which LB policy actions take place.
-  grpc_combiner* combiner_;
+  Combiner* combiner_;
   /// Owned pointer to interested parties in load balancing decisions.
   grpc_pollset_set* interested_parties_;
   /// Channel control helper.
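The hunks above establish the mechanical pattern the rest of this patch applies file by file: instead of baking the combiner into a closure as its scheduler (grpc_combiner_scheduler) and dispatching through the global GRPC_CLOSURE_SCHED, the closure is initialized with a nullptr scheduler and handed directly to Combiner::Run(), which names the synchronization domain at the call site. A minimal before/after sketch of that change, assuming only the Combiner::Run() API shown above; MyWatcher and DoSomethingLocked are invented illustration names, not code from this patch:

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/combiner.h"
    #include "src/core/lib/iomgr/error.h"

    // Invented example type; only the scheduling calls mirror the patch.
    class MyWatcher {
     public:
      explicit MyWatcher(grpc_core::Combiner* combiner) : combiner_(combiner) {
        // Old pattern (removed by this patch): the combiner was attached to
        // the closure at init time, and GRPC_CLOSURE_SCHED dispatched it.
        //   GRPC_CLOSURE_INIT(&closure_, DoSomethingLocked, this,
        //                     grpc_combiner_scheduler(combiner_));
        //   GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
        //
        // New pattern: nullptr scheduler, explicit Combiner::Run().
        combiner_->Run(
            GRPC_CLOSURE_INIT(&closure_, DoSomethingLocked, this, nullptr),
            GRPC_ERROR_NONE);
      }

     private:
      static void DoSomethingLocked(void* arg, grpc_error* /*error*/) {
        // Runs serialized with all other closures scheduled on combiner_.
        MyWatcher* self = static_cast<MyWatcher*>(arg);
        (void)self;
      }

      grpc_core::Combiner* combiner_;
      grpc_closure closure_;
    };

Run() consumes a ref on the error it is given, which is why call sites in this patch pass GRPC_ERROR_NONE or, in the trampolines that follow, GRPC_ERROR_REF(error).
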
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc index 54b3dd457f7..84f5da7f9b4 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -174,6 +174,12 @@ class GrpcLb : public LoadBalancingPolicy { static bool LoadReportCountersAreZero(grpc_grpclb_request* request); + static void MaybeSendClientLoadReport(void* arg, grpc_error* error); + static void ClientLoadReportDone(void* arg, grpc_error* error); + static void OnInitialRequestSent(void* arg, grpc_error* error); + static void OnBalancerMessageReceived(void* arg, grpc_error* error); + static void OnBalancerStatusReceived(void* arg, grpc_error* error); + static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error); static void ClientLoadReportDoneLocked(void* arg, grpc_error* error); static void OnInitialRequestSentLocked(void* arg, grpc_error* error); @@ -312,17 +318,21 @@ class GrpcLb : public LoadBalancingPolicy { // Helper functions used in UpdateLocked(). void ProcessAddressesAndChannelArgsLocked(const ServerAddressList& addresses, const grpc_channel_args& args); + static void OnBalancerChannelConnectivityChanged(void* arg, + grpc_error* error); static void OnBalancerChannelConnectivityChangedLocked(void* arg, grpc_error* error); void CancelBalancerChannelConnectivityWatchLocked(); // Methods for dealing with fallback state. void MaybeEnterFallbackModeAfterStartup(); + static void OnFallbackTimer(void* arg, grpc_error* error); static void OnFallbackTimerLocked(void* arg, grpc_error* error); // Methods for dealing with the balancer call. void StartBalancerCallLocked(); void StartBalancerCallRetryTimerLocked(); + static void OnBalancerCallRetryTimer(void* arg, grpc_error* error); static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error); // Methods for dealing with the child policy. @@ -783,14 +793,6 @@ GrpcLb::BalancerCallState::BalancerCallState( // Init other data associated with the LB call. grpc_metadata_array_init(&lb_initial_metadata_recv_); grpc_metadata_array_init(&lb_trailing_metadata_recv_); - GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSentLocked, - this, grpc_combiner_scheduler(grpclb_policy()->combiner())); - GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_, - OnBalancerMessageReceivedLocked, this, - grpc_combiner_scheduler(grpclb_policy()->combiner())); - GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_, - OnBalancerStatusReceivedLocked, this, - grpc_combiner_scheduler(grpclb_policy()->combiner())); } GrpcLb::BalancerCallState::~BalancerCallState() { @@ -848,6 +850,8 @@ void GrpcLb::BalancerCallState::StartQuery() { // with the callback. auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent"); self.release(); + GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSent, this, + grpc_schedule_on_exec_ctx); call_error = grpc_call_start_batch_and_execute( lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_); GPR_ASSERT(GRPC_CALL_OK == call_error); @@ -870,6 +874,8 @@ void GrpcLb::BalancerCallState::StartQuery() { // with the callback. 
self = Ref(DEBUG_LOCATION, "on_message_received"); self.release(); + GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_, + OnBalancerMessageReceived, this, grpc_schedule_on_exec_ctx); call_error = grpc_call_start_batch_and_execute( lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_); GPR_ASSERT(GRPC_CALL_OK == call_error); @@ -886,6 +892,8 @@ void GrpcLb::BalancerCallState::StartQuery() { // This callback signals the end of the LB call, so it relies on the initial // ref instead of a new ref. When it's invoked, it's the initial ref that is // unreffed. + GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_, OnBalancerStatusReceived, + this, grpc_schedule_on_exec_ctx); call_error = grpc_call_start_batch_and_execute( lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_); GPR_ASSERT(GRPC_CALL_OK == call_error); @@ -894,14 +902,22 @@ void GrpcLb::BalancerCallState::StartQuery() { void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { const grpc_millis next_client_load_report_time = ExecCtx::Get()->Now() + client_stats_report_interval_; - GRPC_CLOSURE_INIT(&client_load_report_closure_, - MaybeSendClientLoadReportLocked, this, - grpc_combiner_scheduler(grpclb_policy()->combiner())); + GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport, + this, grpc_schedule_on_exec_ctx); grpc_timer_init(&client_load_report_timer_, next_client_load_report_time, &client_load_report_closure_); client_load_report_timer_callback_pending_ = true; } +void GrpcLb::BalancerCallState::MaybeSendClientLoadReport(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + lb_calld->grpclb_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&lb_calld->client_load_report_closure_, + MaybeSendClientLoadReportLocked, lb_calld, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::BalancerCallState::MaybeSendClientLoadReportLocked( void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); @@ -966,8 +982,8 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() { memset(&op, 0, sizeof(op)); op.op = GRPC_OP_SEND_MESSAGE; op.data.send_message.send_message = send_message_payload_; - GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDoneLocked, - this, grpc_combiner_scheduler(grpclb_policy()->combiner())); + GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDone, this, + grpc_schedule_on_exec_ctx); grpc_call_error call_error = grpc_call_start_batch_and_execute( lb_call_, &op, 1, &client_load_report_closure_); if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) { @@ -978,6 +994,15 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() { } } +void GrpcLb::BalancerCallState::ClientLoadReportDone(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + lb_calld->grpclb_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&lb_calld->client_load_report_closure_, + ClientLoadReportDoneLocked, lb_calld, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); @@ -991,6 +1016,15 @@ void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg, lb_calld->ScheduleNextClientLoadReportLocked(); } +void GrpcLb::BalancerCallState::OnInitialRequestSent(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + lb_calld->grpclb_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&lb_calld->lb_on_initial_request_sent_, + 
OnInitialRequestSentLocked, lb_calld, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); @@ -1006,6 +1040,15 @@ void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg, lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent"); } +void GrpcLb::BalancerCallState::OnBalancerMessageReceived(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + lb_calld->grpclb_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_message_received_, + OnBalancerMessageReceivedLocked, lb_calld, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked( void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); @@ -1141,6 +1184,9 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked( op.flags = 0; op.reserved = nullptr; // Reuse the "OnBalancerMessageReceivedLocked" ref taken in StartQuery(). + GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_message_received_, + GrpcLb::BalancerCallState::OnBalancerMessageReceived, + lb_calld, grpc_schedule_on_exec_ctx); const grpc_call_error call_error = grpc_call_start_batch_and_execute( lb_calld->lb_call_, &op, 1, &lb_calld->lb_on_balancer_message_received_); @@ -1150,6 +1196,15 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked( } } +void GrpcLb::BalancerCallState::OnBalancerStatusReceived(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + lb_calld->grpclb_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&lb_calld->lb_on_balancer_status_received_, + OnBalancerStatusReceivedLocked, lb_calld, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked( void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); @@ -1312,12 +1367,6 @@ GrpcLb::GrpcLb(Args args) .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER) .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { - // Initialization. - GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this, - grpc_combiner_scheduler(combiner())); - GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, - &GrpcLb::OnBalancerChannelConnectivityChangedLocked, this, - grpc_combiner_scheduler(args.combiner)); // Record server name. const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const char* server_uri = grpc_channel_arg_get_string(arg); @@ -1410,6 +1459,8 @@ void GrpcLb::UpdateLocked(UpdateArgs args) { // Start timer. grpc_millis deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_; Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback + GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimer, this, + grpc_schedule_on_exec_ctx); grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); // Start watching the channel's connectivity state. If the channel // goes into state TRANSIENT_FAILURE before the timer fires, we go into @@ -1419,6 +1470,9 @@ void GrpcLb::UpdateLocked(UpdateArgs args) { GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); // Ref held by callback. 
Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release(); + GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, + &GrpcLb::OnBalancerChannelConnectivityChanged, this, + grpc_schedule_on_exec_ctx); grpc_client_channel_watch_connectivity_state( client_channel_elem, grpc_polling_entity_create_from_pollset_set(interested_parties()), @@ -1482,6 +1536,16 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked( response_generator_->SetResponse(std::move(result)); } +void GrpcLb::OnBalancerChannelConnectivityChanged(void* arg, + grpc_error* error) { + GrpcLb* self = static_cast(arg); + self->combiner()->Run( + GRPC_CLOSURE_INIT(&self->lb_channel_on_connectivity_changed_, + &GrpcLb::OnBalancerChannelConnectivityChangedLocked, + self, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg, grpc_error* error) { GrpcLb* self = static_cast(arg); @@ -1492,6 +1556,9 @@ void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg, grpc_channel_stack_last_element( grpc_channel_get_channel_stack(self->lb_channel_)); GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); + GRPC_CLOSURE_INIT(&self->lb_channel_on_connectivity_changed_, + &GrpcLb::OnBalancerChannelConnectivityChanged, self, + grpc_schedule_on_exec_ctx); grpc_client_channel_watch_connectivity_state( client_channel_elem, grpc_polling_entity_create_from_pollset_set( @@ -1561,12 +1628,21 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() { // with the callback. auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); self.release(); - GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimerLocked, - this, grpc_combiner_scheduler(combiner())); + GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimer, this, + grpc_schedule_on_exec_ctx); retry_timer_callback_pending_ = true; grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_); } +void GrpcLb::OnBalancerCallRetryTimer(void* arg, grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + grpclb_policy->combiner()->Run( + GRPC_CLOSURE_INIT(&grpclb_policy->lb_on_call_retry_, + &GrpcLb::OnBalancerCallRetryTimerLocked, grpclb_policy, + nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) { GrpcLb* grpclb_policy = static_cast(arg); grpclb_policy->retry_timer_callback_pending_ = false; @@ -1603,6 +1679,14 @@ void GrpcLb::MaybeEnterFallbackModeAfterStartup() { } } +void GrpcLb::OnFallbackTimer(void* arg, grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + grpclb_policy->combiner()->Run( + GRPC_CLOSURE_INIT(&grpclb_policy->lb_on_fallback_, + &GrpcLb::OnFallbackTimerLocked, grpclb_policy, nullptr), + GRPC_ERROR_REF(error)); +} + void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { GrpcLb* grpclb_policy = static_cast(arg); // If we receive a serverlist after the timer fires but before this callback diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc index 617193e068b..e2e6c4f3332 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc @@ -260,6 +260,7 @@ class XdsLb : public LoadBalancingPolicy { grpc_channel_args* CreateChildPolicyArgsLocked( const grpc_channel_args* args); + static void OnDelayedRemovalTimer(void* arg, grpc_error* error); static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error); XdsLb* 
xds_policy() const { return locality_map_->xds_policy(); } @@ -309,6 +310,8 @@ class XdsLb : public LoadBalancingPolicy { private: void OnLocalityStateUpdateLocked(); void UpdateConnectivityStateLocked(); + static void OnDelayedRemovalTimer(void* arg, grpc_error* error); + static void OnFailoverTimer(void* arg, grpc_error* error); static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error); static void OnFailoverTimerLocked(void* arg, grpc_error* error); @@ -382,6 +385,7 @@ class XdsLb : public LoadBalancingPolicy { // Methods for dealing with fallback state. void MaybeCancelFallbackAtStartupChecks(); + static void OnFallbackTimer(void* arg, grpc_error* error); static void OnFallbackTimerLocked(void* arg, grpc_error* error); void UpdateFallbackPolicyLocked(); OrphanablePtr CreateFallbackPolicyLocked( @@ -803,8 +807,8 @@ void XdsLb::UpdateLocked(UpdateArgs args) { // Start fallback-at-startup checks. grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure - GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimerLocked, this, - grpc_combiner_scheduler(combiner())); + GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimer, this, + grpc_schedule_on_exec_ctx); fallback_at_startup_checks_pending_ = true; grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); } @@ -859,6 +863,14 @@ void XdsLb::MaybeCancelFallbackAtStartupChecks() { fallback_at_startup_checks_pending_ = false; } +void XdsLb::OnFallbackTimer(void* arg, grpc_error* error) { + XdsLb* xdslb_policy = static_cast(arg); + xdslb_policy->combiner()->Run( + GRPC_CLOSURE_INIT(&xdslb_policy->lb_on_fallback_, + &XdsLb::OnFallbackTimerLocked, xdslb_policy, nullptr), + GRPC_ERROR_REF(error)); +} + void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { XdsLb* xdslb_policy = static_cast(arg); // If some fallback-at-startup check is done after the timer fires but before @@ -1139,10 +1151,9 @@ XdsLb::PriorityList::LocalityMap::LocalityMap(RefCountedPtr xds_policy, gpr_log(GPR_INFO, "[xdslb %p] Creating priority %" PRIu32, xds_policy_.get(), priority_); } - GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimerLocked, - this, grpc_combiner_scheduler(xds_policy_->combiner())); - GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimerLocked, this, - grpc_combiner_scheduler(xds_policy_->combiner())); + + GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this, + grpc_schedule_on_exec_ctx); // Start the failover timer. 
Ref(DEBUG_LOCATION, "LocalityMap+OnFailoverTimerLocked").release(); grpc_timer_init( @@ -1258,6 +1269,8 @@ void XdsLb::PriorityList::LocalityMap::DeactivateLocked() { xds_policy(), priority_, xds_policy()->locality_retention_interval_ms_); } + GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, + grpc_schedule_on_exec_ctx); grpc_timer_init( &delayed_removal_timer_, ExecCtx::Get()->Now() + xds_policy()->locality_retention_interval_ms_, @@ -1386,6 +1399,15 @@ void XdsLb::PriorityList::LocalityMap::UpdateConnectivityStateLocked() { } } +void XdsLb::PriorityList::LocalityMap::OnDelayedRemovalTimer( + void* arg, grpc_error* error) { + LocalityMap* self = static_cast(arg); + self->xds_policy_->combiner()->Run( + GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, + OnDelayedRemovalTimerLocked, self, nullptr), + GRPC_ERROR_REF(error)); +} + void XdsLb::PriorityList::LocalityMap::OnDelayedRemovalTimerLocked( void* arg, grpc_error* error) { LocalityMap* self = static_cast(arg); @@ -1395,13 +1417,13 @@ void XdsLb::PriorityList::LocalityMap::OnDelayedRemovalTimerLocked( const bool keep = self->priority_list_update().Contains(self->priority_) && self->priority_ <= priority_list->current_priority(); if (!keep) { - // This check is to make sure we always delete the locality maps from the - // lowest priority even if the closures of the back-to-back timers are not - // run in FIFO order. + // This check is to make sure we always delete the locality maps from + // the lowest priority even if the closures of the back-to-back timers + // are not run in FIFO order. // TODO(juanlishen): Eliminate unnecessary maintenance overhead for some // deactivated locality maps when out-of-order closures are run. - // TODO(juanlishen): Check the timer implementation to see if this defense - // is necessary. + // TODO(juanlishen): Check the timer implementation to see if this + // defense is necessary. if (self->priority_ == priority_list->LowestPriority()) { priority_list->priorities_.pop_back(); } else { @@ -1416,6 +1438,15 @@ void XdsLb::PriorityList::LocalityMap::OnDelayedRemovalTimerLocked( self->Unref(DEBUG_LOCATION, "LocalityMap+timer"); } +void XdsLb::PriorityList::LocalityMap::OnFailoverTimer(void* arg, + grpc_error* error) { + LocalityMap* self = static_cast(arg); + self->xds_policy_->combiner()->Run( + GRPC_CLOSURE_INIT(&self->on_failover_timer_, OnFailoverTimerLocked, self, + nullptr), + GRPC_ERROR_REF(error)); +} + void XdsLb::PriorityList::LocalityMap::OnFailoverTimerLocked( void* arg, grpc_error* error) { LocalityMap* self = static_cast(arg); @@ -1438,8 +1469,6 @@ XdsLb::PriorityList::LocalityMap::Locality::Locality( gpr_log(GPR_INFO, "[xdslb %p] created Locality %p for %s", xds_policy(), this, name_->AsHumanReadableString()); } - GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimerLocked, - this, grpc_combiner_scheduler(xds_policy()->combiner())); } XdsLb::PriorityList::LocalityMap::Locality::~Locality() { @@ -1494,8 +1523,8 @@ XdsLb::PriorityList::LocalityMap::Locality::CreateChildPolicyLocked( lb_policy.get()); } // Add the xDS's interested_parties pollset_set to that of the newly created - // child policy. This will make the child policy progress upon activity on xDS - // LB, which in turn is tied to the application's call. + // child policy. This will make the child policy progress upon activity on + // xDS LB, which in turn is tied to the application's call. 
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), xds_policy()->interested_parties()); return lb_policy; @@ -1656,6 +1685,8 @@ void XdsLb::PriorityList::LocalityMap::Locality::DeactivateLocked() { weight_ = 0; // Start a timer to delete the locality. Ref(DEBUG_LOCATION, "Locality+timer").release(); + GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, + grpc_schedule_on_exec_ctx); grpc_timer_init( &delayed_removal_timer_, ExecCtx::Get()->Now() + xds_policy()->locality_retention_interval_ms_, @@ -1663,6 +1694,15 @@ void XdsLb::PriorityList::LocalityMap::Locality::DeactivateLocked() { delayed_removal_timer_callback_pending_ = true; } +void XdsLb::PriorityList::LocalityMap::Locality::OnDelayedRemovalTimer( + void* arg, grpc_error* error) { + Locality* self = static_cast(arg); + self->xds_policy()->combiner()->Run( + GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, + OnDelayedRemovalTimerLocked, self, nullptr), + GRPC_ERROR_REF(error)); +} + void XdsLb::PriorityList::LocalityMap::Locality::OnDelayedRemovalTimerLocked( void* arg, grpc_error* error) { Locality* self = static_cast(arg); diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc index b50c42f6a1c..fd20def0d85 100644 --- a/src/core/ext/filters/client_channel/resolver.cc +++ b/src/core/ext/filters/client_channel/resolver.cc @@ -30,8 +30,7 @@ namespace grpc_core { // Resolver // -Resolver::Resolver(grpc_combiner* combiner, - UniquePtr result_handler) +Resolver::Resolver(Combiner* combiner, UniquePtr result_handler) : InternallyRefCounted(&grpc_trace_resolver_refcount), result_handler_(std::move(result_handler)), combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {} diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h index ebeb3912032..e57ca12a786 100644 --- a/src/core/ext/filters/client_channel/resolver.h +++ b/src/core/ext/filters/client_channel/resolver.h @@ -126,19 +126,19 @@ class Resolver : public InternallyRefCounted { // TODO(roth): Once we have a C++-like interface for combiners, this // API should change to take a RefCountedPtr<>, so that we always take // ownership of a new ref. - explicit Resolver(grpc_combiner* combiner, + explicit Resolver(Combiner* combiner, UniquePtr result_handler); /// Shuts down the resolver. 
virtual void ShutdownLocked() = 0; - grpc_combiner* combiner() const { return combiner_; } + Combiner* combiner() const { return combiner_; } ResultHandler* result_handler() const { return result_handler_.get(); } private: UniquePtr result_handler_; - grpc_combiner* combiner_; + Combiner* combiner_; }; } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index 8924fdf4172..7b142f79001 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -76,6 +76,8 @@ class AresDnsResolver : public Resolver { void MaybeStartResolvingLocked(); void StartResolvingLocked(); + static void OnNextResolution(void* arg, grpc_error* error); + static void OnResolved(void* arg, grpc_error* error); static void OnNextResolutionLocked(void* arg, grpc_error* error); static void OnResolvedLocked(void* arg, grpc_error* error); @@ -152,10 +154,7 @@ AresDnsResolver::AresDnsResolver(ResolverArgs args) if (args.pollset_set != nullptr) { grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set); } - GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolutionLocked, this, - grpc_combiner_scheduler(combiner())); - GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this, - grpc_combiner_scheduler(combiner())); + const grpc_arg* query_timeout_ms_arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS); query_timeout_ms_ = grpc_channel_arg_get_integer( @@ -200,6 +199,13 @@ void AresDnsResolver::ShutdownLocked() { } } +void AresDnsResolver::OnNextResolution(void* arg, grpc_error* error) { + AresDnsResolver* r = static_cast(arg); + r->combiner()->Run(GRPC_CLOSURE_INIT(&r->on_next_resolution_, + OnNextResolutionLocked, r, nullptr), + GRPC_ERROR_REF(error)); +} + void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) { AresDnsResolver* r = static_cast(arg); GRPC_CARES_TRACE_LOG( @@ -317,6 +323,13 @@ char* ChooseServiceConfig(char* service_config_choice_json, return service_config; } +void AresDnsResolver::OnResolved(void* arg, grpc_error* error) { + AresDnsResolver* r = static_cast(arg); + r->combiner()->Run( + GRPC_CLOSURE_INIT(&r->on_resolved_, OnResolvedLocked, r, nullptr), + GRPC_ERROR_REF(error)); +} + void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) { AresDnsResolver* r = static_cast(arg); GPR_ASSERT(r->resolving_); @@ -373,6 +386,8 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) { } else { GRPC_CARES_TRACE_LOG("resolver:%p retrying immediately", r); } + GRPC_CLOSURE_INIT(&r->on_next_resolution_, OnNextResolution, r, + grpc_schedule_on_exec_ctx); grpc_timer_init(&r->next_resolution_timer_, next_try, &r->on_next_resolution_); } @@ -400,6 +415,8 @@ void AresDnsResolver::MaybeStartResolvingLocked() { // new closure API is done, find a way to track this ref with the timer // callback as part of the type system. 
Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release(); + GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolution, this, + grpc_schedule_on_exec_ctx); grpc_timer_init(&next_resolution_timer_, ExecCtx::Get()->Now() + ms_until_next_resolution, &on_next_resolution_); @@ -417,6 +434,7 @@ void AresDnsResolver::StartResolvingLocked() { GPR_ASSERT(!resolving_); resolving_ = true; service_config_json_ = nullptr; + GRPC_CLOSURE_INIT(&on_resolved_, OnResolved, this, grpc_schedule_on_exec_ctx); pending_request_ = grpc_dns_lookup_ares_locked( dns_server_, name_to_resolve_, kDefaultPort, interested_parties_, &on_resolved_, &addresses_, enable_srv_queries_ /* check_grpclb */, diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc index 4ad80786519..cb2ab885807 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc @@ -67,7 +67,7 @@ struct grpc_ares_ev_driver { gpr_refcount refs; /** combiner to synchronize c-ares and I/O callbacks on */ - grpc_combiner* combiner; + grpc_core::Combiner* combiner; /** a list of grpc_fd that this event driver is currently using. */ fd_node* fds; /** is this event driver currently working? */ @@ -132,8 +132,10 @@ static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) { } } +static void on_timeout(void* arg, grpc_error* error); static void on_timeout_locked(void* arg, grpc_error* error); +static void on_ares_backup_poll_alarm(void* arg, grpc_error* error); static void on_ares_backup_poll_alarm_locked(void* arg, grpc_error* error); static void noop_inject_channel_config(ares_channel channel) {} @@ -144,7 +146,7 @@ void (*grpc_ares_test_only_inject_config)(ares_channel channel) = grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver, grpc_pollset_set* pollset_set, int query_timeout_ms, - grpc_combiner* combiner, + grpc_core::Combiner* combiner, grpc_ares_request* request) { *ev_driver = grpc_core::New(); ares_options opts; @@ -173,11 +175,6 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver, grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner); (*ev_driver) ->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel); - GRPC_CLOSURE_INIT(&(*ev_driver)->on_timeout_locked, on_timeout_locked, - *ev_driver, grpc_combiner_scheduler(combiner)); - GRPC_CLOSURE_INIT(&(*ev_driver)->on_ares_backup_poll_alarm_locked, - on_ares_backup_poll_alarm_locked, *ev_driver, - grpc_combiner_scheduler(combiner)); (*ev_driver)->query_timeout_ms = query_timeout_ms; return GRPC_ERROR_NONE; } @@ -235,6 +232,13 @@ static grpc_millis calculate_next_ares_backup_poll_alarm_ms( grpc_core::ExecCtx::Get()->Now(); } +static void on_timeout(void* arg, grpc_error* error) { + grpc_ares_ev_driver* driver = static_cast(arg); + driver->combiner->Run(GRPC_CLOSURE_INIT(&driver->on_timeout_locked, + on_timeout_locked, driver, nullptr), + GRPC_ERROR_REF(error)); +} + static void on_timeout_locked(void* arg, grpc_error* error) { grpc_ares_ev_driver* driver = static_cast(arg); GRPC_CARES_TRACE_LOG( @@ -247,6 +251,14 @@ static void on_timeout_locked(void* arg, grpc_error* error) { grpc_ares_ev_driver_unref(driver); } +static void on_ares_backup_poll_alarm(void* arg, grpc_error* error) { + grpc_ares_ev_driver* driver = static_cast(arg); + driver->combiner->Run( + 
GRPC_CLOSURE_INIT(&driver->on_ares_backup_poll_alarm_locked, + on_ares_backup_poll_alarm_locked, driver, nullptr), + GRPC_ERROR_REF(error)); +} + /* In case of non-responsive DNS servers, dropped packets, etc., c-ares has * intelligent timeout and retry logic, which we can take advantage of by * polling ares_process_fd on time intervals. Overall, the c-ares library is @@ -279,6 +291,9 @@ static void on_ares_backup_poll_alarm_locked(void* arg, grpc_error* error) { grpc_millis next_ares_backup_poll_alarm = calculate_next_ares_backup_poll_alarm_ms(driver); grpc_ares_ev_driver_ref(driver); + GRPC_CLOSURE_INIT(&driver->on_ares_backup_poll_alarm_locked, + on_ares_backup_poll_alarm, driver, + grpc_schedule_on_exec_ctx); grpc_timer_init(&driver->ares_backup_poll_alarm, next_ares_backup_poll_alarm, &driver->on_ares_backup_poll_alarm_locked); @@ -313,6 +328,13 @@ static void on_readable_locked(void* arg, grpc_error* error) { grpc_ares_ev_driver_unref(ev_driver); } +static void on_readable(void* arg, grpc_error* error) { + fd_node* fdn = static_cast(arg); + fdn->ev_driver->combiner->Run( + GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_locked, fdn, nullptr), + GRPC_ERROR_REF(error)); +} + static void on_writable_locked(void* arg, grpc_error* error) { fd_node* fdn = static_cast(arg); GPR_ASSERT(fdn->writable_registered); @@ -336,6 +358,13 @@ static void on_writable_locked(void* arg, grpc_error* error) { grpc_ares_ev_driver_unref(ev_driver); } +static void on_writable(void* arg, grpc_error* error) { + fd_node* fdn = static_cast(arg); + fdn->ev_driver->combiner->Run( + GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_locked, fdn, nullptr), + GRPC_ERROR_REF(error)); +} + ares_channel* grpc_ares_ev_driver_get_channel_locked( grpc_ares_ev_driver* ev_driver) { return &ev_driver->channel; @@ -365,10 +394,6 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) { fdn->readable_registered = false; fdn->writable_registered = false; fdn->already_shutdown = false; - GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_locked, fdn, - grpc_combiner_scheduler(ev_driver->combiner)); - GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_locked, fdn, - grpc_combiner_scheduler(ev_driver->combiner)); } fdn->next = new_list; new_list = fdn; @@ -380,6 +405,8 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) { GRPC_CARES_TRACE_LOG("request:%p notify read on: %s", ev_driver->request, fdn->grpc_polled_fd->GetName()); + GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable, fdn, + grpc_schedule_on_exec_ctx); fdn->grpc_polled_fd->RegisterForOnReadableLocked(&fdn->read_closure); fdn->readable_registered = true; } @@ -391,6 +418,8 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) { ev_driver->request, fdn->grpc_polled_fd->GetName()); grpc_ares_ev_driver_ref(ev_driver); + GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable, fdn, + grpc_schedule_on_exec_ctx); fdn->grpc_polled_fd->RegisterForOnWriteableLocked( &fdn->write_closure); fdn->writable_registered = true; @@ -435,12 +464,17 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) { "%" PRId64 " ms", ev_driver->request, ev_driver, timeout); grpc_ares_ev_driver_ref(ev_driver); + GRPC_CLOSURE_INIT(&ev_driver->on_timeout_locked, on_timeout, ev_driver, + grpc_schedule_on_exec_ctx); grpc_timer_init(&ev_driver->query_timeout, timeout, &ev_driver->on_timeout_locked); // Initialize the backup poll alarm grpc_millis next_ares_backup_poll_alarm = 
calculate_next_ares_backup_poll_alarm_ms(ev_driver); grpc_ares_ev_driver_ref(ev_driver); + GRPC_CLOSURE_INIT(&ev_driver->on_ares_backup_poll_alarm_locked, + on_ares_backup_poll_alarm, ev_driver, + grpc_schedule_on_exec_ctx); grpc_timer_init(&ev_driver->ares_backup_poll_alarm, next_ares_backup_poll_alarm, &ev_driver->on_ares_backup_poll_alarm_locked); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h index fe51c34bc3e..c17ba176c21 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h @@ -43,7 +43,7 @@ ares_channel* grpc_ares_ev_driver_get_channel_locked( grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver, grpc_pollset_set* pollset_set, int query_timeout_ms, - grpc_combiner* combiner, + grpc_core::Combiner* combiner, grpc_ares_request* request); /* Called back when all DNS lookups have completed. */ @@ -90,12 +90,12 @@ class GrpcPolledFdFactory { /* Creates a new wrapped fd for the current platform */ virtual GrpcPolledFd* NewGrpcPolledFdLocked( ares_socket_t as, grpc_pollset_set* driver_pollset_set, - grpc_combiner* combiner) = 0; + Combiner* combiner) = 0; /* Optionally configures the ares channel after creation */ virtual void ConfigureAresChannelLocked(ares_channel channel) = 0; }; -UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner); +UniquePtr NewGrpcPolledFdFactory(Combiner* combiner); } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc index 81d2f7becd4..f5acec4fda7 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc @@ -41,7 +41,7 @@ void ares_uv_poll_close_cb(uv_handle_t* handle) { Delete(handle); } class GrpcPolledFdLibuv : public GrpcPolledFd { public: - GrpcPolledFdLibuv(ares_socket_t as, grpc_combiner* combiner) + GrpcPolledFdLibuv(ares_socket_t as, Combiner* combiner) : as_(as), combiner_(combiner) { gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, (intptr_t)as); handle_ = New(); @@ -107,7 +107,7 @@ class GrpcPolledFdLibuv : public GrpcPolledFd { grpc_closure* read_closure_ = nullptr; grpc_closure* write_closure_ = nullptr; int poll_events_ = 0; - grpc_combiner* combiner_; + Combiner* combiner_; }; struct AresUvPollCbArg { @@ -153,9 +153,8 @@ void ares_uv_poll_cb(uv_poll_t* handle, int status, int events) { GrpcPolledFdLibuv* polled_fd = reinterpret_cast(handle->data); AresUvPollCbArg* arg = New(handle, status, events); - GRPC_CLOSURE_SCHED( - GRPC_CLOSURE_CREATE(ares_uv_poll_cb_locked, arg, - grpc_combiner_scheduler(polled_fd->combiner_)), + polled_fd->combiner_->Run( + GRPC_CLOSURE_CREATE(ares_uv_poll_cb_locked, arg, nullptr), GRPC_ERROR_NONE); } @@ -163,14 +162,14 @@ class GrpcPolledFdFactoryLibuv : public GrpcPolledFdFactory { public: GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, grpc_pollset_set* driver_pollset_set, - grpc_combiner* combiner) override { + Combiner* combiner) override { return New(as, combiner); } void ConfigureAresChannelLocked(ares_channel channel) override {} }; -UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner) { +UniquePtr NewGrpcPolledFdFactory(Combiner* combiner) { return 
MakeUnique(); } diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc index f167047ebc9..e588010badb 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc @@ -90,14 +90,14 @@ class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory { public: GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, grpc_pollset_set* driver_pollset_set, - grpc_combiner* combiner) override { + Combiner* combiner) override { return New(as, driver_pollset_set); } void ConfigureAresChannelLocked(ares_channel channel) override {} }; -UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner) { +UniquePtr NewGrpcPolledFdFactory(Combiner* combiner) { return MakeUnique(); } diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc index cc49d0a9f7c..437645345ba 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc @@ -97,8 +97,8 @@ class GrpcPolledFdWindows { WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY, }; - GrpcPolledFdWindows(ares_socket_t as, grpc_combiner* combiner, - int address_family, int socket_type) + GrpcPolledFdWindows(ares_socket_t as, Combiner* combiner, int address_family, + int socket_type) : read_buf_(grpc_empty_slice()), write_buf_(grpc_empty_slice()), tcp_write_state_(WRITE_IDLE), @@ -108,22 +108,13 @@ class GrpcPolledFdWindows { gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, as); winsocket_ = grpc_winsocket_create(as, name_); combiner_ = GRPC_COMBINER_REF(combiner, name_); - GRPC_CLOSURE_INIT(&outer_read_closure_, - &GrpcPolledFdWindows::OnIocpReadable, this, - grpc_combiner_scheduler(combiner_)); - GRPC_CLOSURE_INIT(&outer_write_closure_, - &GrpcPolledFdWindows::OnIocpWriteable, this, - grpc_combiner_scheduler(combiner_)); - GRPC_CLOSURE_INIT(&on_tcp_connect_locked_, - &GrpcPolledFdWindows::OnTcpConnectLocked, this, - grpc_combiner_scheduler(combiner_)); GRPC_CLOSURE_INIT(&continue_register_for_on_readable_locked_, &GrpcPolledFdWindows::ContinueRegisterForOnReadableLocked, - this, grpc_combiner_scheduler(combiner_)); + this, nullptr); GRPC_CLOSURE_INIT( &continue_register_for_on_writeable_locked_, &GrpcPolledFdWindows::ContinueRegisterForOnWriteableLocked, this, - grpc_combiner_scheduler(combiner_)); + nullptr); } ~GrpcPolledFdWindows() { @@ -154,8 +145,8 @@ class GrpcPolledFdWindows { GPR_ASSERT(!read_buf_has_data_); read_buf_ = GRPC_SLICE_MALLOC(4192); if (connect_done_) { - GRPC_CLOSURE_SCHED(&continue_register_for_on_readable_locked_, - GRPC_ERROR_NONE); + combiner_->Run(&continue_register_for_on_readable_locked_, + GRPC_ERROR_NONE); } else { GPR_ASSERT(pending_continue_register_for_on_readable_locked_ == nullptr); pending_continue_register_for_on_readable_locked_ = @@ -203,7 +194,10 @@ class GrpcPolledFdWindows { return; } } - grpc_socket_notify_on_read(winsocket_, &outer_read_closure_); + grpc_socket_notify_on_read( + winsocket_, GRPC_CLOSURE_INIT(&outer_read_closure_, + &GrpcPolledFdWindows::OnIocpReadable, + this, grpc_schedule_on_exec_ctx)); } void RegisterForOnWriteableLocked(grpc_closure* write_closure) { @@ -219,8 +213,8 @@ class GrpcPolledFdWindows { 
GPR_ASSERT(write_closure_ == nullptr); write_closure_ = write_closure; if (connect_done_) { - GRPC_CLOSURE_SCHED(&continue_register_for_on_writeable_locked_, - GRPC_ERROR_NONE); + combiner_->Run(&continue_register_for_on_writeable_locked_, + GRPC_ERROR_NONE); } else { GPR_ASSERT(pending_continue_register_for_on_writeable_locked_ == nullptr); pending_continue_register_for_on_writeable_locked_ = @@ -262,7 +256,11 @@ class GrpcPolledFdWindows { ScheduleAndNullWriteClosure( GRPC_WSA_ERROR(wsa_error_code, "WSASend (overlapped)")); } else { - grpc_socket_notify_on_write(winsocket_, &outer_write_closure_); + grpc_socket_notify_on_write( + winsocket_, + GRPC_CLOSURE_INIT(&outer_write_closure_, + &GrpcPolledFdWindows::OnIocpWriteable, this, + grpc_schedule_on_exec_ctx)); } break; case WRITE_PENDING: @@ -439,6 +437,16 @@ class GrpcPolledFdWindows { abort(); } + static void OnTcpConnect(void* arg, grpc_error* error) { + GrpcPolledFdWindows* grpc_polled_fd = + static_cast(arg); + grpc_polled_fd->combiner_->Run( + GRPC_CLOSURE_INIT(&grpc_polled_fd->on_tcp_connect_locked_, + &GrpcPolledFdWindows::OnTcpConnectLocked, + grpc_polled_fd, nullptr), + GRPC_ERROR_REF(error)); + } + static void OnTcpConnectLocked(void* arg, grpc_error* error) { GrpcPolledFdWindows* grpc_polled_fd = static_cast(arg); @@ -479,12 +487,12 @@ class GrpcPolledFdWindows { wsa_connect_error_ = WSA_OPERATION_ABORTED; } if (pending_continue_register_for_on_readable_locked_ != nullptr) { - GRPC_CLOSURE_SCHED(pending_continue_register_for_on_readable_locked_, - GRPC_ERROR_NONE); + combiner_->Run(pending_continue_register_for_on_readable_locked_, + GRPC_ERROR_NONE); } if (pending_continue_register_for_on_writeable_locked_ != nullptr) { - GRPC_CLOSURE_SCHED(pending_continue_register_for_on_writeable_locked_, - GRPC_ERROR_NONE); + combiner_->Run(pending_continue_register_for_on_writeable_locked_, + GRPC_ERROR_NONE); } } @@ -585,11 +593,23 @@ class GrpcPolledFdWindows { return -1; } } + GRPC_CLOSURE_INIT(&on_tcp_connect_locked_, + &GrpcPolledFdWindows::OnTcpConnect, this, + grpc_schedule_on_exec_ctx); grpc_socket_notify_on_write(winsocket_, &on_tcp_connect_locked_); return out; } static void OnIocpReadable(void* arg, grpc_error* error) { + GrpcPolledFdWindows* polled_fd = static_cast(arg); + polled_fd->combiner_->Run( + GRPC_CLOSURE_INIT(&polled_fd->outer_read_closure_, + &GrpcPolledFdWindows::OnIocpReadableLocked, polled_fd, + nullptr), + GRPC_ERROR_REF(error)); + } + + static void OnIocpReadableLocked(void* arg, grpc_error* error) { GrpcPolledFdWindows* polled_fd = static_cast(arg); polled_fd->OnIocpReadableInner(error); } @@ -633,6 +653,15 @@ class GrpcPolledFdWindows { } static void OnIocpWriteable(void* arg, grpc_error* error) { + GrpcPolledFdWindows* polled_fd = static_cast(arg); + polled_fd->combiner_->Run( + GRPC_CLOSURE_INIT(&polled_fd->outer_write_closure_, + &GrpcPolledFdWindows::OnIocpWriteableLocked, + polled_fd, nullptr), + GRPC_ERROR_REF(error)); + } + + static void OnIocpWriteableLocked(void* arg, grpc_error* error) { GrpcPolledFdWindows* polled_fd = static_cast(arg); polled_fd->OnIocpWriteableInner(error); } @@ -669,7 +698,7 @@ class GrpcPolledFdWindows { bool gotten_into_driver_list() const { return gotten_into_driver_list_; } void set_gotten_into_driver_list() { gotten_into_driver_list_ = true; } - grpc_combiner* combiner_; + Combiner* combiner_; char recv_from_source_addr_[200]; ares_socklen_t recv_from_source_addr_len_; grpc_slice read_buf_; @@ -713,7 +742,7 @@ struct SockToPolledFdEntry { * with a GrpcPolledFdWindows 
factory and event driver */ class SockToPolledFdMap { public: - SockToPolledFdMap(grpc_combiner* combiner) { + SockToPolledFdMap(Combiner* combiner) { combiner_ = GRPC_COMBINER_REF(combiner, "sock to polled fd map"); } @@ -832,7 +861,7 @@ class SockToPolledFdMap { private: SockToPolledFdEntry* head_ = nullptr; - grpc_combiner* combiner_; + Combiner* combiner_; }; const struct ares_socket_functions custom_ares_sock_funcs = { @@ -881,12 +910,12 @@ class GrpcPolledFdWindowsWrapper : public GrpcPolledFd { class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory { public: - GrpcPolledFdFactoryWindows(grpc_combiner* combiner) + GrpcPolledFdFactoryWindows(Combiner* combiner) : sock_to_polled_fd_map_(combiner) {} GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, grpc_pollset_set* driver_pollset_set, - grpc_combiner* combiner) override { + Combiner* combiner) override { GrpcPolledFdWindows* polled_fd = sock_to_polled_fd_map_.LookupPolledFd(as); // Set a flag so that the virtual socket "close" method knows it // doesn't need to call ShutdownLocked, since now the driver will. @@ -903,7 +932,7 @@ class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory { SockToPolledFdMap sock_to_polled_fd_map_; }; -UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner) { +UniquePtr NewGrpcPolledFdFactory(Combiner* combiner) { return MakeUnique(combiner); } diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index 0c1a8ba828f..fc1bca76de9 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -350,7 +350,7 @@ done: void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked( grpc_ares_request* r, const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, - bool check_grpclb, int query_timeout_ms, grpc_combiner* combiner) { + bool check_grpclb, int query_timeout_ms, grpc_core::Combiner* combiner) { grpc_error* error = GRPC_ERROR_NONE; grpc_ares_hostbyname_request* hr = nullptr; ares_channel* channel = nullptr; @@ -590,7 +590,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( grpc_pollset_set* interested_parties, grpc_closure* on_done, grpc_core::UniquePtr* addrs, bool check_grpclb, char** service_config_json, int query_timeout_ms, - grpc_combiner* combiner) { + grpc_core::Combiner* combiner) { grpc_ares_request* r = static_cast(gpr_zalloc(sizeof(grpc_ares_request))); r->ev_driver = nullptr; @@ -633,7 +633,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)( grpc_pollset_set* interested_parties, grpc_closure* on_done, grpc_core::UniquePtr* addrs, bool check_grpclb, char** service_config_json, int query_timeout_ms, - grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl; + grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl; static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) { GPR_ASSERT(r != nullptr); @@ -674,7 +674,7 @@ void grpc_ares_cleanup(void) {} typedef struct grpc_resolve_address_ares_request { /* combiner that queries and related callbacks run under */ - grpc_combiner* combiner; + grpc_core::Combiner* combiner; /** the pointer to receive the resolved addresses */ grpc_resolved_addresses** addrs_out; /** currently resolving addresses */ @@ -719,10 +719,20 @@ static void on_dns_lookup_done_locked(void* arg, grpc_error* error) { 
   grpc_core::Delete(r);
 }
+static void on_dns_lookup_done(void* arg, grpc_error* error) {
+  grpc_resolve_address_ares_request* r =
+      static_cast<grpc_resolve_address_ares_request*>(arg);
+  r->combiner->Run(GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked,
+                                     on_dns_lookup_done_locked, r, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
     void* arg, grpc_error* unused_error) {
   grpc_resolve_address_ares_request* r =
       static_cast<grpc_resolve_address_ares_request*>(arg);
+  GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked, on_dns_lookup_done, r,
+                    grpc_schedule_on_exec_ctx);
   r->ares_request = grpc_dns_lookup_ares_locked(
       nullptr /* dns_server */, r->name, r->default_port,
       r->interested_parties, &r->on_dns_lookup_done_locked, &r->addresses,
       false /* check_grpclb */,
@@ -740,14 +750,12 @@ static void grpc_resolve_address_ares_impl(const char* name,
   r->combiner = grpc_combiner_create();
   r->addrs_out = addrs;
   r->on_resolve_address_done = on_done;
-  GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked, on_dns_lookup_done_locked,
-                    r, grpc_combiner_scheduler(r->combiner));
   r->name = name;
   r->default_port = default_port;
   r->interested_parties = interested_parties;
-  GRPC_CLOSURE_SCHED(
+  r->combiner->Run(
       GRPC_CLOSURE_CREATE(grpc_resolve_address_invoke_dns_lookup_ares_locked,
-                          r, grpc_combiner_scheduler(r->combiner)),
+                          r, nullptr),
       GRPC_ERROR_NONE);
 }
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index cc977c06b25..ca5e91139bf 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -65,7 +65,7 @@ extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner);
+    grpc_core::Combiner* combiner);
 /* Cancel the pending grpc_ares_request \a request */
 extern void (*grpc_cancel_ares_request_locked)(grpc_ares_request* request);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index d2de88eaa1e..05d6eaace66 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -31,7 +31,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addrs,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner) {
+    grpc_core::Combiner* combiner) {
   return NULL;
 }
@@ -40,7 +40,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addrs,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
+    grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
 static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {}
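// ---------------------------------------------------------------------------
// [Editor's note, not part of the patch] Under the old API a closure was bound
// to its combiner once, at INIT time; now the same grpc_closure storage is
// re-initialized before every hand-off: once toward the unlocked callback when
// given to iomgr, and again toward the locked callback when given to the
// combiner. A hedged distillation of the request flow above:
void start_lookup_sketch(grpc_resolve_address_ares_request* r) {
  // Point the closure at the unlocked trampoline before starting the lookup.
  GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked, on_dns_lookup_done, r,
                    grpc_schedule_on_exec_ctx);
  // ... pass &r->on_dns_lookup_done_locked as the on_done closure ...
}
void on_dns_lookup_done_sketch(void* arg, grpc_error* error) {
  auto* r = static_cast<grpc_resolve_address_ares_request*>(arg);
  // GRPC_CLOSURE_INIT returns its first argument, so re-pointing the storage
  // at the locked callback nests directly inside the Run() call.
  r->combiner->Run(GRPC_CLOSURE_INIT(&r->on_dns_lookup_done_locked,
                                     on_dns_lookup_done_locked, r, nullptr),
                   GRPC_ERROR_REF(error));
}
// ---------------------------------------------------------------------------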
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 5f674992e64..d7028a5eaa3 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -66,7 +66,9 @@ class NativeDnsResolver : public Resolver {
   void MaybeStartResolvingLocked();
   void StartResolvingLocked();
+  static void OnNextResolution(void* arg, grpc_error* error);
   static void OnNextResolutionLocked(void* arg, grpc_error* error);
+  static void OnResolved(void* arg, grpc_error* error);
   static void OnResolvedLocked(void* arg, grpc_error* error);
   /// name to resolve
@@ -115,11 +117,6 @@ NativeDnsResolver::NativeDnsResolver(ResolverArgs args)
   if (args.pollset_set != nullptr) {
     grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set);
   }
-  GRPC_CLOSURE_INIT(&on_next_resolution_,
-                    NativeDnsResolver::OnNextResolutionLocked, this,
-                    grpc_combiner_scheduler(args.combiner));
-  GRPC_CLOSURE_INIT(&on_resolved_, NativeDnsResolver::OnResolvedLocked, this,
-                    grpc_combiner_scheduler(args.combiner));
 }
 NativeDnsResolver::~NativeDnsResolver() {
@@ -150,6 +147,14 @@ void NativeDnsResolver::ShutdownLocked() {
   }
 }
+void NativeDnsResolver::OnNextResolution(void* arg, grpc_error* error) {
+  NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
+  r->combiner()->Run(
+      GRPC_CLOSURE_INIT(&r->on_next_resolution_,
+                        NativeDnsResolver::OnNextResolutionLocked, r, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void NativeDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
   NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
   r->have_next_resolution_timer_ = false;
@@ -159,6 +164,14 @@ void NativeDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
   r->Unref(DEBUG_LOCATION, "retry-timer");
 }
+void NativeDnsResolver::OnResolved(void* arg, grpc_error* error) {
+  NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
+  r->combiner()->Run(
+      GRPC_CLOSURE_INIT(&r->on_resolved_, NativeDnsResolver::OnResolvedLocked,
+                        r, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
   NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
   GPR_ASSERT(r->resolving_);
@@ -202,6 +215,9 @@ void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
   } else {
     gpr_log(GPR_DEBUG, "retrying immediately");
   }
+  GRPC_CLOSURE_INIT(&r->on_next_resolution_,
+                    NativeDnsResolver::OnNextResolution, r,
+                    grpc_schedule_on_exec_ctx);
   grpc_timer_init(&r->next_resolution_timer_, next_try,
                   &r->on_next_resolution_);
 }
@@ -229,6 +245,9 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
   // new closure API is done, find a way to track this ref with the timer
   // callback as part of the type system.
   Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release();
+  GRPC_CLOSURE_INIT(&on_next_resolution_,
+                    NativeDnsResolver::OnNextResolution, this,
+                    grpc_schedule_on_exec_ctx);
   grpc_timer_init(&next_resolution_timer_,
                   ExecCtx::Get()->Now() + ms_until_next_resolution,
                   &on_next_resolution_);
@@ -247,6 +266,8 @@ void NativeDnsResolver::StartResolvingLocked() {
   GPR_ASSERT(!resolving_);
   resolving_ = true;
   addresses_ = nullptr;
+  GRPC_CLOSURE_INIT(&on_resolved_, NativeDnsResolver::OnResolved, this,
+                    grpc_schedule_on_exec_ctx);
   grpc_resolve_address(name_to_resolve_, kDefaultPort, interested_parties_,
                        &on_resolved_, &addresses_);
   last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 98beef37de3..83b16afbd00 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -98,8 +98,6 @@ FakeResolver::FakeResolver(ResolverArgs args)
     : Resolver(args.combiner, std::move(args.result_handler)),
       response_generator_(
          FakeResolverResponseGenerator::GetFromArgs(args.args)) {
-  GRPC_CLOSURE_INIT(&reresolution_closure_, ReturnReresolutionResult, this,
-                    grpc_combiner_scheduler(combiner()));
   // Channels sharing the same subchannels may have different resolver response
   // generators. If we don't remove this arg, subchannel pool will create new
   // subchannels for the same address instead of reusing existing ones because
@@ -129,7 +127,9 @@ void FakeResolver::RequestReresolutionLocked() {
   if (!reresolution_closure_pending_) {
     reresolution_closure_pending_ = true;
     Ref().release();  // ref held by closure
-    GRPC_CLOSURE_SCHED(&reresolution_closure_, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_INIT(&reresolution_closure_, ReturnReresolutionResult, this,
+                      nullptr);
+    combiner()->Run(&reresolution_closure_, GRPC_ERROR_NONE);
   }
 }
@@ -208,10 +208,9 @@ void FakeResolverResponseGenerator::SetResponse(Resolver::Result result) {
   SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
   closure_arg->resolver = std::move(resolver);
   closure_arg->result = std::move(result);
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(
-          &closure_arg->set_response_closure, SetResponseLocked, closure_arg,
-          grpc_combiner_scheduler(closure_arg->resolver->combiner())),
+  closure_arg->resolver->combiner()->Run(
+      GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
+                        closure_arg, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -238,11 +237,9 @@ void FakeResolverResponseGenerator::SetReresolutionResponse(
   closure_arg->resolver = std::move(resolver);
   closure_arg->result = std::move(result);
   closure_arg->has_result = true;
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(
-          &closure_arg->set_response_closure, SetReresolutionResponseLocked,
-          closure_arg,
-          grpc_combiner_scheduler(closure_arg->resolver->combiner())),
+  closure_arg->resolver->combiner()->Run(
+      GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+                        SetReresolutionResponseLocked, closure_arg, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -255,11 +252,9 @@ void FakeResolverResponseGenerator::UnsetReresolutionResponse() {
   }
   SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
   closure_arg->resolver = std::move(resolver);
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(
-          &closure_arg->set_response_closure, SetReresolutionResponseLocked,
-          closure_arg,
-          grpc_combiner_scheduler(closure_arg->resolver->combiner())),
+  closure_arg->resolver->combiner()->Run(
+      GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+                        SetReresolutionResponseLocked, closure_arg, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -283,10 +278,9 @@ void FakeResolverResponseGenerator::SetFailure() {
   }
   SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
   closure_arg->resolver = std::move(resolver);
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(
-          &closure_arg->set_response_closure, SetFailureLocked, closure_arg,
-          grpc_combiner_scheduler(closure_arg->resolver->combiner())),
+  closure_arg->resolver->combiner()->Run(
+      GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
+                        closure_arg, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -300,10 +294,9 @@ void FakeResolverResponseGenerator::SetFailureOnReresolution() {
   SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
   closure_arg->resolver = std::move(resolver);
   closure_arg->immediate = false;
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(
-          &closure_arg->set_response_closure, SetFailureLocked, closure_arg,
-          grpc_combiner_scheduler(closure_arg->resolver->combiner())),
+  closure_arg->resolver->combiner()->Run(
+      GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
+                        closure_arg, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -316,10 +309,9 @@ void FakeResolverResponseGenerator::SetFakeResolver(
   SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
   closure_arg->resolver = resolver_->Ref();
   closure_arg->result = std::move(result_);
-  GRPC_CLOSURE_SCHED(
+  resolver_->combiner()->Run(
       GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
-                        closure_arg,
-                        grpc_combiner_scheduler(resolver_->combiner())),
+                        closure_arg, nullptr),
       GRPC_ERROR_NONE);
   has_result_ = false;
 }
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index 7cee4879814..0349c4745ff 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -39,7 +39,7 @@ struct ResolverArgs {
   /// Used to drive I/O in the name resolution process.
   grpc_pollset_set* pollset_set = nullptr;
   /// The combiner under which all resolver calls will be run.
-  grpc_combiner* combiner = nullptr;
+  Combiner* combiner = nullptr;
   /// The result handler to be used by the resolver.
   UniquePtr<Resolver::ResultHandler> result_handler;
 };
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 509c4ef38fa..0776551a9b6 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -145,7 +145,7 @@ bool ResolverRegistry::IsValidTarget(const char* target) {
 OrphanablePtr<Resolver> ResolverRegistry::CreateResolver(
     const char* target, const grpc_channel_args* args,
-    grpc_pollset_set* pollset_set, grpc_combiner* combiner,
+    grpc_pollset_set* pollset_set, Combiner* combiner,
     UniquePtr<Resolver::ResultHandler> result_handler) {
   GPR_ASSERT(g_state != nullptr);
   grpc_uri* uri = nullptr;
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 4248a065439..dd3ac4713e9 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -68,7 +68,7 @@ class ResolverRegistry {
   /// \a result_handler is used to return results from the resolver.
   static OrphanablePtr<Resolver> CreateResolver(
       const char* target, const grpc_channel_args* args,
-      grpc_pollset_set* pollset_set, grpc_combiner* combiner,
+      grpc_pollset_set* pollset_set, Combiner* combiner,
       UniquePtr<Resolver::ResultHandler> result_handler);
   /// Returns the default authority to pass from a client for \a target.
diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc
index 16881f3a59e..2f09e12ab94 100644
--- a/src/core/ext/filters/client_channel/xds/xds_client.cc
+++ b/src/core/ext/filters/client_channel/xds/xds_client.cc
@@ -92,6 +92,7 @@ class XdsClient::ChannelState::RetryableCall
  private:
   void StartNewCallLocked();
   void StartRetryTimerLocked();
+  static void OnRetryTimer(void* arg, grpc_error* error);
   static void OnRetryTimerLocked(void* arg, grpc_error* error);
   // The wrapped xds call that talks to the xds server. It's instantiated
@@ -125,6 +126,8 @@ class XdsClient::ChannelState::AdsCallState
   bool seen_response() const { return seen_response_; }
  private:
+  static void OnResponseReceived(void* arg, grpc_error* error);
+  static void OnStatusReceived(void* arg, grpc_error* error);
   static void OnResponseReceivedLocked(void* arg, grpc_error* error);
   static void OnStatusReceivedLocked(void* arg, grpc_error* error);
@@ -177,10 +180,6 @@ class XdsClient::ChannelState::LrsCallState
    public:
     Reporter(RefCountedPtr<LrsCallState> parent, grpc_millis report_interval)
         : parent_(std::move(parent)), report_interval_(report_interval) {
-      GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimerLocked, this,
-                        grpc_combiner_scheduler(xds_client()->combiner_));
-      GRPC_CLOSURE_INIT(&on_report_done_, OnReportDoneLocked, this,
-                        grpc_combiner_scheduler(xds_client()->combiner_));
       ScheduleNextReportLocked();
     }
@@ -188,8 +187,10 @@
    private:
     void ScheduleNextReportLocked();
+    static void OnNextReportTimer(void* arg, grpc_error* error);
     static void OnNextReportTimerLocked(void* arg, grpc_error* error);
     void SendReportLocked();
+    static void OnReportDone(void* arg, grpc_error* error);
     static void OnReportDoneLocked(void* arg, grpc_error* error);
     bool IsCurrentReporterOnCall() const {
@@ -209,6 +210,9 @@
     grpc_closure on_report_done_;
   };
+  static void OnInitialRequestSent(void* arg, grpc_error* error);
+  static void OnResponseReceived(void* arg, grpc_error* error);
+  static void OnStatusReceived(void* arg, grpc_error* error);
   static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
   static void OnResponseReceivedLocked(void* arg, grpc_error* error);
   static void OnStatusReceivedLocked(void* arg, grpc_error* error);
@@ -253,8 +257,7 @@ class XdsClient::ChannelState::StateWatcher
     : public AsyncConnectivityStateWatcherInterface {
  public:
   explicit StateWatcher(RefCountedPtr<ChannelState> parent)
-      : AsyncConnectivityStateWatcherInterface(
-            grpc_combiner_scheduler(parent->xds_client()->combiner_)),
+      : AsyncConnectivityStateWatcherInterface(parent->xds_client()->combiner_),
         parent_(std::move(parent)) {}
  private:
@@ -421,8 +424,6 @@ XdsClient::ChannelState::RetryableCall<T>::RetryableCall(
         .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
         .set_jitter(GRPC_XDS_RECONNECT_JITTER)
         .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
-  GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimerLocked, this,
-                    grpc_combiner_scheduler(chand_->xds_client()->combiner_));
   StartNewCallLocked();
 }
@@ -476,10 +477,22 @@ void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
           chand()->xds_client(), chand(), timeout);
   }
   this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release();
+  GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this,
+                    grpc_schedule_on_exec_ctx);
   grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
   retry_timer_callback_pending_ = true;
 }
+template <typename T>
+void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimer(
+    void* arg, grpc_error* error) {
+  RetryableCall* calld = static_cast<RetryableCall*>(arg);
+  calld->chand_->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&calld->on_retry_timer_, OnRetryTimerLocked, calld,
+                        nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 template <typename T>
 void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimerLocked(
     void* arg, grpc_error* error) {
@@ -528,10 +541,6 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
   // Init other data associated with the call.
   grpc_metadata_array_init(&initial_metadata_recv_);
   grpc_metadata_array_init(&trailing_metadata_recv_);
-  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
-                    grpc_combiner_scheduler(xds_client()->combiner_));
-  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
-                    grpc_combiner_scheduler(xds_client()->combiner_));
   // Start the call.
   if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
     gpr_log(GPR_INFO,
@@ -575,6 +584,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
   op->reserved = nullptr;
   op++;
   Ref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked").release();
+  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
+                    grpc_schedule_on_exec_ctx);
   call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
                                                  &on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -590,6 +601,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
   // This callback signals the end of the call, so it relies on the initial
   // ref instead of a new ref. When it's invoked, it's the initial ref that is
   // unreffed.
+  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
+                    grpc_schedule_on_exec_ctx);
   call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
                                                  &on_status_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -616,6 +629,15 @@ void XdsClient::ChannelState::AdsCallState::Orphan() {
   // corresponding unref happens in on_status_received_ instead of here.
 }
+void XdsClient::ChannelState::AdsCallState::OnResponseReceived(
+    void* arg, grpc_error* error) {
+  AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
+  ads_calld->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&ads_calld->on_response_received_,
+                        OnResponseReceivedLocked, ads_calld, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
     void* arg, grpc_error* error) {
   AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
@@ -757,11 +779,22 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
   op.reserved = nullptr;
   GPR_ASSERT(ads_calld->call_ != nullptr);
   // Reuse the "ADS+OnResponseReceivedLocked" ref taken in ctor.
+  GRPC_CLOSURE_INIT(&ads_calld->on_response_received_, OnResponseReceived,
+                    ads_calld, grpc_schedule_on_exec_ctx);
   const grpc_call_error call_error = grpc_call_start_batch_and_execute(
       ads_calld->call_, &op, 1, &ads_calld->on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
+void XdsClient::ChannelState::AdsCallState::OnStatusReceived(
+    void* arg, grpc_error* error) {
+  AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
+  ads_calld->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&ads_calld->on_status_received_, OnStatusReceivedLocked,
+                        ads_calld, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::AdsCallState::OnStatusReceivedLocked(
     void* arg, grpc_error* error) {
   AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
@@ -807,11 +840,22 @@ void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() {
 void XdsClient::ChannelState::LrsCallState::Reporter::
     ScheduleNextReportLocked() {
   const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
+  GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this,
+                    grpc_schedule_on_exec_ctx);
   grpc_timer_init(&next_report_timer_, next_report_time,
                   &on_next_report_timer_);
   next_report_timer_callback_pending_ = true;
 }
+void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimer(
+    void* arg, grpc_error* error) {
+  Reporter* self = static_cast<Reporter*>(arg);
+  self->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&self->on_next_report_timer_, OnNextReportTimerLocked,
+                        self, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimerLocked(
     void* arg, grpc_error* error) {
   Reporter* self = static_cast<Reporter*>(arg);
@@ -851,6 +895,8 @@ void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
   memset(&op, 0, sizeof(op));
   op.op = GRPC_OP_SEND_MESSAGE;
   op.data.send_message.send_message = parent_->send_message_payload_;
+  GRPC_CLOSURE_INIT(&on_report_done_, OnReportDone, this,
+                    grpc_schedule_on_exec_ctx);
   grpc_call_error call_error = grpc_call_start_batch_and_execute(
       parent_->call_, &op, 1, &on_report_done_);
   if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
@@ -861,6 +907,15 @@ void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
   }
 }
+void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDone(
+    void* arg, grpc_error* error) {
+  Reporter* self = static_cast<Reporter*>(arg);
+  self->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&self->on_report_done_, OnReportDoneLocked, self,
+                        nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked(
     void* arg, grpc_error* error) {
   Reporter* self = static_cast<Reporter*>(arg);
@@ -908,12 +963,6 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
   // Init other data associated with the LRS call.
   grpc_metadata_array_init(&initial_metadata_recv_);
   grpc_metadata_array_init(&trailing_metadata_recv_);
-  GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSentLocked, this,
-                    grpc_combiner_scheduler(xds_client()->combiner_));
-  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
-                    grpc_combiner_scheduler(xds_client()->combiner_));
-  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
-                    grpc_combiner_scheduler(xds_client()->combiner_));
   // Start the call.
   if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
     gpr_log(GPR_INFO,
@@ -940,6 +989,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
   op->reserved = nullptr;
   op++;
   Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
+  GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSent, this,
+                    grpc_schedule_on_exec_ctx);
   call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
                                                  &on_initial_request_sent_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -958,6 +1009,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
   op->reserved = nullptr;
   op++;
   Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release();
+  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
+                    grpc_schedule_on_exec_ctx);
   call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
                                                  &on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -973,6 +1026,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
   // This callback signals the end of the call, so it relies on the initial
   // ref instead of a new ref. When it's invoked, it's the initial ref that is
   // unreffed.
+  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
+                    grpc_schedule_on_exec_ctx);
   call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
                                                  &on_status_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
@@ -1021,6 +1076,15 @@ void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
       Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
 }
+void XdsClient::ChannelState::LrsCallState::OnInitialRequestSent(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  lrs_calld->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&lrs_calld->on_initial_request_sent_,
+                        OnInitialRequestSentLocked, lrs_calld, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
     void* arg, grpc_error* error) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
@@ -1031,6 +1095,15 @@ void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
   lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
 }
+void XdsClient::ChannelState::LrsCallState::OnResponseReceived(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  lrs_calld->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&lrs_calld->on_response_received_,
+                        OnResponseReceivedLocked, lrs_calld, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
     void* arg, grpc_error* error) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
@@ -1114,11 +1187,22 @@ void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
   op.reserved = nullptr;
   GPR_ASSERT(lrs_calld->call_ != nullptr);
   // Reuse the "OnResponseReceivedLocked" ref taken in ctor.
+  GRPC_CLOSURE_INIT(&lrs_calld->on_response_received_, OnResponseReceived,
+                    lrs_calld, grpc_schedule_on_exec_ctx);
   const grpc_call_error call_error = grpc_call_start_batch_and_execute(
       lrs_calld->call_, &op, 1, &lrs_calld->on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
+void XdsClient::ChannelState::LrsCallState::OnStatusReceived(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  lrs_calld->xds_client()->combiner_->Run(
+      GRPC_CLOSURE_INIT(&lrs_calld->on_status_received_, OnStatusReceivedLocked,
+                        lrs_calld, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 void XdsClient::ChannelState::LrsCallState::OnStatusReceivedLocked(
     void* arg, grpc_error* error) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
@@ -1165,8 +1249,7 @@ UniquePtr<char> GenerateBuildVersionString() {
 }
 }  // namespace
-XdsClient::XdsClient(grpc_combiner* combiner,
-                     grpc_pollset_set* interested_parties,
+XdsClient::XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties,
                      StringView server_name,
                      UniquePtr<ServiceConfigWatcherInterface> watcher,
                      const grpc_channel_args& channel_args, grpc_error** error)
@@ -1193,8 +1276,8 @@ XdsClient::XdsClient(grpc_combiner* combiner,
     // TODO(juanlishen): Start LDS call and do not return service config
     // until we get the first LDS response.
     GRPC_CLOSURE_INIT(&service_config_notify_, NotifyOnServiceConfig,
-                      Ref().release(), grpc_combiner_scheduler(combiner_));
-    GRPC_CLOSURE_SCHED(&service_config_notify_, GRPC_ERROR_NONE);
+                      Ref().release(), nullptr);
+    combiner_->Run(&service_config_notify_, GRPC_ERROR_NONE);
   }
 }
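// ---------------------------------------------------------------------------
// [Editor's note, not part of the patch] The Ref(...).release() calls above
// follow a manual ownership convention: the raw ref is "held by the closure"
// while a timer or call batch is in flight, and the matching Unref happens in
// the callback. A minimal sketch, assuming a hypothetical Task type:
class Task : public grpc_core::InternallyRefCounted<Task> {
 public:
  void Orphan() override { Unref(); }
  void ArmTimer(grpc_millis deadline) {
    // Hand one strong ref to the pending closure; release() prevents the
    // RefCountedPtr destructor from dropping it at end of scope.
    Ref(DEBUG_LOCATION, "timer").release();
    GRPC_CLOSURE_INIT(&on_timer_, OnTimer, this, grpc_schedule_on_exec_ctx);
    grpc_timer_init(&timer_, deadline, &on_timer_);
  }
 private:
  static void OnTimer(void* arg, grpc_error* error) {
    Task* t = static_cast<Task*>(arg);
    // ... do the work (typically bounced onto a combiner first) ...
    t->Unref(DEBUG_LOCATION, "timer");  // balances the Ref in ArmTimer
  }
  grpc_timer timer_;
  grpc_closure on_timer_;
};
// ---------------------------------------------------------------------------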
diff --git a/src/core/ext/filters/client_channel/xds/xds_client.h b/src/core/ext/filters/client_channel/xds/xds_client.h
index 05c04b8e2cb..c5fc08cf0c2 100644
--- a/src/core/ext/filters/client_channel/xds/xds_client.h
+++ b/src/core/ext/filters/client_channel/xds/xds_client.h
@@ -71,7 +71,7 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
   // If *error is not GRPC_ERROR_NONE after construction, then there was
   // an error initializing the client.
-  XdsClient(grpc_combiner* combiner, grpc_pollset_set* interested_parties,
+  XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties,
             StringView server_name,
             UniquePtr<ServiceConfigWatcherInterface> watcher,
             const grpc_channel_args& channel_args, grpc_error** error);
@@ -196,7 +196,7 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
   UniquePtr<char> build_version_;
-  grpc_combiner* combiner_;
+  Combiner* combiner_;
   grpc_pollset_set* interested_parties_;
   UniquePtr<XdsBootstrap> bootstrap_;
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 2879d571d22..40cf83e44e7 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -104,11 +104,14 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount(false,
 /* forward declarations of various callbacks that we'll build closures around */
 static void write_action_begin_locked(void* t, grpc_error* error);
 static void write_action(void* t, grpc_error* error);
+static void write_action_end(void* t, grpc_error* error);
 static void write_action_end_locked(void* t, grpc_error* error);
+static void read_action(void* t, grpc_error* error);
 static void read_action_locked(void* t, grpc_error* error);
 static void continue_read_action_locked(grpc_chttp2_transport* t);
+static void complete_fetch(void* gs, grpc_error* error);
 static void complete_fetch_locked(void* gs, grpc_error* error);
 /** Set a transport level setting, and push it to our peer */
 static void queue_setting_update(grpc_chttp2_transport* t,
@@ -124,6 +127,8 @@ static void connectivity_state_set(grpc_chttp2_transport* t,
                                    grpc_connectivity_state state,
                                    const char* reason);
+static void benign_reclaimer(void* t, grpc_error* error);
+static void destructive_reclaimer(void* t, grpc_error* error);
 static void benign_reclaimer_locked(void* t, grpc_error* error);
 static void destructive_reclaimer_locked(void* t, grpc_error* error);
@@ -134,8 +139,11 @@ static void close_transport_locked(grpc_chttp2_transport* t, grpc_error* error);
 static void end_all_the_calls(grpc_chttp2_transport* t, grpc_error* error);
 static void schedule_bdp_ping_locked(grpc_chttp2_transport* t);
+static void start_bdp_ping(void* tp, grpc_error* error);
+static void finish_bdp_ping(void* tp, grpc_error* error);
 static void start_bdp_ping_locked(void* tp, grpc_error* error);
 static void finish_bdp_ping_locked(void* tp, grpc_error* error);
+static void next_bdp_ping_timer_expired(void* tp, grpc_error* error);
 static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error);
 static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error);
@@ -145,9 +153,13 @@ static void send_ping_locked(grpc_chttp2_transport* t,
 static void retry_initiate_ping_locked(void* tp, grpc_error* error);
 /** keepalive-relevant functions */
+static void init_keepalive_ping(void* arg, grpc_error* error);
 static void init_keepalive_ping_locked(void* arg, grpc_error* error);
+static void start_keepalive_ping(void* arg, grpc_error* error);
+static void finish_keepalive_ping(void* arg, grpc_error* error);
 static void start_keepalive_ping_locked(void* arg, grpc_error* error);
 static void finish_keepalive_ping_locked(void* arg, grpc_error* error);
+static void keepalive_watchdog_fired(void* arg, grpc_error* error);
 static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error);
 static void reset_byte_stream(void* arg, grpc_error* error);
@@ -377,36 +389,6 @@ static bool read_channel_args(grpc_chttp2_transport* t,
   return enable_bdp;
 }
-static void init_transport_closures(grpc_chttp2_transport* t) {
-  GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
-                    destructive_reclaimer_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
-                    t, grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
-                    next_bdp_ping_timer_expired_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
-                    t, grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
-                    start_keepalive_ping_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
-                    finish_keepalive_ping_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
-                    keepalive_watchdog_fired_locked, t,
-                    grpc_combiner_scheduler(t->combiner));
-}
-
 static void init_transport_keepalive_settings(grpc_chttp2_transport* t) {
   if (t->is_client) {
     t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
@@ -442,6 +424,8 @@ static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
   if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+    GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t,
+                      grpc_schedule_on_exec_ctx);
     grpc_timer_init(&t->keepalive_ping_timer,
                     grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                     &t->init_keepalive_ping_locked);
@@ -494,8 +478,6 @@ grpc_chttp2_transport::grpc_chttp2_transport(
   grpc_chttp2_hpack_parser_init(&hpack_parser);
   grpc_chttp2_goaway_parser_init(&goaway_parser);
-  init_transport_closures(this);
-
   /* configure http2 the way we like it */
   if (is_client) {
     queue_setting_update(this, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
@@ -556,9 +538,8 @@ static void destroy_transport_locked(void* tp, grpc_error* error) {
 static void destroy_transport(grpc_transport* gt) {
   grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
-                                         grpc_combiner_scheduler(t->combiner)),
-                     GRPC_ERROR_NONE);
+  t->combiner->Run(GRPC_CLOSURE_CREATE(destroy_transport_locked, t, nullptr),
+                   GRPC_ERROR_NONE);
 }
 static void close_transport_locked(grpc_chttp2_transport* t,
@@ -669,11 +650,7 @@ grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
   grpc_slice_buffer_init(&frame_storage);
   grpc_slice_buffer_init(&unprocessed_incoming_frames_buffer);
   grpc_slice_buffer_init(&flow_controlled_buffer);
-
-  GRPC_CLOSURE_INIT(&complete_fetch_locked, ::complete_fetch_locked, this,
-                    grpc_combiner_scheduler(t->combiner));
-  GRPC_CLOSURE_INIT(&reset_byte_stream, ::reset_byte_stream, this,
-                    grpc_combiner_scheduler(t->combiner));
+  GRPC_CLOSURE_INIT(&reset_byte_stream, ::reset_byte_stream, this, nullptr);
 }
 grpc_chttp2_stream::~grpc_chttp2_stream() {
@@ -766,9 +743,8 @@ static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
   }
   s->destroy_stream_arg = then_schedule_closure;
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
-                        grpc_combiner_scheduler(t->combiner)),
+  t->combiner->Run(
+      GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -929,10 +905,9 @@ void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
        * Also, 'write_action_begin_locked' only gathers the bytes into outbuf.
        * It does not call the endpoint to write the bytes. That is done by the
       * 'write_action' (which is scheduled by 'write_action_begin_locked') */
-      GRPC_CLOSURE_SCHED(
+      t->combiner->FinallyRun(
           GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
-                            write_action_begin_locked, t,
-                            grpc_combiner_finally_scheduler(t->combiner)),
+                            write_action_begin_locked, t, nullptr),
           GRPC_ERROR_NONE);
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
@@ -1006,11 +981,18 @@ static void write_action(void* gt, grpc_error* error) {
   t->cl = nullptr;
   grpc_endpoint_write(
       t->ep, &t->outbuf,
-      GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked,
-                        t, grpc_combiner_scheduler(t->combiner)),
+      GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end, t,
+                        grpc_schedule_on_exec_ctx),
       cl);
 }
+static void write_action_end(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->write_action_end_locked,
+                                     write_action_end_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 /* Callback from the grpc_endpoint after bytes have been written by calling
  * sendmsg */
 static void write_action_end_locked(void* tp, grpc_error* error) {
@@ -1051,10 +1033,9 @@ static void write_action_end_locked(void* tp, grpc_error* error) {
       if (!closed) {
        GRPC_CLOSURE_LIST_SCHED(&t->run_after_write);
      }
-      GRPC_CLOSURE_RUN(
+      t->combiner->FinallyRun(
          GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
-                            write_action_begin_locked, t,
-                            grpc_combiner_finally_scheduler(t->combiner)),
+                            write_action_begin_locked, t, nullptr),
          GRPC_ERROR_NONE);
      break;
    }
@@ -1305,8 +1286,10 @@ static void continue_fetching_send_locked(grpc_chttp2_transport* t,
     }
     s->fetching_send_message.reset();
     return; /* early out */
-  } else if (s->fetching_send_message->Next(UINT32_MAX,
-                                            &s->complete_fetch_locked)) {
+  } else if (s->fetching_send_message->Next(
+                 UINT32_MAX, GRPC_CLOSURE_INIT(&s->complete_fetch_locked,
+                                               ::complete_fetch, s,
+                                               grpc_schedule_on_exec_ctx))) {
     grpc_error* error = s->fetching_send_message->Pull(&s->fetching_slice);
     if (error != GRPC_ERROR_NONE) {
       s->fetching_send_message.reset();
@@ -1318,6 +1301,13 @@ static void continue_fetching_send_locked(grpc_chttp2_transport* t,
   }
 }
+static void complete_fetch(void* gs, grpc_error* error) {
+  grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(gs);
+  s->t->combiner->Run(GRPC_CLOSURE_INIT(&s->complete_fetch_locked,
+                                        ::complete_fetch_locked, s, nullptr),
+                      GRPC_ERROR_REF(error));
+}
+
 static void complete_fetch_locked(void* gs, grpc_error* error) {
   grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(gs);
   grpc_chttp2_transport* t = s->t;
@@ -1668,10 +1658,9 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
   GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
   op->handler_private.extra_arg = gs;
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
-                        op, grpc_combiner_scheduler(t->combiner)),
-      GRPC_ERROR_NONE);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
+                                     perform_stream_op_locked, op, nullptr),
+                   GRPC_ERROR_NONE);
 }
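// ---------------------------------------------------------------------------
// [Editor's note, not part of the patch] Two entry points replace the two old
// schedulers: Combiner::Run (was grpc_combiner_scheduler) queues a regular
// item, while Combiner::FinallyRun (was grpc_combiner_finally_scheduler)
// defers the closure until the combiner has drained its regular queue, which
// is what lets one write_action_begin_locked pass batch everything queued
// before it. A hedged sketch of the contract as used above:
void initiate_write_sketch(grpc_chttp2_transport* t) {
  // Deferred: runs after the currently queued combiner work, so state changes
  // made by those items are all visible to the single write pass.
  t->combiner->FinallyRun(
      GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
                        write_action_begin_locked, t, nullptr),
      GRPC_ERROR_NONE);
}
// ---------------------------------------------------------------------------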
 static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error) {
@@ -1707,24 +1696,45 @@ static void send_ping_locked(grpc_chttp2_transport* t,
  */
 static void send_keepalive_ping_locked(grpc_chttp2_transport* t) {
   if (t->closed_with_error != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_RUN(&t->start_keepalive_ping_locked,
-                     GRPC_ERROR_REF(t->closed_with_error));
-    GRPC_CLOSURE_RUN(&t->finish_keepalive_ping_locked,
+    t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
+                                       start_keepalive_ping_locked, t, nullptr),
                      GRPC_ERROR_REF(t->closed_with_error));
+    t->combiner->Run(
+        GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
+                          finish_keepalive_ping_locked, t, nullptr),
+        GRPC_ERROR_REF(t->closed_with_error));
     return;
   }
   grpc_chttp2_ping_queue* pq = &t->ping_queue;
   if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
     /* There is a ping in flight. Add yourself to the inflight closure list. */
-    GRPC_CLOSURE_RUN(&t->start_keepalive_ping_locked, GRPC_ERROR_NONE);
-    grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT],
-                             &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE);
+    t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
+                                       start_keepalive_ping_locked, t, nullptr),
+                     GRPC_ERROR_REF(t->closed_with_error));
+    grpc_closure_list_append(
+        &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT],
+        GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
+                          finish_keepalive_ping, t, grpc_schedule_on_exec_ctx),
+        GRPC_ERROR_NONE);
     return;
   }
-  grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE],
-                           &t->start_keepalive_ping_locked, GRPC_ERROR_NONE);
-  grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
-                           &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE);
+  grpc_closure_list_append(
+      &pq->lists[GRPC_CHTTP2_PCL_INITIATE],
+      GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked, start_keepalive_ping,
+                        t, grpc_schedule_on_exec_ctx),
+      GRPC_ERROR_NONE);
+  grpc_closure_list_append(
+      &pq->lists[GRPC_CHTTP2_PCL_NEXT],
+      GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked, finish_keepalive_ping,
+                        t, grpc_schedule_on_exec_ctx),
+      GRPC_ERROR_NONE);
+}
+
+void grpc_chttp2_retry_initiate_ping(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked,
+                                     retry_initiate_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
 }
 static void retry_initiate_ping_locked(void* tp, grpc_error* error) {
@@ -1835,10 +1845,9 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   }
   op->handler_private.extra_arg = gt;
   GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&op->handler_private.closure,
-                                       perform_transport_op_locked, op,
-                                       grpc_combiner_scheduler(t->combiner)),
-                     GRPC_ERROR_NONE);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
+                                     perform_transport_op_locked, op, nullptr),
+                   GRPC_ERROR_NONE);
 }
 /*******************************************************************************
@@ -2479,6 +2488,13 @@ static grpc_error* try_http_parsing(grpc_chttp2_transport* t) {
   return error;
 }
+static void read_action(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(
+      GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 static void read_action_locked(void* tp, grpc_error* error) {
   GPR_TIMER_SCOPE("reading_action_locked", 0);
@@ -2576,6 +2592,8 @@
 static void continue_read_action_locked(grpc_chttp2_transport* t) {
   const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
+  GRPC_CLOSURE_INIT(&t->read_action_locked, read_action, t,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
   grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t, nullptr);
 }
@@ -2584,7 +2602,19 @@ static void continue_read_action_locked(grpc_chttp2_transport* t) {
 // that kicks off finishes, it's unreffed
 static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
   t->flow_control->bdp_estimator()->SchedulePing();
-  send_ping_locked(t, &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked);
+  send_ping_locked(
+      t,
+      GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping, t,
+                        grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping, t,
+                        grpc_schedule_on_exec_ctx));
+}
+
+static void start_bdp_ping(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked,
+                                     start_bdp_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
 }
 static void start_bdp_ping_locked(void* tp, grpc_error* error) {
@@ -2601,6 +2631,14 @@ static void start_bdp_ping_locked(void* tp, grpc_error* error) {
     grpc_timer_cancel(&t->keepalive_ping_timer);
   }
   t->flow_control->bdp_estimator()->StartPing();
+  t->bdp_ping_started = true;
+}
+
+static void finish_bdp_ping(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked,
+                                     finish_bdp_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
 }
 static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
@@ -2613,15 +2651,34 @@ static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
     GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
     return;
   }
+  if (!t->bdp_ping_started) {
+    /* start_bdp_ping_locked has not been run yet. Schedule
+     * finish_bdp_ping_locked to be run later. */
+    t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked,
+                                       finish_bdp_ping_locked, t, nullptr),
+                     GRPC_ERROR_REF(error));
+    return;
+  }
+  t->bdp_ping_started = false;
   grpc_millis next_ping = t->flow_control->bdp_estimator()->CompletePing();
   grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
                                     nullptr);
   GPR_ASSERT(!t->have_next_bdp_ping_timer);
   t->have_next_bdp_ping_timer = true;
+  GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
+                    next_bdp_ping_timer_expired, t, grpc_schedule_on_exec_ctx);
   grpc_timer_init(&t->next_bdp_ping_timer, next_ping,
                   &t->next_bdp_ping_timer_expired_locked);
 }
+static void next_bdp_ping_timer_expired(void* tp, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
+  t->combiner->Run(
+      GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
+                        next_bdp_ping_timer_expired_locked, t, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
   GPR_ASSERT(t->have_next_bdp_ping_timer);
@@ -2700,6 +2757,13 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
   }
 }
+static void init_keepalive_ping(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked,
+                                     init_keepalive_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
   GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
@@ -2715,6 +2779,8 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
       grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
     } else {
       GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+      GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t,
+                        grpc_schedule_on_exec_ctx);
       grpc_timer_init(&t->keepalive_ping_timer,
                       grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                       &t->init_keepalive_ping_locked);
@@ -2722,6 +2788,8 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
   } else if (error == GRPC_ERROR_CANCELLED) {
     /* The keepalive ping timer may be cancelled by bdp */
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+    GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t,
+                      grpc_schedule_on_exec_ctx);
     grpc_timer_init(&t->keepalive_ping_timer,
                     grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                     &t->init_keepalive_ping_locked);
@@ -2729,6 +2797,13 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "init keepalive ping");
 }
+static void start_keepalive_ping(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
+                                     start_keepalive_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
   if (error != GRPC_ERROR_NONE) {
@@ -2741,9 +2816,19 @@ static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
     gpr_log(GPR_INFO, "%s: Start keepalive ping", t->peer_string);
   }
   GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
+  GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
+                    keepalive_watchdog_fired, t, grpc_schedule_on_exec_ctx);
   grpc_timer_init(&t->keepalive_watchdog_timer,
                   grpc_core::ExecCtx::Get()->Now() + t->keepalive_timeout,
                   &t->keepalive_watchdog_fired_locked);
+  t->keepalive_ping_started = true;
+}
+
+static void finish_keepalive_ping(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
+                                     finish_keepalive_ping_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
 }
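// ---------------------------------------------------------------------------
// [Editor's note, not part of the patch] start/finish ping callbacks now reach
// the combiner as two independently queued items, so "finish" can be dequeued
// before its matching "start" (for instance when a ping completes or fails
// immediately). The started-flag plus re-enqueue trick restores the old
// ordering; a hedged distillation of the pattern shared by the BDP and
// keepalive paths:
static void finish_ping_locked_sketch(void* tp, grpc_error* error) {
  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
  if (!t->keepalive_ping_started) {
    // The matching start hasn't run yet: push this closure to the back of the
    // combiner queue so the start item is processed first.
    t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
                                       finish_ping_locked_sketch, t, nullptr),
                     GRPC_ERROR_REF(error));
    return;
  }
  t->keepalive_ping_started = false;
  // ... normal completion path ...
}
// ---------------------------------------------------------------------------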
 static void finish_keepalive_ping_locked(void* arg, grpc_error* error) {
@@ -2753,9 +2838,21 @@ static void finish_keepalive_ping_locked(void* arg, grpc_error* error) {
     if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
       gpr_log(GPR_INFO, "%s: Finish keepalive ping", t->peer_string);
     }
+    if (!t->keepalive_ping_started) {
+      /* start_keepalive_ping_locked has not run yet. Reschedule
+       * finish_keepalive_ping_locked for it to be run later. */
+      t->combiner->Run(
+          GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
+                            finish_keepalive_ping_locked, t, nullptr),
+          GRPC_ERROR_REF(error));
+      return;
+    }
+    t->keepalive_ping_started = false;
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
     grpc_timer_cancel(&t->keepalive_watchdog_timer);
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+    GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t,
+                      grpc_schedule_on_exec_ctx);
     grpc_timer_init(&t->keepalive_ping_timer,
                     grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                     &t->init_keepalive_ping_locked);
@@ -2764,6 +2861,14 @@ static void finish_keepalive_ping_locked(void* arg, grpc_error* error) {
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "keepalive ping end");
 }
+static void keepalive_watchdog_fired(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(
+      GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
+                        keepalive_watchdog_fired_locked, t, nullptr),
+      GRPC_ERROR_REF(error));
+}
+
 static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
   if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
@@ -2864,10 +2969,9 @@ void Chttp2IncomingByteStream::OrphanLocked(void* arg,
 void Chttp2IncomingByteStream::Orphan() {
   GPR_TIMER_SCOPE("incoming_byte_stream_destroy", 0);
-  GRPC_CLOSURE_SCHED(
+  transport_->combiner->Run(
       GRPC_CLOSURE_INIT(&destroy_action_,
-                        &Chttp2IncomingByteStream::OrphanLocked, this,
-                        grpc_combiner_scheduler(transport_->combiner)),
+                        &Chttp2IncomingByteStream::OrphanLocked, this, nullptr),
       GRPC_ERROR_NONE);
 }
@@ -2924,10 +3028,9 @@ bool Chttp2IncomingByteStream::Next(size_t max_size_hint,
   Ref();
   next_action_.max_size_hint = max_size_hint;
   next_action_.on_complete = on_complete;
-  GRPC_CLOSURE_SCHED(
+  transport_->combiner->Run(
       GRPC_CLOSURE_INIT(&next_action_.closure,
-                        &Chttp2IncomingByteStream::NextLocked, this,
-                        grpc_combiner_scheduler(transport_->combiner)),
+                        &Chttp2IncomingByteStream::NextLocked, this, nullptr),
       GRPC_ERROR_NONE);
   return false;
 }
@@ -2980,7 +3083,8 @@ grpc_error* Chttp2IncomingByteStream::Pull(grpc_slice* slice) {
     }
   } else {
     error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
-    GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error));
+    stream_->t->combiner->Run(&stream_->reset_byte_stream,
+                              GRPC_ERROR_REF(error));
     return error;
   }
   return GRPC_ERROR_NONE;
@@ -3000,7 +3104,8 @@ grpc_error* Chttp2IncomingByteStream::Push(const grpc_slice& slice,
   if (remaining_bytes_ < GRPC_SLICE_LENGTH(slice)) {
     grpc_error* error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream");
-    GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error));
+    transport_->combiner->Run(&stream_->reset_byte_stream,
+                              GRPC_ERROR_REF(error));
     grpc_slice_unref_internal(slice);
     return error;
   } else {
@@ -3020,7 +3125,8 @@ grpc_error* Chttp2IncomingByteStream::Finished(grpc_error* error,
     }
   }
   if (error != GRPC_ERROR_NONE && reset_on_error) {
-    GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error));
+    transport_->combiner->Run(&stream_->reset_byte_stream,
+                              GRPC_ERROR_REF(error));
   }
   Unref();
   return error;
@@ -3040,6 +3146,8 @@ static void post_benign_reclaimer(grpc_chttp2_transport* t) {
   if (!t->benign_reclaimer_registered) {
     t->benign_reclaimer_registered = true;
     GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
+    GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer, t,
+                      grpc_schedule_on_exec_ctx);
     grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
                                       false, &t->benign_reclaimer_locked);
   }
@@ -3049,11 +3157,20 @@ static void post_destructive_reclaimer(grpc_chttp2_transport* t) {
   if (!t->destructive_reclaimer_registered) {
     t->destructive_reclaimer_registered = true;
     GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
+    GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked, destructive_reclaimer,
+                      t, grpc_schedule_on_exec_ctx);
     grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
                                       true, &t->destructive_reclaimer_locked);
   }
 }
+static void benign_reclaimer(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked,
+                                     benign_reclaimer_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 static void benign_reclaimer_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
   if (error == GRPC_ERROR_NONE &&
@@ -3083,6 +3200,13 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "benign_reclaimer");
 }
+static void destructive_reclaimer(void* arg, grpc_error* error) {
+  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  t->combiner->Run(GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
+                                     destructive_reclaimer_locked, t, nullptr),
+                   GRPC_ERROR_REF(error));
+}
+
 static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
   size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
@@ -3209,5 +3333,7 @@ void grpc_chttp2_transport_start_reading(
     gpr_free(read_buffer);
   }
   t->notify_on_receive_settings = notify_on_receive_settings;
-  GRPC_CLOSURE_SCHED(&t->read_action_locked, GRPC_ERROR_NONE);
+  t->combiner->Run(
+      GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t, nullptr),
+      GRPC_ERROR_NONE);
 }
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index a5142ffd96f..6c2496dc7ca 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -1741,9 +1741,8 @@ grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
        however -- it might be that we receive a RST_STREAM following this
        and can avoid the extra write */
     GRPC_CHTTP2_STREAM_REF(s, "final_rst");
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_CREATE(force_client_rst_stream, s,
-                            grpc_combiner_finally_scheduler(t->combiner)),
+    t->combiner->FinallyRun(
+        GRPC_CLOSURE_CREATE(force_client_rst_stream, s, nullptr),
         GRPC_ERROR_NONE);
   }
   grpc_chttp2_mark_stream_closed(t, s, true, false, GRPC_ERROR_NONE);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 57411de5450..845a94e919c 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -300,7 +300,7 @@ struct grpc_chttp2_transport {
   grpc_resource_user* resource_user;
-  grpc_combiner* combiner;
+  grpc_core::Combiner* combiner;
   grpc_closure* notify_on_receive_settings = nullptr;
@@ -459,6 +459,8 @@ struct grpc_chttp2_transport {
   /* next bdp ping timer */
   bool have_next_bdp_ping_timer = false;
+  /** If start_bdp_ping_locked has been called */
+  bool bdp_ping_started = false;
   grpc_timer next_bdp_ping_timer;
   /* keep-alive ping support */
@@ -480,6 +482,8 @@ struct grpc_chttp2_transport {
   grpc_millis keepalive_timeout;
   /** if keepalive pings are allowed when there's no outstanding streams */
   bool keepalive_permit_without_calls = false;
+  /** If start_keepalive_ping_locked has been called */
+  bool keepalive_ping_started = false;
   /** keep-alive state machine state */
   grpc_chttp2_keepalive_state keepalive_state;
   grpc_core::ContextList* cl = nullptr;
@@ -862,4 +866,6 @@ void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t,
 void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
                                                bool is_client);
+void grpc_chttp2_retry_initiate_ping(void* tp, grpc_error* error);
+
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index d6d9e4521f6..5fb21c8c86b 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -96,6 +96,9 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
     if (!t->ping_state.is_delayed_ping_timer_set) {
       t->ping_state.is_delayed_ping_timer_set = true;
       GRPC_CHTTP2_REF_TRANSPORT(t, "retry_initiate_ping_locked");
+      GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked,
+                        grpc_chttp2_retry_initiate_ping, t,
+                        grpc_schedule_on_exec_ctx);
       grpc_timer_init(&t->ping_state.delayed_ping_timer, next_allowed_ping,
                       &t->retry_initiate_ping_locked);
     }
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 31db1b70bab..cfb4bbdd96a 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -45,42 +45,16 @@ grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");
 #define STATE_UNORPHANED 1
 #define STATE_ELEM_COUNT_LOW_BIT 2
-struct grpc_combiner {
-  grpc_combiner* next_combiner_on_this_exec_ctx = nullptr;
-  grpc_closure_scheduler scheduler;
-  grpc_closure_scheduler finally_scheduler;
-  grpc_core::MultiProducerSingleConsumerQueue queue;
-  // either:
-  // a pointer to the initiating exec ctx if that is the only exec_ctx that has
-  // ever queued to this combiner, or NULL. If this is non-null, it's not
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 31db1b70bab..cfb4bbdd96a 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -45,42 +45,16 @@ grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");
 #define STATE_UNORPHANED 1
 #define STATE_ELEM_COUNT_LOW_BIT 2
 
-struct grpc_combiner {
-  grpc_combiner* next_combiner_on_this_exec_ctx = nullptr;
-  grpc_closure_scheduler scheduler;
-  grpc_closure_scheduler finally_scheduler;
-  grpc_core::MultiProducerSingleConsumerQueue queue;
-  // either:
-  // a pointer to the initiating exec ctx if that is the only exec_ctx that has
-  // ever queued to this combiner, or NULL. If this is non-null, it's not
-  // dereferencable (since the initiating exec_ctx may have gone out of scope)
-  gpr_atm initiating_exec_ctx_or_null;
-  // state is:
-  // lower bit - zero if orphaned (STATE_UNORPHANED)
-  // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
-  gpr_atm state;
-  bool time_to_execute_final_list = false;
-  grpc_closure_list final_list;
-  grpc_closure offload;
-  gpr_refcount refs;
-};
-
-static void combiner_run(grpc_closure* closure, grpc_error* error);
-static void combiner_exec(grpc_closure* closure, grpc_error* error);
-static void combiner_finally_exec(grpc_closure* closure, grpc_error* error);
-
-static const grpc_closure_scheduler_vtable scheduler = {
-    combiner_run, combiner_exec, "combiner:immediately"};
-static const grpc_closure_scheduler_vtable finally_scheduler = {
-    combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
+static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* closure,
+                          grpc_error* error);
+static void combiner_finally_exec(grpc_core::Combiner* lock,
+                                  grpc_closure* closure, grpc_error* error);
 
 static void offload(void* arg, grpc_error* error);
 
-grpc_combiner* grpc_combiner_create(void) {
-  grpc_combiner* lock = grpc_core::New<grpc_combiner>();
+grpc_core::Combiner* grpc_combiner_create(void) {
+  grpc_core::Combiner* lock = grpc_core::New<grpc_core::Combiner>();
   gpr_ref_init(&lock->refs, 1);
-  lock->scheduler.vtable = &scheduler;
-  lock->finally_scheduler.vtable = &finally_scheduler;
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
   grpc_closure_list_init(&lock->final_list);
   GRPC_CLOSURE_INIT(
@@ -90,13 +64,13 @@ grpc_combiner* grpc_combiner_create(void) {
   return lock;
 }
 
-static void really_destroy(grpc_combiner* lock) {
+static void really_destroy(grpc_core::Combiner* lock) {
   GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
   GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
   grpc_core::Delete(lock);
 }
 
-static void start_destroy(grpc_combiner* lock) {
+static void start_destroy(grpc_core::Combiner* lock) {
   gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
   GRPC_COMBINER_TRACE(gpr_log(
       GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
@@ -117,20 +91,21 @@ static void start_destroy(grpc_combiner* lock) {
 #define GRPC_COMBINER_DEBUG_SPAM(op, delta)
 #endif
 
-void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
+void grpc_combiner_unref(grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
   GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
   if (gpr_unref(&lock->refs)) {
    start_destroy(lock);
   }
 }
 
-grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
+grpc_core::Combiner* grpc_combiner_ref(
+    grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
   GRPC_COMBINER_DEBUG_SPAM("  REF", 1);
   gpr_ref(&lock->refs);
   return lock;
 }
 
-static void push_last_on_exec_ctx(grpc_combiner* lock) {
+static void push_last_on_exec_ctx(grpc_core::Combiner* lock) {
   lock->next_combiner_on_this_exec_ctx = nullptr;
   if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
     grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
@@ -143,7 +118,7 @@ static void push_last_on_exec_ctx(grpc_combiner* lock) {
   }
 }
 
-static void push_first_on_exec_ctx(grpc_combiner* lock) {
+static void push_first_on_exec_ctx(grpc_core::Combiner* lock) {
   lock->next_combiner_on_this_exec_ctx =
       grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
   grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
@@ -152,14 +127,10 @@ static void push_first_on_exec_ctx(grpc_combiner* lock) {
   }
 }
 
-#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
-  ((grpc_combiner*)(((char*)((closure)->scheduler)) -            \
-                    offsetof(grpc_combiner, scheduler_name)))
-
-static void combiner_exec(grpc_closure* cl, grpc_error* error) {
+static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
+                          grpc_error* error) {
   GPR_TIMER_SCOPE("combiner.execute", 0);
   GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
-  grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
   GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                               "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
@@ -198,11 +169,11 @@ static void move_next() {
 }
 
 static void offload(void* arg, grpc_error* error) {
-  grpc_combiner* lock = static_cast<grpc_combiner*>(arg);
+  grpc_core::Combiner* lock = static_cast<grpc_core::Combiner*>(arg);
   push_last_on_exec_ctx(lock);
 }
 
-static void queue_offload(grpc_combiner* lock) {
+static void queue_offload(grpc_core::Combiner* lock) {
   GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
   move_next();
   GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
@@ -211,7 +182,7 @@ static void queue_offload(grpc_combiner* lock) {
 
 bool grpc_combiner_continue_exec_ctx() {
   GPR_TIMER_SCOPE("combiner.continue_exec_ctx", 0);
-  grpc_combiner* lock =
+  grpc_core::Combiner* lock =
       grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
   if (lock == nullptr) {
     return false;
@@ -329,19 +300,20 @@ bool grpc_combiner_continue_exec_ctx() {
 
 static void enqueue_finally(void* closure, grpc_error* error);
 
-static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
+static void combiner_finally_exec(grpc_core::Combiner* lock,
+                                  grpc_closure* closure, grpc_error* error) {
+  GPR_ASSERT(lock != nullptr);
   GPR_TIMER_SCOPE("combiner.execute_finally", 0);
   GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
-  grpc_combiner* lock =
-      COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
   GRPC_COMBINER_TRACE(gpr_log(
       GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
       grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
   if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
     GPR_TIMER_MARK("slowpath", 0);
-    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,
-                                           grpc_combiner_scheduler(lock)),
-                       error);
+    // Reusing scheduler to store the combiner so that it can be accessed in
+    // enqueue_finally
+    closure->scheduler = reinterpret_cast<grpc_closure_scheduler*>(lock);
+    lock->Run(GRPC_CLOSURE_CREATE(enqueue_finally, closure, nullptr), error);
     return;
   }
@@ -351,32 +323,24 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
   grpc_closure_list_append(&lock->final_list, closure, error);
 }
 
-static void combiner_run(grpc_closure* closure, grpc_error* error) {
-  grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler);
-#ifndef NDEBUG
-  closure->scheduled = false;
-  GRPC_COMBINER_TRACE(gpr_log(
-      GPR_DEBUG,
-      "Combiner:%p grpc_combiner_run closure:%p created [%s:%d] run [%s:%d]",
-      lock, closure, closure->file_created, closure->line_created,
-      closure->file_initiated, closure->line_initiated));
-#endif
-  GPR_ASSERT(grpc_core::ExecCtx::Get()->combiner_data()->active_combiner ==
-             lock);
-  closure->cb(closure->cb_arg, error);
-  GRPC_ERROR_UNREF(error);
-}
-
 static void enqueue_finally(void* closure, grpc_error* error) {
-  combiner_finally_exec(static_cast<grpc_closure*>(closure),
-                        GRPC_ERROR_REF(error));
+  grpc_closure* cl = static_cast<grpc_closure*>(closure);
+  combiner_finally_exec(reinterpret_cast<grpc_core::Combiner*>(cl->scheduler),
+                        cl, GRPC_ERROR_REF(error));
 }
 
-grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
-  return &combiner->scheduler;
+namespace grpc_core {
+void Combiner::Run(grpc_closure* closure, grpc_error* error) {
+  GPR_ASSERT(closure->scheduler == nullptr ||
+             closure->scheduler ==
+                 reinterpret_cast<grpc_closure_scheduler*>(this));
+  combiner_exec(this, closure, error);
 }
 
-grpc_closure_scheduler* grpc_combiner_finally_scheduler(
-    grpc_combiner* combiner) {
-  return &combiner->finally_scheduler;
+void Combiner::FinallyRun(grpc_closure* closure, grpc_error* error) {
+  GPR_ASSERT(closure->scheduler == nullptr ||
+             closure->scheduler ==
+                 reinterpret_cast<grpc_closure_scheduler*>(this));
+  combiner_finally_exec(this, closure, error);
 }
+}  // namespace grpc_core
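With the closure no longer embedding a combiner pointer, the FinallyRun slow
path above has to smuggle the combiner through the otherwise-unused
closure->scheduler field: Run(enqueue_finally) carries the target closure,
and enqueue_finally recovers the combiner from that field once it is already
executing under the lock. From a caller's perspective the contract is simply
the following (a sketch; schedule_at_unlock is an illustrative name):

// Schedule `done` to run just before `lock` is released. Safe to call
// from any thread; if not already under `lock`, the call first bounces
// through lock->Run (see enqueue_finally above).
static void schedule_at_unlock(grpc_core::Combiner* lock, grpc_closure* done) {
  lock->FinallyRun(done, GRPC_ERROR_NONE);
}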
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 8401dd97d2d..53e52c1bbba 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -27,6 +27,34 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
+namespace grpc_core {
+// TODO(yashkt) : Remove this class and replace it with a class that does not
+// use ExecCtx
+class Combiner {
+ public:
+  void Run(grpc_closure* closure, grpc_error* error);
+  // TODO(yashkt) : Remove this method
+  void FinallyRun(grpc_closure* closure, grpc_error* error);
+  Combiner* next_combiner_on_this_exec_ctx = nullptr;
+  grpc_closure_scheduler scheduler;
+  grpc_closure_scheduler finally_scheduler;
+  MultiProducerSingleConsumerQueue queue;
+  // either:
+  // a pointer to the initiating exec ctx if that is the only exec_ctx that has
+  // ever queued to this combiner, or NULL. If this is non-null, it's not
+  // dereferencable (since the initiating exec_ctx may have gone out of scope)
+  gpr_atm initiating_exec_ctx_or_null;
+  // state is:
+  // lower bit - zero if orphaned (STATE_UNORPHANED)
+  // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
+  gpr_atm state;
+  bool time_to_execute_final_list = false;
+  grpc_closure_list final_list;
+  grpc_closure offload;
+  gpr_refcount refs;
+};
+}  // namespace grpc_core
+
 // Provides serialized access to some resource.
 // Each action queued on a combiner is executed serially in a borrowed thread.
 // The actual thread executing actions may change over time (but there will only
@@ -34,7 +62,7 @@
 
 // Initialize the lock, with an optional workqueue to shift load to when
 // necessary
-grpc_combiner* grpc_combiner_create(void);
+grpc_core::Combiner* grpc_combiner_create(void);
 
 #ifndef NDEBUG
 #define GRPC_COMBINER_DEBUG_ARGS \
@@ -51,12 +79,9 @@ grpc_combiner* grpc_combiner_create(void);
 
 // Ref/unref the lock, for when we're sharing the lock ownership
 // Prefer to use the macros above
-grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
-void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
-// Fetch a scheduler to schedule closures against
-grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* lock);
-// Scheduler to execute \a action within the lock just prior to unlocking.
-grpc_closure_scheduler* grpc_combiner_finally_scheduler(grpc_combiner* lock);
+grpc_core::Combiner* grpc_combiner_ref(
+    grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS);
+void grpc_combiner_unref(grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS);
 
 bool grpc_combiner_continue_exec_ctx();
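Taken together, the new surface is small: create, Run, FinallyRun, unref. A
minimal end-to-end use, mirroring combiner_test.cc later in this patch
(my_cb and the "done" reason string are illustrative):

static void my_cb(void* /*arg*/, grpc_error* /*error*/) {}

static void use_combiner() {
  grpc_core::ExecCtx exec_ctx;
  grpc_core::Combiner* lock = grpc_combiner_create();
  lock->Run(GRPC_CLOSURE_CREATE(my_cb, nullptr, nullptr), GRPC_ERROR_NONE);
  // Run() only enqueues; the closure executes when the ExecCtx flushes.
  grpc_core::ExecCtx::Get()->Flush();
  GRPC_COMBINER_UNREF(lock, "done");
}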
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index f1907396e14..32bbcbf8a19 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -63,6 +63,7 @@ grpc_millis grpc_cycle_counter_to_millis_round_down(gpr_cycle_counter cycles);
 grpc_millis grpc_cycle_counter_to_millis_round_up(gpr_cycle_counter cycles);
 
 namespace grpc_core {
+class Combiner;
 /** Execution context.
  *  A bag of data that collects information along a callstack.
  *  It is created on the stack at core entry points (public API or iomgr), and
@@ -136,9 +137,9 @@ class ExecCtx {
 
   struct CombinerData {
     /* currently active combiner: updated only via combiner.c */
-    grpc_combiner* active_combiner;
+    Combiner* active_combiner;
     /* last active combiner in the active combiner list */
-    grpc_combiner* last_combiner;
+    Combiner* last_combiner;
   };
 
   /** Only to be used by grpc-combiner code */
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index d29655e532d..ce982a64106 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -132,7 +132,7 @@ struct grpc_resource_quota {
   /* Master combiner lock: all activity on a quota executes under this combiner
    * (so no mutex is needed for this data structure)
    */
-  grpc_combiner* combiner;
+  grpc_core::Combiner* combiner;
   /* Size of the resource quota */
   int64_t size;
   /* Amount of free memory in the resource quota */
@@ -293,7 +293,8 @@ static void rq_step_sched(grpc_resource_quota* resource_quota) {
   if (resource_quota->step_scheduled) return;
   resource_quota->step_scheduled = true;
   grpc_resource_quota_ref_internal(resource_quota);
-  GRPC_CLOSURE_SCHED(&resource_quota->rq_step_closure, GRPC_ERROR_NONE);
+  resource_quota->combiner->FinallyRun(&resource_quota->rq_step_closure,
+                                       GRPC_ERROR_NONE);
 }
 
 /* update the atomically available resource estimate - use no barriers since
@@ -655,10 +656,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
                              (intptr_t)resource_quota);
   }
   GRPC_CLOSURE_INIT(&resource_quota->rq_step_closure, rq_step, resource_quota,
-                    grpc_combiner_finally_scheduler(resource_quota->combiner));
+                    nullptr);
   GRPC_CLOSURE_INIT(&resource_quota->rq_reclamation_done_closure,
-                    rq_reclamation_done, resource_quota,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    rq_reclamation_done, resource_quota, nullptr);
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     resource_quota->roots[i] = nullptr;
   }
@@ -774,19 +774,15 @@ grpc_resource_user* grpc_resource_user_create(
   resource_user->resource_quota =
       grpc_resource_quota_ref_internal(resource_quota);
   GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
-                    resource_user,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    resource_user, nullptr);
   GRPC_CLOSURE_INIT(&resource_user->add_to_free_pool_closure,
-                    &ru_add_to_free_pool, resource_user,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    &ru_add_to_free_pool, resource_user, nullptr);
   GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[0],
-                    &ru_post_benign_reclaimer, resource_user,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    &ru_post_benign_reclaimer, resource_user, nullptr);
   GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[1],
-                    &ru_post_destructive_reclaimer, resource_user,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    &ru_post_destructive_reclaimer, resource_user, nullptr);
   GRPC_CLOSURE_INIT(&resource_user->destroy_closure, &ru_destroy, resource_user,
-                    grpc_combiner_scheduler(resource_quota->combiner));
+                    nullptr);
   gpr_mu_init(&resource_user->mu);
   gpr_atm_rel_store(&resource_user->refs, 1);
   gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -827,7 +823,8 @@ static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) {
   gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
   GPR_ASSERT(old >= amount);
   if (old == amount) {
-    GRPC_CLOSURE_SCHED(&resource_user->destroy_closure, GRPC_ERROR_NONE);
+    resource_user->resource_quota->combiner->Run(
+        &resource_user->destroy_closure, GRPC_ERROR_NONE);
   }
 }
 
@@ -841,10 +838,8 @@ void grpc_resource_user_unref(grpc_resource_user* resource_user) {
 
 void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
   if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_CREATE(
-            ru_shutdown, resource_user,
-            grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
+    resource_user->resource_quota->combiner->Run(
+        GRPC_CLOSURE_CREATE(ru_shutdown, resource_user, nullptr),
         GRPC_ERROR_NONE);
   }
 }
@@ -902,7 +897,8 @@ static bool resource_user_alloc_locked(grpc_resource_user* resource_user,
   }
   if (!resource_user->allocating) {
     resource_user->allocating = true;
-    GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
+    resource_user->resource_quota->combiner->Run(
+        &resource_user->allocate_closure, GRPC_ERROR_NONE);
   }
   return false;
 }
@@ -957,8 +953,8 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
   if (is_bigger_than_zero && was_zero_or_negative &&
       !resource_user->added_to_free_pool) {
     resource_user->added_to_free_pool = true;
-    GRPC_CLOSURE_SCHED(&resource_user->add_to_free_pool_closure,
-                       GRPC_ERROR_NONE);
+    resource_quota->combiner->Run(&resource_user->add_to_free_pool_closure,
+                                  GRPC_ERROR_NONE);
   }
   gpr_mu_unlock(&resource_user->mu);
   ru_unref_by(resource_user, static_cast<gpr_atm>(size));
 }
@@ -969,8 +965,8 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
                                        grpc_closure* closure) {
   GPR_ASSERT(resource_user->new_reclaimers[destructive] == nullptr);
   resource_user->new_reclaimers[destructive] = closure;
-  GRPC_CLOSURE_SCHED(&resource_user->post_reclaimer_closure[destructive],
-                     GRPC_ERROR_NONE);
+  resource_user->resource_quota->combiner->Run(
+      &resource_user->post_reclaimer_closure[destructive], GRPC_ERROR_NONE);
 }
 
 void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
@@ -978,7 +974,7 @@ void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
     gpr_log(GPR_INFO, "RQ %s %s: reclamation complete",
             resource_user->resource_quota->name, resource_user->name);
   }
-  GRPC_CLOSURE_SCHED(
+  resource_user->resource_quota->combiner->Run(
       &resource_user->resource_quota->rq_reclamation_done_closure,
      GRPC_ERROR_NONE);
 }
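The resource-quota conversion is mechanical except for one detail: rq_step
stays a "finally" closure. Several user closures may be enqueued with Run()
while the quota combiner drains, and FinallyRun defers the step pass until
just before the combiner unlocks, so a single pass covers the whole batch.
An illustrative sketch using the field names from this patch (these structs
are file-local to resource_quota.cc, so this is a sketch, not buildable
standalone):

static void poke_quota(grpc_resource_quota* rq, grpc_resource_user* ru) {
  // Ordinary items run in FIFO order while the combiner drains...
  rq->combiner->Run(&ru->allocate_closure, GRPC_ERROR_NONE);
  // ...but the step closure is deferred to the end of the drain.
  rq->combiner->FinallyRun(&rq->rq_step_closure, GRPC_ERROR_NONE);
}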
-    GRPC_CLOSURE_SCHED(cb, error);
+    GRPC_CLOSURE_RUN(cb, error);
     TCP_UNREF(tcp, "write");
   }
 }
diff --git a/src/core/lib/transport/connectivity_state.cc b/src/core/lib/transport/connectivity_state.cc
index 6c1b51b3e2d..c553ba240ca 100644
--- a/src/core/lib/transport/connectivity_state.cc
+++ b/src/core/lib/transport/connectivity_state.cc
@@ -26,6 +26,7 @@
 #include
 #include
 
+#include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
 namespace grpc_core {
@@ -57,10 +58,17 @@ const char* ConnectivityStateName(grpc_connectivity_state state) {
 class AsyncConnectivityStateWatcherInterface::Notifier {
  public:
  Notifier(RefCountedPtr<AsyncConnectivityStateWatcherInterface> watcher,
-           grpc_connectivity_state state, grpc_closure_scheduler* scheduler)
+           grpc_connectivity_state state, Combiner* combiner)
       : watcher_(std::move(watcher)), state_(state) {
-    GRPC_CLOSURE_INIT(&closure_, SendNotification, this, scheduler);
-    GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
+    if (combiner != nullptr) {
+      combiner->Run(
+          GRPC_CLOSURE_INIT(&closure_, SendNotification, this, nullptr),
+          GRPC_ERROR_NONE);
+    } else {
+      GRPC_CLOSURE_INIT(&closure_, SendNotification, this,
+                        grpc_schedule_on_exec_ctx);
+      GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
+    }
   }
 
  private:
@@ -81,7 +89,7 @@ class AsyncConnectivityStateWatcherInterface::Notifier {
 
 void AsyncConnectivityStateWatcherInterface::Notify(
     grpc_connectivity_state state) {
-  New<Notifier>(Ref(), state, scheduler_);  // Deletes itself when done.
+  New<Notifier>(Ref(), state, combiner_);  // Deletes itself when done.
 }
 
 //
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index 8126ebc730d..06817340739 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -68,15 +68,16 @@ class AsyncConnectivityStateWatcherInterface
  protected:
   class Notifier;
 
-  explicit AsyncConnectivityStateWatcherInterface(
-      grpc_closure_scheduler* scheduler = grpc_schedule_on_exec_ctx)
-      : scheduler_(scheduler) {}
+  // If \a combiner is nullptr, then the notification will be scheduled on the
+  // ExecCtx.
+  explicit AsyncConnectivityStateWatcherInterface(Combiner* combiner = nullptr)
+      : combiner_(combiner) {}
 
   // Invoked asynchronously when Notify() is called.
   virtual void OnConnectivityStateChange(grpc_connectivity_state new_state) = 0;
 
  private:
-  grpc_closure_scheduler* scheduler_;
+  Combiner* combiner_;
 };
 
 // Tracks connectivity state.  Maintains a list of watchers that are
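For connectivity watchers the combiner is now passed as a constructor
argument instead of a scheduler, with nullptr meaning "notify on the
ExecCtx". A sketch of a subclass using it (MyWatcher is an illustrative
name, not from this patch):

class MyWatcher : public grpc_core::AsyncConnectivityStateWatcherInterface {
 public:
  explicit MyWatcher(grpc_core::Combiner* combiner)
      : AsyncConnectivityStateWatcherInterface(combiner) {}

 private:
  // Runs under `combiner` if one was supplied, otherwise on the ExecCtx.
  void OnConnectivityStateChange(grpc_connectivity_state new_state) override {
    (void)new_state;  // react to the new state here
  }
};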
diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
index e558d069df9..a7b08393b33 100644
--- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
@@ -33,7 +33,7 @@
 static gpr_mu g_mu;
 static bool g_fail_resolution = true;
-static grpc_combiner* g_combiner;
+static grpc_core::Combiner* g_combiner;
 
 static void my_resolve_address(const char* addr, const char* /*default_port*/,
                                grpc_pollset_set* /*interested_parties*/,
@@ -65,7 +65,7 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
     grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool /*check_grpclb*/, char** /*service_config_json*/,
-    int /*query_timeout_ms*/, grpc_combiner* /*combiner*/) {
+    int /*query_timeout_ms*/, grpc_core::Combiner* /*combiner*/) {
   gpr_mu_lock(&g_mu);
   GPR_ASSERT(0 == strcmp("test", addr));
   grpc_error* error = GRPC_ERROR_NONE;
diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
index 1566bc19507..5eb42b09c53 100644
--- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
@@ -37,14 +37,14 @@ constexpr int kMinResolutionPeriodForCheckMs = 900;
 extern grpc_address_resolver_vtable* grpc_resolve_address_impl;
 static grpc_address_resolver_vtable* default_resolve_address;
 
-static grpc_combiner* g_combiner;
+static grpc_core::Combiner* g_combiner;
 
 static grpc_ares_request* (*g_default_dns_lookup_ares_locked)(
     const char* dns_server, const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner);
+    grpc_core::Combiner* combiner);
 
 // Counter incremented by test_resolve_address_impl indicating the number of
 // times a system-level resolution has happened.
@@ -95,7 +95,7 @@ static grpc_ares_request* test_dns_lookup_ares_locked(
     grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner) {
+    grpc_core::Combiner* combiner) {
   grpc_ares_request* result = g_default_dns_lookup_ares_locked(
       dns_server, name, default_port, g_iomgr_args.pollset_set, on_done,
       addresses, check_grpclb, service_config_json, query_timeout_ms, combiner);
@@ -309,9 +309,9 @@ static void test_cooldown() {
       grpc_core::New<OnResolutionCallbackArg>();
   res_cb_arg->uri_str = "dns:127.0.0.1";
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(start_test_under_combiner, res_cb_arg,
-                                         grpc_combiner_scheduler(g_combiner)),
-                     GRPC_ERROR_NONE);
+  g_combiner->Run(
+      GRPC_CLOSURE_CREATE(start_test_under_combiner, res_cb_arg, nullptr),
+      GRPC_ERROR_NONE);
   grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&g_iomgr_args);
   iomgr_args_finish(&g_iomgr_args);
diff --git a/test/core/client_channel/resolvers/dns_resolver_test.cc b/test/core/client_channel/resolvers/dns_resolver_test.cc
index d23024ce5f6..f86f52b4e92 100644
--- a/test/core/client_channel/resolvers/dns_resolver_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_test.cc
@@ -28,7 +28,7 @@
 #include "src/core/lib/iomgr/combiner.h"
 #include "test/core/util/test_config.h"
 
-static grpc_combiner* g_combiner;
+static grpc_core::Combiner* g_combiner;
 
 class TestResultHandler : public grpc_core::Resolver::ResultHandler {
   void ReturnResult(grpc_core::Resolver::Result /*result*/) override {}
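The test-side pattern in dns_resolver_cooldown_test.cc above is worth
calling out: Run() only enqueues work on the combiner, so the tests follow
it with an explicit ExecCtx flush before they start polling. In isolation:

g_combiner->Run(
    GRPC_CLOSURE_CREATE(start_test_under_combiner, res_cb_arg, nullptr),
    GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();  // drains the combiner on this thread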
diff --git a/test/core/client_channel/resolvers/fake_resolver_test.cc b/test/core/client_channel/resolvers/fake_resolver_test.cc
index 20103348b80..6bfe953b8ae 100644
--- a/test/core/client_channel/resolvers/fake_resolver_test.cc
+++ b/test/core/client_channel/resolvers/fake_resolver_test.cc
@@ -63,7 +63,7 @@ class ResultHandler : public grpc_core::Resolver::ResultHandler {
 };
 
 static grpc_core::OrphanablePtr<grpc_core::Resolver> build_fake_resolver(
-    grpc_combiner* combiner,
+    grpc_core::Combiner* combiner,
     grpc_core::FakeResolverResponseGenerator* response_generator,
     grpc_core::UniquePtr<grpc_core::Resolver::ResultHandler> result_handler) {
   grpc_core::ResolverFactory* factory =
@@ -118,7 +118,7 @@ static grpc_core::Resolver::Result create_new_resolver_result() {
 
 static void test_fake_resolver() {
   grpc_core::ExecCtx exec_ctx;
-  grpc_combiner* combiner = grpc_combiner_create();
+  grpc_core::Combiner* combiner = grpc_combiner_create();
   // Create resolver.
   ResultHandler* result_handler = grpc_core::New<ResultHandler>();
   grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.cc b/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
index 0d83f0ec5bf..9b1d6779e8e 100644
--- a/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
+++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
@@ -28,7 +28,7 @@
 
 #include "test/core/util/test_config.h"
 
-static grpc_combiner* g_combiner;
+static grpc_core::Combiner* g_combiner;
 
 class ResultHandler : public grpc_core::Resolver::ResultHandler {
  public:
diff --git a/test/core/end2end/fixtures/http_proxy_fixture.cc b/test/core/end2end/fixtures/http_proxy_fixture.cc
index da2381aa0a0..8f8ddcc8a49 100644
--- a/test/core/end2end/fixtures/http_proxy_fixture.cc
+++ b/test/core/end2end/fixtures/http_proxy_fixture.cc
@@ -69,7 +69,7 @@ struct grpc_end2end_http_proxy {
   grpc_pollset* pollset;
   gpr_refcount users;
 
-  grpc_combiner* combiner;
+  grpc_core::Combiner* combiner;
 };
 
 //
@@ -154,6 +154,21 @@ enum failure_type {
   SERVER_WRITE_FAILED,
 };
 
+// Forward declarations
+static void on_client_write_done(void* arg, grpc_error* error);
+static void on_server_write_done(void* arg, grpc_error* error);
+static void on_client_read_done(void* arg, grpc_error* error);
+static void on_server_read_done(void* arg, grpc_error* error);
+static void on_server_connect_done(void* arg, grpc_error* error);
+static void on_read_request_done(void* arg, grpc_error* error);
+
+static void on_client_write_done_locked(void* arg, grpc_error* error);
+static void on_server_write_done_locked(void* arg, grpc_error* error);
+static void on_client_read_done_locked(void* arg, grpc_error* error);
+static void on_server_read_done_locked(void* arg, grpc_error* error);
+static void on_server_connect_done_locked(void* arg, grpc_error* error);
+static void on_read_request_done_locked(void* arg, grpc_error* error);
+
 // Helper function to shut down the proxy connection.
 static void proxy_connection_failed(proxy_connection* conn,
                                     failure_type failure, const char* prefix,
@@ -193,7 +208,7 @@ static void proxy_connection_failed(proxy_connection* conn,
 }
 
 // Callback for writing proxy data to the client.
-static void on_client_write_done(void* arg, grpc_error* error) {
+static void on_client_write_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   conn->client_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
@@ -209,6 +224,8 @@ static void on_client_write_done(void* arg, grpc_error* error) {
     grpc_slice_buffer_move_into(&conn->client_deferred_write_buffer,
                                 &conn->client_write_buffer);
     conn->client_is_writing = true;
+    GRPC_CLOSURE_INIT(&conn->on_client_write_done, on_client_write_done, conn,
+                      grpc_schedule_on_exec_ctx);
     grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                         &conn->on_client_write_done, nullptr);
   } else {
@@ -217,8 +234,16 @@ static void on_client_write_done(void* arg, grpc_error* error) {
   }
 }
 
+static void on_client_write_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_client_write_done, on_client_write_done_locked,
+                    conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_client_write_done,
+                             GRPC_ERROR_REF(error));
+}
+
 // Callback for writing proxy data to the backend server.
-static void on_server_write_done(void* arg, grpc_error* error) {
+static void on_server_write_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   conn->server_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
@@ -234,6 +259,8 @@ static void on_server_write_done(void* arg, grpc_error* error) {
     grpc_slice_buffer_move_into(&conn->server_deferred_write_buffer,
                                 &conn->server_write_buffer);
     conn->server_is_writing = true;
+    GRPC_CLOSURE_INIT(&conn->on_server_write_done, on_server_write_done, conn,
+                      grpc_schedule_on_exec_ctx);
     grpc_endpoint_write(conn->server_endpoint, &conn->server_write_buffer,
                         &conn->on_server_write_done, nullptr);
   } else {
@@ -242,9 +269,17 @@ static void on_server_write_done(void* arg, grpc_error* error) {
   }
 }
 
+static void on_server_write_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_server_write_done, on_server_write_done_locked,
+                    conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_server_write_done,
+                             GRPC_ERROR_REF(error));
+}
+
 // Callback for reading data from the client, which will be proxied to
 // the backend server.
-static void on_client_read_done(void* arg, grpc_error* error) {
+static void on_client_read_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(conn, CLIENT_READ_FAILED, "HTTP proxy client read",
@@ -265,17 +300,28 @@ static void on_client_read_done(void* arg, grpc_error* error) {
                                 &conn->server_write_buffer);
     proxy_connection_ref(conn, "client_read");
     conn->server_is_writing = true;
+    GRPC_CLOSURE_INIT(&conn->on_server_write_done, on_server_write_done, conn,
+                      grpc_schedule_on_exec_ctx);
     grpc_endpoint_write(conn->server_endpoint, &conn->server_write_buffer,
                         &conn->on_server_write_done, nullptr);
   }
   // Read more data.
+  GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_client_read_done, /*urgent=*/false);
 }
 
+static void on_client_read_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done_locked,
+                    conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_client_read_done, GRPC_ERROR_REF(error));
+}
+
 // Callback for reading data from the backend server, which will be
 // proxied to the client.
-static void on_server_read_done(void* arg, grpc_error* error) {
+static void on_server_read_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(conn, SERVER_READ_FAILED, "HTTP proxy server read",
@@ -296,16 +342,27 @@ static void on_server_read_done(void* arg, grpc_error* error) {
                                 &conn->client_write_buffer);
     proxy_connection_ref(conn, "server_read");
     conn->client_is_writing = true;
+    GRPC_CLOSURE_INIT(&conn->on_client_write_done, on_client_write_done, conn,
+                      grpc_schedule_on_exec_ctx);
     grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                         &conn->on_client_write_done, nullptr);
   }
   // Read more data.
+  GRPC_CLOSURE_INIT(&conn->on_server_read_done, on_server_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(conn->server_endpoint, &conn->server_read_buffer,
                      &conn->on_server_read_done, /*urgent=*/false);
 }
 
+static void on_server_read_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_server_read_done, on_server_read_done_locked,
+                    conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_server_read_done, GRPC_ERROR_REF(error));
+}
+
 // Callback to write the HTTP response for the CONNECT request.
-static void on_write_response_done(void* arg, grpc_error* error) {
+static void on_write_response_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   conn->client_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
@@ -321,15 +378,27 @@ static void on_write_response_done(void* arg, grpc_error* error) {
   proxy_connection_ref(conn, "client_read");
   proxy_connection_ref(conn, "server_read");
   proxy_connection_unref(conn, "write_response");
+  GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_client_read_done, /*urgent=*/false);
+  GRPC_CLOSURE_INIT(&conn->on_server_read_done, on_server_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(conn->server_endpoint, &conn->server_read_buffer,
                      &conn->on_server_read_done, /*urgent=*/false);
 }
 
+static void on_write_response_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_write_response_done,
+                    on_write_response_done_locked, conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_write_response_done,
+                             GRPC_ERROR_REF(error));
+}
+
 // Callback to connect to the backend server specified by the HTTP
 // CONNECT request.
-static void on_server_connect_done(void* arg, grpc_error* error) {
+static void on_server_connect_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   if (error != GRPC_ERROR_NONE) {
     // TODO(roth): Technically, in this case, we should handle the error
@@ -348,10 +417,20 @@ static void on_server_connect_done(void* arg, grpc_error* error) {
       grpc_slice_from_copied_string("HTTP/1.0 200 connected\r\n\r\n");
   grpc_slice_buffer_add(&conn->client_write_buffer, slice);
   conn->client_is_writing = true;
+  GRPC_CLOSURE_INIT(&conn->on_write_response_done, on_write_response_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                       &conn->on_write_response_done, nullptr);
 }
 
+static void on_server_connect_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_server_connect_done,
+                    on_server_connect_done_locked, conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_server_connect_done,
+                             GRPC_ERROR_REF(error));
+}
+
 /**
  * Parses the proxy auth header value to check if it matches :-
  * Basic
@@ -378,7 +457,7 @@ static bool proxy_auth_header_matches(char* proxy_auth_header_val,
 // the client indicating that the request failed.  However, for the purposes
 // of this test code, it's fine to pretend this is a client-side error,
 // which will cause the client connection to be dropped.
-static void on_read_request_done(void* arg, grpc_error* error) {
+static void on_read_request_done_locked(void* arg, grpc_error* error) {
   proxy_connection* conn = static_cast<proxy_connection*>(arg);
   gpr_log(GPR_DEBUG, "on_read_request_done: %p %s", conn,
           grpc_error_string(error));
@@ -403,6 +482,8 @@ static void on_read_request_done(void* arg, grpc_error* error) {
   grpc_slice_buffer_reset_and_unref(&conn->client_read_buffer);
   // If we're not done reading the request, read more data.
   if (conn->http_parser.state != GRPC_HTTP_BODY) {
+    GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
+                      grpc_schedule_on_exec_ctx);
     grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                        &conn->on_read_request_done, /*urgent=*/false);
     return;
@@ -456,12 +537,22 @@ static void on_read_request_done(void* arg, grpc_error* error) {
   // The connection callback inherits our reference to conn.
   const grpc_millis deadline =
       grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
+  GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_tcp_client_connect(&conn->on_server_connect_done, &conn->server_endpoint,
                           conn->pollset_set, nullptr,
                           &resolved_addresses->addrs[0], deadline);
   grpc_resolved_addresses_destroy(resolved_addresses);
 }
 
+static void on_read_request_done(void* arg, grpc_error* error) {
+  proxy_connection* conn = static_cast<proxy_connection*>(arg);
+  GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done_locked,
+                    conn, nullptr);
+  conn->proxy->combiner->Run(&conn->on_read_request_done,
+                             GRPC_ERROR_REF(error));
+}
+
 static void on_accept(void* arg, grpc_endpoint* endpoint,
                       grpc_pollset* accepting_pollset,
                       grpc_tcp_server_acceptor* acceptor) {
@@ -477,20 +568,6 @@ static void on_accept(void* arg, grpc_endpoint* endpoint,
   conn->pollset_set = grpc_pollset_set_create();
   grpc_pollset_set_add_pollset(conn->pollset_set, proxy->pollset);
   grpc_endpoint_add_to_pollset_set(endpoint, conn->pollset_set);
-  GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_write_response_done, on_write_response_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_client_write_done, on_client_write_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_server_read_done, on_server_read_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
-  GRPC_CLOSURE_INIT(&conn->on_server_write_done, on_server_write_done, conn,
-                    grpc_combiner_scheduler(conn->proxy->combiner));
   grpc_slice_buffer_init(&conn->client_read_buffer);
   grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
   conn->client_is_writing = false;
@@ -501,6 +578,8 @@ static void on_accept(void* arg, grpc_endpoint* endpoint,
   grpc_slice_buffer_init(&conn->server_write_buffer);
   grpc_http_parser_init(&conn->http_parser, GRPC_HTTP_REQUEST,
                         &conn->http_request);
+  GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_read_request_done, /*urgent=*/false);
 }
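Because the same grpc_closure storage in proxy_connection now alternates
between an unlocked callback (handed to the endpoint) and its _locked
counterpart (handed to the combiner), it is re-initialized before every use
instead of once in on_accept. The alternation, in isolation:

// Before handing the closure to the endpoint:
GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done, conn,
                  grpc_schedule_on_exec_ctx);
// Inside the unlocked callback, before handing it to the combiner:
GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done_locked,
                  conn, nullptr);

Note also the GRPC_ERROR_REF in every wrapper: the error borrowed by the
ExecCtx callback is released when that callback returns, so the deferred
combiner closure needs its own reference.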
diff --git a/test/core/end2end/fuzzers/api_fuzzer.cc b/test/core/end2end/fuzzers/api_fuzzer.cc
index 96a9bbc30c9..c5d5aa8afc7 100644
--- a/test/core/end2end/fuzzers/api_fuzzer.cc
+++ b/test/core/end2end/fuzzers/api_fuzzer.cc
@@ -379,7 +379,7 @@ grpc_ares_request* my_dns_lookup_ares_locked(
     grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool /*check_grpclb*/, char** /*service_config_json*/,
-    int /*query_timeout*/, grpc_combiner* /*combiner*/) {
+    int /*query_timeout*/, grpc_core::Combiner* /*combiner*/) {
   addr_req* r = static_cast<addr_req*>(gpr_malloc(sizeof(*r)));
   r->addr = gpr_strdup(addr);
   r->on_done = on_done;
diff --git a/test/core/end2end/goaway_server_test.cc b/test/core/end2end/goaway_server_test.cc
index 5db2aebe9a1..e180ec8f29c 100644
--- a/test/core/end2end/goaway_server_test.cc
+++ b/test/core/end2end/goaway_server_test.cc
@@ -49,7 +49,7 @@ static grpc_ares_request* (*iomgr_dns_lookup_ares_locked)(
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner);
+    grpc_core::Combiner* combiner);
 
 static void (*iomgr_cancel_ares_request_locked)(grpc_ares_request* request);
 
@@ -106,7 +106,7 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_core::UniquePtr<grpc_core::ServerAddressList>* addresses,
     bool check_grpclb, char** service_config_json, int query_timeout_ms,
-    grpc_combiner* combiner) {
+    grpc_core::Combiner* combiner) {
   if (0 != strcmp(addr, "test")) {
     return iomgr_dns_lookup_ares_locked(
         dns_server, addr, default_port, interested_parties, on_done, addresses,
diff --git a/test/core/iomgr/combiner_test.cc b/test/core/iomgr/combiner_test.cc
index e6e5cdfbd5f..1554a0909cd 100644
--- a/test/core/iomgr/combiner_test.cc
+++ b/test/core/iomgr/combiner_test.cc
@@ -39,13 +39,12 @@ static void set_event_to_true(void* value, grpc_error* /*error*/) {
 
 static void test_execute_one(void) {
   gpr_log(GPR_DEBUG, "test_execute_one");
-  grpc_combiner* lock = grpc_combiner_create();
+  grpc_core::Combiner* lock = grpc_combiner_create();
   gpr_event done;
   gpr_event_init(&done);
   grpc_core::ExecCtx exec_ctx;
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(set_event_to_true, &done,
-                                         grpc_combiner_scheduler(lock)),
-                     GRPC_ERROR_NONE);
+  lock->Run(GRPC_CLOSURE_CREATE(set_event_to_true, &done, nullptr),
+            GRPC_ERROR_NONE);
   grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&done, grpc_timeout_seconds_to_deadline(5)) !=
              nullptr);
@@ -54,7 +53,7 @@ static void test_execute_one(void) {
 
 typedef struct {
   size_t ctr;
-  grpc_combiner* lock;
+  grpc_core::Combiner* lock;
   gpr_event done;
 } thd_args;
 
@@ -79,24 +78,22 @@ static void execute_many_loop(void* a) {
       ex_args* c = static_cast<ex_args*>(gpr_malloc(sizeof(*c)));
       c->ctr = &args->ctr;
       c->value = n++;
-      GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(
-                             check_one, c, grpc_combiner_scheduler(args->lock)),
-                         GRPC_ERROR_NONE);
+      args->lock->Run(GRPC_CLOSURE_CREATE(check_one, c, nullptr),
+                      GRPC_ERROR_NONE);
       grpc_core::ExecCtx::Get()->Flush();
     }
     // sleep for a little bit, to test a combiner draining and another thread
     // picking it up
     gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
   }
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(set_event_to_true, &args->done,
-                                         grpc_combiner_scheduler(args->lock)),
-                     GRPC_ERROR_NONE);
+  args->lock->Run(GRPC_CLOSURE_CREATE(set_event_to_true, &args->done, nullptr),
+                  GRPC_ERROR_NONE);
 }
 
 static void test_execute_many(void) {
   gpr_log(GPR_DEBUG, "test_execute_many");
-  grpc_combiner* lock = grpc_combiner_create();
+  grpc_core::Combiner* lock = grpc_combiner_create();
   grpc_core::Thread thds[100];
   thd_args ta[GPR_ARRAY_SIZE(thds)];
   for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
@@ -122,21 +119,17 @@ static void in_finally(void* /*arg*/, grpc_error* /*error*/) {
 }
 
 static void add_finally(void* arg, grpc_error* /*error*/) {
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(in_finally, arg,
-                                         grpc_combiner_finally_scheduler(
-                                             static_cast<grpc_combiner*>(arg))),
-                     GRPC_ERROR_NONE);
+  static_cast<grpc_core::Combiner*>(arg)->Run(
+      GRPC_CLOSURE_CREATE(in_finally, arg, nullptr), GRPC_ERROR_NONE);
 }
 
 static void test_execute_finally(void) {
   gpr_log(GPR_DEBUG, "test_execute_finally");
-  grpc_combiner* lock = grpc_combiner_create();
+  grpc_core::Combiner* lock = grpc_combiner_create();
   grpc_core::ExecCtx exec_ctx;
   gpr_event_init(&got_in_finally);
-  GRPC_CLOSURE_SCHED(
-      GRPC_CLOSURE_CREATE(add_finally, lock, grpc_combiner_scheduler(lock)),
-      GRPC_ERROR_NONE);
+  lock->Run(GRPC_CLOSURE_CREATE(add_finally, lock, nullptr), GRPC_ERROR_NONE);
   grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&got_in_finally,
                             grpc_timeout_seconds_to_deadline(5)) != nullptr);
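check_one is not shown in this diff; given the ex_args fields used above, a
plausible shape is the FIFO assertion below, which is the invariant the test
exercises: items Run() on one combiner execute in enqueue order even when
posted from 100 threads.

static void check_one(void* a, grpc_error* /*error*/) {
  ex_args* args = static_cast<ex_args*>(a);
  // Each closure sees the counter value left by the one enqueued before it.
  GPR_ASSERT(*args->ctr == args->value - 1);
  *args->ctr = args->value;
  gpr_free(a);
}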
diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc
index 4c8fedd2825..0a0e8d664e7 100644
--- a/test/cpp/microbenchmarks/bm_closure.cc
+++ b/test/cpp/microbenchmarks/bm_closure.cc
@@ -65,12 +65,12 @@ BENCHMARK(BM_ClosureInitAgainstExecCtx);
 
 static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner = grpc_combiner_create();
+  grpc_core::Combiner* combiner = grpc_combiner_create();
   grpc_closure c;
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    benchmark::DoNotOptimize(GRPC_CLOSURE_INIT(
-        &c, DoNothing, nullptr, grpc_combiner_scheduler(combiner)));
+    benchmark::DoNotOptimize(
+        GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, nullptr));
   }
   GRPC_COMBINER_UNREF(combiner, "finished");
 
@@ -242,12 +242,12 @@ BENCHMARK(BM_TryAcquireSpinlock);
 
 static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner = grpc_combiner_create();
+  grpc_core::Combiner* combiner = grpc_combiner_create();
   grpc_closure c;
-  GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+  GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, nullptr);
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
+    combiner->Run(&c, GRPC_ERROR_NONE);
     grpc_core::ExecCtx::Get()->Flush();
   }
   GRPC_COMBINER_UNREF(combiner, "finished");
@@ -258,15 +258,15 @@ BENCHMARK(BM_ClosureSchedOnCombiner);
 
 static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner = grpc_combiner_create();
+  grpc_core::Combiner* combiner = grpc_combiner_create();
   grpc_closure c1;
   grpc_closure c2;
-  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, nullptr);
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    combiner->Run(&c1, GRPC_ERROR_NONE);
+    combiner->Run(&c2, GRPC_ERROR_NONE);
     grpc_core::ExecCtx::Get()->Flush();
   }
   GRPC_COMBINER_UNREF(combiner, "finished");
@@ -277,18 +277,18 @@ BENCHMARK(BM_ClosureSched2OnCombiner);
 
 static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner = grpc_combiner_create();
+  grpc_core::Combiner* combiner = grpc_combiner_create();
   grpc_closure c1;
   grpc_closure c2;
   grpc_closure c3;
-  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, nullptr);
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
+    combiner->Run(&c1, GRPC_ERROR_NONE);
+    combiner->Run(&c2, GRPC_ERROR_NONE);
+    combiner->Run(&c3, GRPC_ERROR_NONE);
     grpc_core::ExecCtx::Get()->Flush();
   }
   GRPC_COMBINER_UNREF(combiner, "finished");
@@ -299,18 +299,16 @@ BENCHMARK(BM_ClosureSched3OnCombiner);
 
 static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner1 = grpc_combiner_create();
-  grpc_combiner* combiner2 = grpc_combiner_create();
+  grpc_core::Combiner* combiner1 = grpc_combiner_create();
+  grpc_core::Combiner* combiner2 = grpc_combiner_create();
   grpc_closure c1;
   grpc_closure c2;
-  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner1));
-  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner2));
+  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, nullptr);
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    combiner1->Run(&c1, GRPC_ERROR_NONE);
+    combiner2->Run(&c2, GRPC_ERROR_NONE);
     grpc_core::ExecCtx::Get()->Flush();
   }
   GRPC_COMBINER_UNREF(combiner1, "finished");
@@ -322,26 +320,22 @@ BENCHMARK(BM_ClosureSched2OnTwoCombiners);
 
 static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_combiner* combiner1 = grpc_combiner_create();
-  grpc_combiner* combiner2 = grpc_combiner_create();
+  grpc_core::Combiner* combiner1 = grpc_combiner_create();
+  grpc_core::Combiner* combiner2 = grpc_combiner_create();
   grpc_closure c1;
   grpc_closure c2;
   grpc_closure c3;
   grpc_closure c4;
-  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner1));
-  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner2));
-  GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner1));
-  GRPC_CLOSURE_INIT(&c4, DoNothing, nullptr,
-                    grpc_combiner_scheduler(combiner2));
+  GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, nullptr);
+  GRPC_CLOSURE_INIT(&c4, DoNothing, nullptr, nullptr);
   grpc_core::ExecCtx exec_ctx;
   for (auto _ : state) {
-    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&c4, GRPC_ERROR_NONE);
+    combiner1->Run(&c1, GRPC_ERROR_NONE);
+    combiner2->Run(&c2, GRPC_ERROR_NONE);
+    combiner1->Run(&c3, GRPC_ERROR_NONE);
+    combiner2->Run(&c4, GRPC_ERROR_NONE);
     grpc_core::ExecCtx::Get()->Flush();
   }
   GRPC_COMBINER_UNREF(combiner1, "finished");
@@ -390,32 +384,6 @@ static void BM_ClosureReschedOnExecCtx(benchmark::State& state) {
 }
 BENCHMARK(BM_ClosureReschedOnExecCtx);
 
-static void BM_ClosureReschedOnCombiner(benchmark::State& state) {
-  TrackCounters track_counters;
-  grpc_core::ExecCtx exec_ctx;
-  grpc_combiner* combiner = grpc_combiner_create();
-  Rescheduler r(state, grpc_combiner_scheduler(combiner));
-  r.ScheduleFirst();
-  grpc_core::ExecCtx::Get()->Flush();
-  GRPC_COMBINER_UNREF(combiner, "finished");
-
-  track_counters.Finish(state);
-}
-BENCHMARK(BM_ClosureReschedOnCombiner);
-
-static void BM_ClosureReschedOnCombinerFinally(benchmark::State& state) {
-  TrackCounters track_counters;
-  grpc_core::ExecCtx exec_ctx;
-  grpc_combiner* combiner = grpc_combiner_create();
-  Rescheduler r(state, grpc_combiner_finally_scheduler(combiner));
-  r.ScheduleFirstAgainstDifferentScheduler(grpc_combiner_scheduler(combiner));
-  grpc_core::ExecCtx::Get()->Flush();
-  GRPC_COMBINER_UNREF(combiner, "finished");
-
-  track_counters.Finish(state);
-}
-BENCHMARK(BM_ClosureReschedOnCombinerFinally);
-
 // Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
 // and others do not. This allows us to support both modes.
 namespace benchmark {
diff --git a/test/cpp/naming/cancel_ares_query_test.cc b/test/cpp/naming/cancel_ares_query_test.cc
index c0210dfc037..86408fb49fc 100644
--- a/test/cpp/naming/cancel_ares_query_test.cc
+++ b/test/cpp/naming/cancel_ares_query_test.cc
@@ -81,7 +81,7 @@ struct ArgsStruct {
   gpr_mu* mu;
   grpc_pollset* pollset;
   grpc_pollset_set* pollset_set;
-  grpc_combiner* lock;
+  grpc_core::Combiner* lock;
   grpc_channel_args* channel_args;
 };
 
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc
index 8dc6cef9267..ac170e4a99a 100644
--- a/test/cpp/naming/resolver_component_test.cc
+++ b/test/cpp/naming/resolver_component_test.cc
@@ -192,7 +192,7 @@ struct ArgsStruct {
   gpr_mu* mu;
   grpc_pollset* pollset;
   grpc_pollset_set* pollset_set;
-  grpc_combiner* lock;
+  grpc_core::Combiner* lock;
   grpc_channel_args* channel_args;
   vector expected_addrs;
   std::string expected_service_config_string;
@@ -616,9 +616,9 @@ void RunResolvesRelevantRecordsTest(
       CreateResultHandler(&args));
   grpc_channel_args_destroy(resolver_args);
   gpr_free(whole_uri);
-  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(StartResolvingLocked, resolver.get(),
-                                         grpc_combiner_scheduler(args.lock)),
-                     GRPC_ERROR_NONE);
+  args.lock->Run(
+      GRPC_CLOSURE_CREATE(StartResolvingLocked, resolver.get(), nullptr),
+      GRPC_ERROR_NONE);
   grpc_core::ExecCtx::Get()->Flush();
   PollPollsetUntilRequestDone(&args);
   ArgsFinish(&args);
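In summary, the migration applied across all of the files above
(illustrative closure c on combiner lock):

// Before:
//   GRPC_CLOSURE_INIT(&c, cb, arg, grpc_combiner_scheduler(lock));
//   GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
// After:
//   GRPC_CLOSURE_INIT(&c, cb, arg, nullptr);
//   lock->Run(&c, GRPC_ERROR_NONE);
// And for closures that must run just before the lock is released:
//   grpc_combiner_finally_scheduler(lock)  ->  lock->FinallyRun(&c, ...)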