mirror of https://github.com/grpc/grpc.git
commit
0df63d3c98
115 changed files with 3274 additions and 3559 deletions
File diff suppressed because it is too large
Load Diff
@ -1,946 +0,0 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2015 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/request_routing.h" |
||||
|
||||
#include <inttypes.h> |
||||
#include <limits.h> |
||||
#include <stdbool.h> |
||||
#include <stdio.h> |
||||
#include <string.h> |
||||
|
||||
#include <grpc/support/alloc.h> |
||||
#include <grpc/support/log.h> |
||||
#include <grpc/support/string_util.h> |
||||
#include <grpc/support/sync.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/backup_poller.h" |
||||
#include "src/core/ext/filters/client_channel/global_subchannel_pool.h" |
||||
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" |
||||
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" |
||||
#include "src/core/ext/filters/client_channel/local_subchannel_pool.h" |
||||
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" |
||||
#include "src/core/ext/filters/client_channel/resolver_registry.h" |
||||
#include "src/core/ext/filters/client_channel/retry_throttle.h" |
||||
#include "src/core/ext/filters/client_channel/server_address.h" |
||||
#include "src/core/ext/filters/client_channel/subchannel.h" |
||||
#include "src/core/ext/filters/deadline/deadline_filter.h" |
||||
#include "src/core/lib/backoff/backoff.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/channel/connected_channel.h" |
||||
#include "src/core/lib/channel/status_util.h" |
||||
#include "src/core/lib/gpr/string.h" |
||||
#include "src/core/lib/gprpp/inlined_vector.h" |
||||
#include "src/core/lib/gprpp/manual_constructor.h" |
||||
#include "src/core/lib/iomgr/combiner.h" |
||||
#include "src/core/lib/iomgr/iomgr.h" |
||||
#include "src/core/lib/iomgr/polling_entity.h" |
||||
#include "src/core/lib/profiling/timers.h" |
||||
#include "src/core/lib/slice/slice_internal.h" |
||||
#include "src/core/lib/slice/slice_string_helpers.h" |
||||
#include "src/core/lib/surface/channel.h" |
||||
#include "src/core/lib/transport/connectivity_state.h" |
||||
#include "src/core/lib/transport/error_utils.h" |
||||
#include "src/core/lib/transport/metadata.h" |
||||
#include "src/core/lib/transport/metadata_batch.h" |
||||
#include "src/core/lib/transport/service_config.h" |
||||
#include "src/core/lib/transport/static_metadata.h" |
||||
#include "src/core/lib/transport/status_metadata.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
//
|
||||
// RequestRouter::Request::ResolverResultWaiter
|
||||
//
|
||||
|
||||
// Handles waiting for a resolver result.
// Used only for the first call on an idle channel.
class RequestRouter::Request::ResolverResultWaiter {
 public:
  // Queues the request to be processed when the next resolver result
  // arrives and registers for call-cancellation notification.  The object
  // owns its own lifetime: whichever of DoneLocked()/CancelLocked() runs
  // second (tracked via finished_) deletes it.
  explicit ResolverResultWaiter(Request* request)
      : request_router_(request->request_router_),
        request_(request),
        tracer_enabled_(request_router_->tracer_->enabled()) {
    if (tracer_enabled_) {
      gpr_log(GPR_INFO,
              "request_router=%p request=%p: deferring pick pending resolver "
              "result",
              request_router_, request);
    }
    // Add closure to be run when a resolver result is available.
    GRPC_CLOSURE_INIT(&done_closure_, &DoneLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    AddToWaitingList();
    // Set cancellation closure, so that we abort if the call is cancelled.
    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
                                            &cancel_closure_);
  }

 private:
  // Adds done_closure_ to
  // request_router_->waiting_for_resolver_result_closures_.
  void AddToWaitingList() {
    grpc_closure_list_append(
        &request_router_->waiting_for_resolver_result_closures_, &done_closure_,
        GRPC_ERROR_NONE);
  }

  // Invoked when a resolver result is available.
  static void DoneLocked(void* arg, grpc_error* error) {
    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If CancelLocked() has already run, delete ourselves without doing
    // anything. Note that the call stack may have already been destroyed,
    // so it's not safe to access anything in state_.
    if (GPR_UNLIKELY(self->finished_)) {
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p: call cancelled before resolver result",
                request_router);
      }
      Delete(self);
      return;
    }
    // Otherwise, process the resolver result.
    Request* request = self->request_;
    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
      // Resolver reported an error; propagate it to the caller.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: resolver failed to return data",
                request_router, request);
      }
      GRPC_CLOSURE_RUN(request->on_route_done_, GRPC_ERROR_REF(error));
    } else if (GPR_UNLIKELY(request_router->resolver_ == nullptr)) {
      // Shutting down.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO, "request_router=%p request=%p: resolver disconnected",
                request_router, request);
      }
      GRPC_CLOSURE_RUN(request->on_route_done_,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
    } else if (GPR_UNLIKELY(request_router->lb_policy_ == nullptr)) {
      // Transient resolver failure.
      // If call has wait_for_ready=true, try again; otherwise, fail.
      if (*request->pick_.initial_metadata_flags &
          GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: resolver returned but no LB "
                  "policy; wait_for_ready=true; trying again",
                  request_router, request);
        }
        // Re-add ourselves to the waiting list.
        self->AddToWaitingList();
        // Return early so that we don't set finished_ to true below.
        return;
      } else {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: resolver returned but no LB "
                  "policy; wait_for_ready=false; failing",
                  request_router, request);
        }
        GRPC_CLOSURE_RUN(
            request->on_route_done_,
            grpc_error_set_int(
                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
                GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
      }
    } else {
      // We have both a resolver result and an LB policy; proceed with
      // the pick.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: resolver returned, doing LB "
                "pick",
                request_router, request);
      }
      request->ProcessServiceConfigAndStartLbPickLocked();
    }
    // Mark ourselves done so that CancelLocked() knows to delete this
    // object when it eventually runs.
    self->finished_ = true;
  }

  // Invoked when the call is cancelled.
  // Note: This runs under the client_channel combiner, but will NOT be
  // holding the call combiner.
  static void CancelLocked(void* arg, grpc_error* error) {
    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If DoneLocked() has already run, delete ourselves without doing anything.
    if (self->finished_) {
      Delete(self);
      return;
    }
    Request* request = self->request_;
    // If we are being cancelled, immediately invoke on_route_done_
    // to propagate the error back to the caller.
    if (error != GRPC_ERROR_NONE) {
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: cancelling call waiting for "
                "name resolution",
                request_router, request);
      }
      // Note: Although we are not in the call combiner here, we are
      // basically stealing the call combiner from the pending pick, so
      // it's safe to run on_route_done_ here -- we are essentially
      // calling it here instead of calling it in DoneLocked().
      GRPC_CLOSURE_RUN(request->on_route_done_,
                       GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "Pick cancelled", &error, 1));
    }
    // Mark ourselves done so that DoneLocked() knows to delete this object.
    self->finished_ = true;
  }

  RequestRouter* request_router_;
  Request* request_;
  const bool tracer_enabled_;
  grpc_closure done_closure_;
  grpc_closure cancel_closure_;
  // Set by whichever of DoneLocked()/CancelLocked() runs first; the
  // second one to run sees it and deletes the object.
  bool finished_ = false;
};
||||
|
||||
//
|
||||
// RequestRouter::Request::AsyncPickCanceller
|
||||
//
|
||||
|
||||
// Handles the call combiner cancellation callback for an async LB pick.
class RequestRouter::Request::AsyncPickCanceller {
 public:
  // Takes a ref to the owning call stack; released in MarkFinishedLocked()
  // if the pick completes first, otherwise in CancelLocked().
  explicit AsyncPickCanceller(Request* request)
      : request_router_(request->request_router_),
        request_(request),
        tracer_enabled_(request_router_->tracer_->enabled()) {
    GRPC_CALL_STACK_REF(request->owning_call_, "pick_callback_cancel");
    // Set cancellation closure, so that we abort if the call is cancelled.
    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
                                            &cancel_closure_);
  }

  // Called when the pick completes, so that a subsequent CancelLocked()
  // invocation becomes a no-op (it will just delete this object).
  void MarkFinishedLocked() {
    finished_ = true;
    GRPC_CALL_STACK_UNREF(request_->owning_call_, "pick_callback_cancel");
  }

 private:
  // Invoked when the call is cancelled.
  // Note: This runs under the client_channel combiner, but will NOT be
  // holding the call combiner.
  static void CancelLocked(void* arg, grpc_error* error) {
    AsyncPickCanceller* self = static_cast<AsyncPickCanceller*>(arg);
    Request* request = self->request_;
    RequestRouter* request_router = self->request_router_;
    if (!self->finished_) {
      // Note: request_router->lb_policy_ may have changed since we started our
      // pick, in which case we will be cancelling the pick on a policy other
      // than the one we started it on. However, this will just be a no-op.
      if (error != GRPC_ERROR_NONE && request_router->lb_policy_ != nullptr) {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: cancelling pick from LB "
                  "policy %p",
                  request_router, request, request_router->lb_policy_.get());
        }
        request_router->lb_policy_->CancelPickLocked(&request->pick_,
                                                     GRPC_ERROR_REF(error));
      }
      request->pick_canceller_ = nullptr;
      GRPC_CALL_STACK_UNREF(request->owning_call_, "pick_callback_cancel");
    }
    // This closure runs at most once, so the object is no longer needed.
    Delete(self);
  }

  RequestRouter* request_router_;
  Request* request_;
  const bool tracer_enabled_;
  grpc_closure cancel_closure_;
  bool finished_ = false;
};
||||
|
||||
//
|
||||
// RequestRouter::Request
|
||||
//
|
||||
|
||||
// Constructs a routing request for a single call.  on_route_done is
// invoked (under the combiner) once routing succeeds or fails.
RequestRouter::Request::Request(grpc_call_stack* owning_call,
                                grpc_call_combiner* call_combiner,
                                grpc_polling_entity* pollent,
                                grpc_metadata_batch* send_initial_metadata,
                                uint32_t* send_initial_metadata_flags,
                                ApplyServiceConfigCallback apply_service_config,
                                void* apply_service_config_user_data,
                                grpc_closure* on_route_done)
    : owning_call_(owning_call),
      call_combiner_(call_combiner),
      pollent_(pollent),
      apply_service_config_(apply_service_config),
      apply_service_config_user_data_(apply_service_config_user_data),
      on_route_done_(on_route_done) {
  // Expose the call's initial metadata (and flags such as wait_for_ready)
  // to the LB pick.
  pick_.initial_metadata = send_initial_metadata;
  pick_.initial_metadata_flags = send_initial_metadata_flags;
}
||||
|
||||
RequestRouter::Request::~Request() {
  // Drop our ref to the connected subchannel, if the pick produced one.
  if (pick_.connected_subchannel != nullptr) {
    pick_.connected_subchannel.reset();
  }
  // Destroy any call context elements populated by the LB pick.
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
    if (pick_.subchannel_call_context[i].destroy != nullptr) {
      pick_.subchannel_call_context[i].destroy(
          pick_.subchannel_call_context[i].value);
    }
  }
}
||||
|
||||
// Invoked once resolver results are available.
|
||||
void RequestRouter::Request::ProcessServiceConfigAndStartLbPickLocked() { |
||||
// Get service config data if needed.
|
||||
if (!apply_service_config_(apply_service_config_user_data_)) return; |
||||
// Start LB pick.
|
||||
StartLbPickLocked(); |
||||
} |
||||
|
||||
void RequestRouter::Request::MaybeAddCallToInterestedPartiesLocked() { |
||||
if (!pollent_added_to_interested_parties_) { |
||||
pollent_added_to_interested_parties_ = true; |
||||
grpc_polling_entity_add_to_pollset_set( |
||||
pollent_, request_router_->interested_parties_); |
||||
} |
||||
} |
||||
|
||||
void RequestRouter::Request::MaybeRemoveCallFromInterestedPartiesLocked() { |
||||
if (pollent_added_to_interested_parties_) { |
||||
pollent_added_to_interested_parties_ = false; |
||||
grpc_polling_entity_del_from_pollset_set( |
||||
pollent_, request_router_->interested_parties_); |
||||
} |
||||
} |
||||
|
||||
// Starts a pick on the LB policy.
void RequestRouter::Request::StartLbPickLocked() {
  if (request_router_->tracer_->enabled()) {
    gpr_log(GPR_INFO,
            "request_router=%p request=%p: starting pick on lb_policy=%p",
            request_router_, this, request_router_->lb_policy_.get());
  }
  GRPC_CLOSURE_INIT(&on_pick_done_, &LbPickDoneLocked, this,
                    grpc_combiner_scheduler(request_router_->combiner_));
  pick_.on_complete = &on_pick_done_;
  // Hold the call stack alive while the pick is pending; released below
  // on synchronous completion or in LbPickDoneLocked() otherwise.
  GRPC_CALL_STACK_REF(owning_call_, "pick_callback");
  grpc_error* error = GRPC_ERROR_NONE;
  const bool pick_done =
      request_router_->lb_policy_->PickLocked(&pick_, &error);
  if (pick_done) {
    // Pick completed synchronously.
    if (request_router_->tracer_->enabled()) {
      gpr_log(GPR_INFO,
              "request_router=%p request=%p: pick completed synchronously",
              request_router_, this);
    }
    GRPC_CLOSURE_RUN(on_route_done_, error);
    GRPC_CALL_STACK_UNREF(owning_call_, "pick_callback");
  } else {
    // Pick will be returned asynchronously.
    // Add the request's polling entity to the request_router's
    // interested_parties, so that the I/O of the LB policy can be done
    // under it. It will be removed in LbPickDoneLocked().
    MaybeAddCallToInterestedPartiesLocked();
    // Request notification on call cancellation.
    // We allocate a separate object to track cancellation, since the
    // cancellation closure might still be pending when we need to reuse
    // the memory in which this Request object is stored for a subsequent
    // retry attempt.
    pick_canceller_ = New<AsyncPickCanceller>(this);
  }
}
||||
|
||||
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes on_route_done_.
void RequestRouter::Request::LbPickDoneLocked(void* arg, grpc_error* error) {
  Request* self = static_cast<Request*>(arg);
  RequestRouter* request_router = self->request_router_;
  if (request_router->tracer_->enabled()) {
    gpr_log(GPR_INFO,
            "request_router=%p request=%p: pick completed asynchronously",
            request_router, self);
  }
  // Undo the interested_parties registration made in StartLbPickLocked().
  self->MaybeRemoveCallFromInterestedPartiesLocked();
  // Tell the canceller (if any) that the pick is done, so that a later
  // cancellation callback becomes a no-op.
  if (self->pick_canceller_ != nullptr) {
    self->pick_canceller_->MarkFinishedLocked();
  }
  GRPC_CLOSURE_RUN(self->on_route_done_, GRPC_ERROR_REF(error));
  // Matches the ref taken in StartLbPickLocked().
  GRPC_CALL_STACK_UNREF(self->owning_call_, "pick_callback");
}
||||
|
||||
//
|
||||
// RequestRouter::LbConnectivityWatcher
|
||||
//
|
||||
|
||||
// Watches the connectivity state of an LB policy and propagates each
// state change to the RequestRouter.  Deletes itself when the watch
// ends: when the policy it watches has been replaced, or when the
// policy reaches SHUTDOWN.
class RequestRouter::LbConnectivityWatcher {
 public:
  LbConnectivityWatcher(RequestRouter* request_router,
                        grpc_connectivity_state state,
                        LoadBalancingPolicy* lb_policy,
                        grpc_channel_stack* owning_stack,
                        grpc_combiner* combiner)
      : request_router_(request_router),
        state_(state),
        lb_policy_(lb_policy),
        owning_stack_(owning_stack) {
    // Hold the channel stack alive for the duration of the watch;
    // released in the destructor.
    GRPC_CHANNEL_STACK_REF(owning_stack_, "LbConnectivityWatcher");
    GRPC_CLOSURE_INIT(&on_changed_, &OnLbPolicyStateChangedLocked, this,
                      grpc_combiner_scheduler(combiner));
    lb_policy_->NotifyOnStateChangeLocked(&state_, &on_changed_);
  }

  ~LbConnectivityWatcher() {
    GRPC_CHANNEL_STACK_UNREF(owning_stack_, "LbConnectivityWatcher");
  }

 private:
  static void OnLbPolicyStateChangedLocked(void* arg, grpc_error* error) {
    LbConnectivityWatcher* self = static_cast<LbConnectivityWatcher*>(arg);
    // If the notification is not for the current policy, we're stale,
    // so delete ourselves.
    if (self->lb_policy_ != self->request_router_->lb_policy_.get()) {
      Delete(self);
      return;
    }
    // Otherwise, process notification.
    if (self->request_router_->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: lb_policy=%p state changed to %s",
              self->request_router_, self->lb_policy_,
              grpc_connectivity_state_name(self->state_));
    }
    self->request_router_->SetConnectivityStateLocked(
        self->state_, GRPC_ERROR_REF(error), "lb_changed");
    // If shutting down, terminate watch.
    if (self->state_ == GRPC_CHANNEL_SHUTDOWN) {
      Delete(self);
      return;
    }
    // Renew watch.
    self->lb_policy_->NotifyOnStateChangeLocked(&self->state_,
                                                &self->on_changed_);
  }

  RequestRouter* request_router_;
  grpc_connectivity_state state_;
  // LB policy address. No ref held, so not safe to dereference unless
  // it happens to match request_router->lb_policy_.
  LoadBalancingPolicy* lb_policy_;
  grpc_channel_stack* owning_stack_;
  grpc_closure on_changed_;
};
||||
|
||||
//
|
||||
// RequestRouter::ReresolutionRequestHandler
|
||||
//
|
||||
|
||||
// Forwards re-resolution requests from an LB policy to the resolver.
// Deletes itself (dropping its channel-stack ref) once it observes a
// request for a stale policy, an error, or resolver shutdown.
class RequestRouter::ReresolutionRequestHandler {
 public:
  ReresolutionRequestHandler(RequestRouter* request_router,
                             LoadBalancingPolicy* lb_policy,
                             grpc_channel_stack* owning_stack,
                             grpc_combiner* combiner)
      : request_router_(request_router),
        lb_policy_(lb_policy),
        owning_stack_(owning_stack) {
    GRPC_CHANNEL_STACK_REF(owning_stack_, "ReresolutionRequestHandler");
    GRPC_CLOSURE_INIT(&closure_, &OnRequestReresolutionLocked, this,
                      grpc_combiner_scheduler(combiner));
    lb_policy_->SetReresolutionClosureLocked(&closure_);
  }

 private:
  static void OnRequestReresolutionLocked(void* arg, grpc_error* error) {
    ReresolutionRequestHandler* self =
        static_cast<ReresolutionRequestHandler*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If this invocation is for a stale LB policy, treat it as an LB shutdown
    // signal.
    if (self->lb_policy_ != request_router->lb_policy_.get() ||
        error != GRPC_ERROR_NONE || request_router->resolver_ == nullptr) {
      GRPC_CHANNEL_STACK_UNREF(request_router->owning_stack_,
                               "ReresolutionRequestHandler");
      Delete(self);
      return;
    }
    if (request_router->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: started name re-resolving",
              request_router);
    }
    request_router->resolver_->RequestReresolutionLocked();
    // Give back the closure to the LB policy.
    self->lb_policy_->SetReresolutionClosureLocked(&self->closure_);
  }

  RequestRouter* request_router_;
  // LB policy address. No ref held, so not safe to dereference unless
  // it happens to match request_router->lb_policy_.
  LoadBalancingPolicy* lb_policy_;
  grpc_channel_stack* owning_stack_;
  grpc_closure closure_;
};
||||
|
||||
//
|
||||
// RequestRouter
|
||||
//
|
||||
|
||||
// Constructs the router: selects a subchannel pool, initializes the
// connectivity-state tracker (starting at IDLE), and creates the
// resolver.  On resolver-creation failure, *error is set.
RequestRouter::RequestRouter(
    grpc_channel_stack* owning_stack, grpc_combiner* combiner,
    grpc_client_channel_factory* client_channel_factory,
    grpc_pollset_set* interested_parties, TraceFlag* tracer,
    ProcessResolverResultCallback process_resolver_result,
    void* process_resolver_result_user_data, const char* target_uri,
    const grpc_channel_args* args, grpc_error** error)
    : owning_stack_(owning_stack),
      combiner_(combiner),
      client_channel_factory_(client_channel_factory),
      interested_parties_(interested_parties),
      tracer_(tracer),
      process_resolver_result_(process_resolver_result),
      process_resolver_result_user_data_(process_resolver_result_user_data) {
  // Get subchannel pool.
  const grpc_arg* arg =
      grpc_channel_args_find(args, GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL);
  if (grpc_channel_arg_get_bool(arg, false)) {
    subchannel_pool_ = MakeRefCounted<LocalSubchannelPool>();
  } else {
    subchannel_pool_ = GlobalSubchannelPool::instance();
  }
  GRPC_CLOSURE_INIT(&on_resolver_result_changed_,
                    &RequestRouter::OnResolverResultChangedLocked, this,
                    grpc_combiner_scheduler(combiner));
  grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
                               "request_router");
  grpc_channel_args* new_args = nullptr;
  // When no resolver-result callback is supplied, add the
  // service-config-resolution channel arg (value 0) to the args passed
  // to the resolver.
  if (process_resolver_result == nullptr) {
    grpc_arg arg = grpc_channel_arg_integer_create(
        const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
    new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
  }
  resolver_ = ResolverRegistry::CreateResolver(
      target_uri, (new_args == nullptr ? args : new_args), interested_parties_,
      combiner_);
  // CreateResolver() copies the args it needs, so the temporary copy can
  // be destroyed here (destroying nullptr is a no-op).
  grpc_channel_args_destroy(new_args);
  if (resolver_ == nullptr) {
    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
}
||||
|
||||
RequestRouter::~RequestRouter() {
  if (resolver_ != nullptr) {
    // The only way we can get here is if we never started resolving,
    // because we take a ref to the channel stack when we start
    // resolving and do not release it until the resolver callback is
    // invoked after the resolver shuts down.
    resolver_.reset();
  }
  if (lb_policy_ != nullptr) {
    // Detach the LB policy's pollset_set before destroying the policy.
    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    lb_policy_.reset();
  }
  if (client_channel_factory_ != nullptr) {
    grpc_client_channel_factory_unref(client_channel_factory_);
  }
  grpc_connectivity_state_destroy(&state_tracker_);
}
||||
|
||||
namespace { |
||||
|
||||
// Maps a connectivity state to the static string recorded on the
// channelz trace for that transition.
const char* GetChannelConnectivityStateChangeString(
    grpc_connectivity_state state) {
  if (state == GRPC_CHANNEL_IDLE) {
    return "Channel state change to IDLE";
  }
  if (state == GRPC_CHANNEL_CONNECTING) {
    return "Channel state change to CONNECTING";
  }
  if (state == GRPC_CHANNEL_READY) {
    return "Channel state change to READY";
  }
  if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
    return "Channel state change to TRANSIENT_FAILURE";
  }
  if (state == GRPC_CHANNEL_SHUTDOWN) {
    return "Channel state change to SHUTDOWN";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
||||
|
||||
} // namespace
|
||||
|
||||
// Updates the channel's connectivity state, cancelling pending picks
// that can no longer succeed and recording the change on the channelz
// node (if any).  Takes ownership of error.
void RequestRouter::SetConnectivityStateLocked(grpc_connectivity_state state,
                                               grpc_error* error,
                                               const char* reason) {
  if (lb_policy_ != nullptr) {
    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      // Cancel picks with wait_for_ready=false.
      lb_policy_->CancelMatchingPicksLocked(
          /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
          /* check= */ 0, GRPC_ERROR_REF(error));
    } else if (state == GRPC_CHANNEL_SHUTDOWN) {
      // Cancel all picks.
      lb_policy_->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
                                            GRPC_ERROR_REF(error));
    }
  }
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: setting connectivity state to %s",
            this, grpc_connectivity_state_name(state));
  }
  if (channelz_node_ != nullptr) {
    channelz_node_->AddTraceEvent(
        channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string(
            GetChannelConnectivityStateChangeString(state)));
  }
  grpc_connectivity_state_set(&state_tracker_, state, error, reason);
}
||||
|
||||
// Kicks off name resolution.  Must be called at most once.
void RequestRouter::StartResolvingLocked() {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: starting name resolution", this);
  }
  GPR_ASSERT(!started_resolving_);
  started_resolving_ = true;
  // Hold the channel stack alive until the resolver callback fires after
  // resolver shutdown; released in OnResolverShutdownLocked().
  GRPC_CHANNEL_STACK_REF(owning_stack_, "resolver");
  resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
||||
|
||||
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.  Takes ownership of error.
void RequestRouter::OnResolverShutdownLocked(grpc_error* error) {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: shutting down", this);
  }
  if (lb_policy_ != nullptr) {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
              lb_policy_.get());
    }
    // Detach the LB policy's pollset_set before destroying the policy.
    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    lb_policy_.reset();
  }
  if (resolver_ != nullptr) {
    // This should never happen; it can only be triggered by a resolver
    // implementation spontaneously deciding to report shutdown without
    // being orphaned. This code is included just to be defensive.
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO,
              "request_router=%p: spontaneous shutdown from resolver %p", this,
              resolver_.get());
    }
    resolver_.reset();
    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN,
                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Resolver spontaneous shutdown", &error, 1),
                               "resolver_spontaneous_shutdown");
  }
  // Fail any calls still waiting for a resolver result.
  grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "Channel disconnected", &error, 1));
  GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
  // Matches the ref taken in StartResolvingLocked().
  GRPC_CHANNEL_STACK_UNREF(owning_stack_, "resolver");
  grpc_channel_args_destroy(resolver_result_);
  resolver_result_ = nullptr;
  GRPC_ERROR_UNREF(error);
}
||||
|
||||
// Creates a new LB policy, replacing any previous one.
// If the new policy is created successfully, sets *connectivity_state and
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
void RequestRouter::CreateNewLbPolicyLocked(
    const char* lb_policy_name, grpc_json* lb_config,
    grpc_connectivity_state* connectivity_state,
    grpc_error** connectivity_error, TraceStringVector* trace_strings) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner_;
  lb_policy_args.client_channel_factory = client_channel_factory_;
  lb_policy_args.subchannel_pool = subchannel_pool_;
  lb_policy_args.args = resolver_result_;
  lb_policy_args.lb_config = lb_config;
  OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
      LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(lb_policy_name,
                                                             lb_policy_args);
  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
    // Record the failure on the channelz trace, if channelz is enabled.
    if (channelz_node_ != nullptr) {
      char* str;
      gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
      trace_strings->push_back(str);
    }
  } else {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: created new LB policy \"%s\" (%p)",
              this, lb_policy_name, new_lb_policy.get());
    }
    if (channelz_node_ != nullptr) {
      char* str;
      gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
      trace_strings->push_back(str);
    }
    // Swap out the LB policy and update the fds in interested_parties_.
    if (lb_policy_ != nullptr) {
      if (tracer_->enabled()) {
        gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
                lb_policy_.get());
      }
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties_);
      // Transfer any picks still pending on the old policy to the new one.
      lb_policy_->HandOffPendingPicksLocked(new_lb_policy.get());
    }
    lb_policy_ = std::move(new_lb_policy);
    grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    // Create re-resolution request handler for the new LB policy. It
    // will delete itself when no longer needed.
    New<ReresolutionRequestHandler>(this, lb_policy_.get(), owning_stack_,
                                    combiner_);
    // Get the new LB policy's initial connectivity state and start a
    // connectivity watch.
    GRPC_ERROR_UNREF(*connectivity_error);
    *connectivity_state =
        lb_policy_->CheckConnectivityLocked(connectivity_error);
    if (exit_idle_when_lb_policy_arrives_) {
      lb_policy_->ExitIdleLocked();
      exit_idle_when_lb_policy_arrives_ = false;
    }
    // Create new watcher. It will delete itself when done.
    New<LbConnectivityWatcher>(this, *connectivity_state, lb_policy_.get(),
                               owning_stack_, combiner_);
  }
}
||||
|
||||
// Records a channelz trace string when the resolver result transitions
// between an empty and a non-empty address list.
void RequestRouter::MaybeAddTraceMessagesForAddressChangesLocked(
    TraceStringVector* trace_strings) {
  const ServerAddressList* addresses =
      FindServerAddressListChannelArg(resolver_result_);
  const bool has_addresses = addresses != nullptr && addresses->size() > 0;
  // Only empty<->non-empty transitions are worth tracing.
  if (has_addresses != previous_resolution_contained_addresses_) {
    trace_strings->push_back(
        gpr_strdup(has_addresses ? "Address list became non-empty"
                                 : "Address list became empty"));
  }
  previous_resolution_contained_addresses_ = has_addresses;
}
||||
|
||||
// Concatenates the accumulated trace strings into a single
// "Resolution event: ..." message and records it on the channelz node.
// Takes ownership of the strings in *trace_strings (gpr_strvec frees
// them).  No-op when trace_strings is empty.
void RequestRouter::ConcatenateAndAddChannelTraceLocked(
    TraceStringVector* trace_strings) const {
  if (!trace_strings->empty()) {
    gpr_strvec v;
    gpr_strvec_init(&v);
    gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
    // Fix: use a bool literal rather than the integer literal 1.
    bool is_first = true;
    for (size_t i = 0; i < trace_strings->size(); ++i) {
      if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
      is_first = false;
      // The strvec takes ownership of each trace string.
      gpr_strvec_add(&v, (*trace_strings)[i]);
    }
    // Declare at the point of initialization instead of leaving `flat`
    // uninitialized across statements.
    size_t flat_len = 0;
    char* flat = gpr_strvec_flatten(&v, &flat_len);
    // grpc_slice_new takes ownership of `flat` (freed with gpr_free).
    channelz_node_->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
                                  grpc_slice_new(flat, flat_len, gpr_free));
    gpr_strvec_destroy(&v);
  }
}
||||
|
||||
// Callback invoked when a resolver result is available.
// Runs under combiner_.  Consumes resolver_result_ (if any), decides
// whether to create/update the LB policy, optionally emits channelz
// trace events, sets the channel connectivity state, and re-arms the
// resolver watch.
void RequestRouter::OnResolverResultChangedLocked(void* arg,
                                                  grpc_error* error) {
  RequestRouter* self = static_cast<RequestRouter*>(arg);
  if (self->tracer_->enabled()) {
    const char* disposition =
        self->resolver_result_ != nullptr
            ? ""
            : (error == GRPC_ERROR_NONE ? " (transient error)"
                                        : " (resolver shutdown)");
    gpr_log(GPR_INFO,
            "request_router=%p: got resolver result: resolver_result=%p "
            "error=%s%s",
            self, self->resolver_result_, grpc_error_string(error),
            disposition);
  }
  // Handle shutdown.
  if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
    self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
    return;
  }
  // Data used to set the channel's connectivity state.
  bool set_connectivity_state = true;
  // We only want to trace the address resolution in the follow cases:
  // (a) Address resolution resulted in service config change.
  // (b) Address resolution that causes number of backends to go from
  //     zero to non-zero.
  // (c) Address resolution that causes number of backends to go from
  //     non-zero to zero.
  // (d) Address resolution that causes a new LB policy to be created.
  //
  // we track a list of strings to eventually be concatenated and traced.
  TraceStringVector trace_strings;
  // Defaults used when no LB policy ends up handling the state; both are
  // either consumed by SetConnectivityStateLocked() or unreffed below.
  grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  grpc_error* connectivity_error =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
  // resolver_result_ will be null in the case of a transient
  // resolution error. In that case, we don't have any new result to
  // process, which means that we keep using the previous result (if any).
  if (self->resolver_result_ == nullptr) {
    if (self->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: resolver transient failure", self);
    }
    // Don't override connectivity state if we already have an LB policy.
    if (self->lb_policy_ != nullptr) set_connectivity_state = false;
  } else {
    // Parse the resolver result.
    const char* lb_policy_name = nullptr;
    grpc_json* lb_policy_config = nullptr;
    const bool service_config_changed = self->process_resolver_result_(
        self->process_resolver_result_user_data_, *self->resolver_result_,
        &lb_policy_name, &lb_policy_config);
    GPR_ASSERT(lb_policy_name != nullptr);
    // Check to see if we're already using the right LB policy.
    const bool lb_policy_name_changed =
        self->lb_policy_ == nullptr ||
        strcmp(self->lb_policy_->name(), lb_policy_name) != 0;
    if (self->lb_policy_ != nullptr && !lb_policy_name_changed) {
      // Continue using the same LB policy. Update with new addresses.
      if (self->tracer_->enabled()) {
        gpr_log(GPR_INFO,
                "request_router=%p: updating existing LB policy \"%s\" (%p)",
                self, lb_policy_name, self->lb_policy_.get());
      }
      self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
      // No need to set the channel's connectivity state; the existing
      // watch on the LB policy will take care of that.
      set_connectivity_state = false;
    } else {
      // Instantiate new LB policy.
      // May update connectivity_state/connectivity_error to reflect the
      // new policy's initial state.
      self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config,
                                    &connectivity_state, &connectivity_error,
                                    &trace_strings);
    }
    // Add channel trace event.
    if (self->channelz_node_ != nullptr) {
      if (service_config_changed) {
        // TODO(ncteisen): might be worth somehow including a snippet of the
        // config in the trace, at the risk of bloating the trace logs.
        trace_strings.push_back(gpr_strdup("Service config changed"));
      }
      self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
      self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
    }
    // Clean up.
    grpc_channel_args_destroy(self->resolver_result_);
    self->resolver_result_ = nullptr;
  }
  // Set the channel's connectivity state if needed.
  if (set_connectivity_state) {
    // SetConnectivityStateLocked() takes ownership of connectivity_error.
    self->SetConnectivityStateLocked(connectivity_state, connectivity_error,
                                     "resolver_result");
  } else {
    GRPC_ERROR_UNREF(connectivity_error);
  }
  // Invoke closures that were waiting for results and renew the watch.
  GRPC_CLOSURE_LIST_SCHED(&self->waiting_for_resolver_result_closures_);
  self->resolver_->NextLocked(&self->resolver_result_,
                              &self->on_resolver_result_changed_);
}
||||
|
||||
// Routes one call: starts an LB pick if resolver results are already
// available, fails the call if the router has been disconnected, or
// otherwise queues the call until a resolver result arrives.
void RequestRouter::RouteCallLocked(Request* request) {
  GPR_ASSERT(request->pick_.connected_subchannel == nullptr);
  request->request_router_ = this;
  if (lb_policy_ != nullptr) {
    // Resolver results already available: apply the service config and
    // start an LB pick right away.
    request->ProcessServiceConfigAndStartLbPickLocked();
    return;
  }
  if (resolver_ == nullptr) {
    // Channel has been disconnected; fail the call.
    GRPC_CLOSURE_RUN(request->on_route_done_,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
    return;
  }
  // We do not yet have an LB policy, so wait for a resolver result.
  if (!started_resolving_) StartResolvingLocked();
  // The waiter deletes itself once a result arrives.
  New<Request::ResolverResultWaiter>(request);
  // Add the request's polling entity to the request_router's
  // interested_parties, so that the I/O of the resolver can be done
  // under it. It will be removed in LbPickDoneLocked().
  request->MaybeAddCallToInterestedPartiesLocked();
}
||||
|
||||
// Shuts down the router: reports SHUTDOWN connectivity, drops the
// resolver, fails any closures still waiting for a resolver result,
// and tears down the LB policy.  Consumes a ref to error.
// Idempotent: a second call (resolver_ already null) only unrefs error.
void RequestRouter::ShutdownLocked(grpc_error* error) {
  if (resolver_ != nullptr) {
    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
                               "disconnect");
    resolver_.reset();
    if (!started_resolving_) {
      // Resolution never started, so nothing else will ever schedule
      // these closures; fail them now.
      grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
                                 GRPC_ERROR_REF(error));
      GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
    }
    if (lb_policy_ != nullptr) {
      // Detach the LB policy's pollset_set before destroying it.
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties_);
      lb_policy_.reset();
    }
  }
  GRPC_ERROR_UNREF(error);
}
||||
|
||||
// Returns the channel's current connectivity state as recorded by the
// state tracker.
grpc_connectivity_state RequestRouter::GetConnectivityState() {
  return grpc_connectivity_state_check(&state_tracker_);
}
||||
|
||||
// Registers closure to be scheduled when the connectivity state
// changes away from *state (semantics of
// grpc_connectivity_state_notify_on_state_change).
void RequestRouter::NotifyOnConnectivityStateChange(
    grpc_connectivity_state* state, grpc_closure* closure) {
  grpc_connectivity_state_notify_on_state_change(&state_tracker_, state,
                                                 closure);
}
||||
|
||||
void RequestRouter::ExitIdleLocked() { |
||||
if (lb_policy_ != nullptr) { |
||||
lb_policy_->ExitIdleLocked(); |
||||
} else { |
||||
exit_idle_when_lb_policy_arrives_ = true; |
||||
if (!started_resolving_ && resolver_ != nullptr) { |
||||
StartResolvingLocked(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
void RequestRouter::ResetConnectionBackoffLocked() { |
||||
if (resolver_ != nullptr) { |
||||
resolver_->ResetBackoffLocked(); |
||||
resolver_->RequestReresolutionLocked(); |
||||
} |
||||
if (lb_policy_ != nullptr) { |
||||
lb_policy_->ResetBackoffLocked(); |
||||
} |
||||
} |
||||
|
||||
} // namespace grpc_core
|
@ -1,181 +0,0 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/client_channel_channelz.h" |
||||
#include "src/core/ext/filters/client_channel/client_channel_factory.h" |
||||
#include "src/core/ext/filters/client_channel/lb_policy.h" |
||||
#include "src/core/ext/filters/client_channel/resolver.h" |
||||
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/channel/channel_stack.h" |
||||
#include "src/core/lib/debug/trace.h" |
||||
#include "src/core/lib/gprpp/inlined_vector.h" |
||||
#include "src/core/lib/gprpp/orphanable.h" |
||||
#include "src/core/lib/iomgr/call_combiner.h" |
||||
#include "src/core/lib/iomgr/closure.h" |
||||
#include "src/core/lib/iomgr/polling_entity.h" |
||||
#include "src/core/lib/iomgr/pollset_set.h" |
||||
#include "src/core/lib/transport/connectivity_state.h" |
||||
#include "src/core/lib/transport/metadata_batch.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
// Encapsulates the channel's name-resolution and load-balancing
// machinery: owns the resolver and the LB policy and routes individual
// calls to subchannels.  Methods suffixed *Locked must be invoked from
// within combiner_.
class RequestRouter {
 public:
  // State for routing a single call.  Created by the channel and handed
  // to RouteCallLocked().
  class Request {
   public:
    // Synchronous callback that applies the service config to a call.
    // Returns false if the call should be failed.
    typedef bool (*ApplyServiceConfigCallback)(void* user_data);

    Request(grpc_call_stack* owning_call, grpc_call_combiner* call_combiner,
            grpc_polling_entity* pollent,
            grpc_metadata_batch* send_initial_metadata,
            uint32_t* send_initial_metadata_flags,
            ApplyServiceConfigCallback apply_service_config,
            void* apply_service_config_user_data, grpc_closure* on_route_done);

    ~Request();

    // TODO(roth): It seems a bit ugly to expose this member in a
    // non-const way. Find a better API to avoid this.
    LoadBalancingPolicy::PickState* pick() { return &pick_; }

   private:
    friend class RequestRouter;

    class ResolverResultWaiter;
    class AsyncPickCanceller;

    void ProcessServiceConfigAndStartLbPickLocked();
    void StartLbPickLocked();
    static void LbPickDoneLocked(void* arg, grpc_error* error);

    void MaybeAddCallToInterestedPartiesLocked();
    void MaybeRemoveCallFromInterestedPartiesLocked();

    // Populated by caller.
    grpc_call_stack* owning_call_;
    grpc_call_combiner* call_combiner_;
    grpc_polling_entity* pollent_;
    ApplyServiceConfigCallback apply_service_config_;
    void* apply_service_config_user_data_;
    grpc_closure* on_route_done_;  // Invoked when routing completes or fails.
    LoadBalancingPolicy::PickState pick_;

    // Internal state.
    RequestRouter* request_router_ = nullptr;
    // True while the call's pollent is in the router's interested_parties_.
    bool pollent_added_to_interested_parties_ = false;
    grpc_closure on_pick_done_;
    AsyncPickCanceller* pick_canceller_ = nullptr;
  };

  // Synchronous callback that takes the service config JSON string and
  // LB policy name.
  // Returns true if the service config has changed since the last result.
  typedef bool (*ProcessResolverResultCallback)(void* user_data,
                                                const grpc_channel_args& args,
                                                const char** lb_policy_name,
                                                grpc_json** lb_policy_config);

  RequestRouter(grpc_channel_stack* owning_stack, grpc_combiner* combiner,
                grpc_client_channel_factory* client_channel_factory,
                grpc_pollset_set* interested_parties, TraceFlag* tracer,
                ProcessResolverResultCallback process_resolver_result,
                void* process_resolver_result_user_data, const char* target_uri,
                const grpc_channel_args* args, grpc_error** error);

  ~RequestRouter();

  // Not owned; caller must guarantee the node outlives this object.
  void set_channelz_node(channelz::ClientChannelNode* channelz_node) {
    channelz_node_ = channelz_node;
  }

  void RouteCallLocked(Request* request);

  // TODO(roth): Add methods to cancel picks.

  void ShutdownLocked(grpc_error* error);

  void ExitIdleLocked();
  void ResetConnectionBackoffLocked();

  grpc_connectivity_state GetConnectivityState();
  void NotifyOnConnectivityStateChange(grpc_connectivity_state* state,
                                       grpc_closure* closure);

  LoadBalancingPolicy* lb_policy() const { return lb_policy_.get(); }

 private:
  // Strings accumulated for a single channelz trace message; each entry
  // is heap-allocated and consumed by ConcatenateAndAddChannelTraceLocked().
  using TraceStringVector = InlinedVector<char*, 3>;

  class ReresolutionRequestHandler;
  class LbConnectivityWatcher;

  void StartResolvingLocked();
  void OnResolverShutdownLocked(grpc_error* error);
  void CreateNewLbPolicyLocked(const char* lb_policy_name, grpc_json* lb_config,
                               grpc_connectivity_state* connectivity_state,
                               grpc_error** connectivity_error,
                               TraceStringVector* trace_strings);
  void MaybeAddTraceMessagesForAddressChangesLocked(
      TraceStringVector* trace_strings);
  void ConcatenateAndAddChannelTraceLocked(
      TraceStringVector* trace_strings) const;
  static void OnResolverResultChangedLocked(void* arg, grpc_error* error);

  void SetConnectivityStateLocked(grpc_connectivity_state state,
                                  grpc_error* error, const char* reason);

  // Passed in from caller at construction time.
  grpc_channel_stack* owning_stack_;
  grpc_combiner* combiner_;
  grpc_client_channel_factory* client_channel_factory_;
  grpc_pollset_set* interested_parties_;
  TraceFlag* tracer_;

  channelz::ClientChannelNode* channelz_node_ = nullptr;

  // Resolver and associated state.
  OrphanablePtr<Resolver> resolver_;
  ProcessResolverResultCallback process_resolver_result_;
  void* process_resolver_result_user_data_;
  bool started_resolving_ = false;
  // Most recent result from the resolver; consumed by
  // OnResolverResultChangedLocked().
  grpc_channel_args* resolver_result_ = nullptr;
  bool previous_resolution_contained_addresses_ = false;
  grpc_closure_list waiting_for_resolver_result_closures_;
  grpc_closure on_resolver_result_changed_;

  // LB policy and associated state.
  OrphanablePtr<LoadBalancingPolicy> lb_policy_;
  // Set if ExitIdleLocked() is called before an LB policy exists; the
  // request is replayed once the policy is created.
  bool exit_idle_when_lb_policy_arrives_ = false;

  // Subchannel pool to pass to LB policy.
  RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;

  grpc_connectivity_state_tracker state_tracker_;
};
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H */ |
@ -0,0 +1,460 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2015 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/resolving_lb_policy.h" |
||||
|
||||
#include <inttypes.h> |
||||
#include <limits.h> |
||||
#include <stdbool.h> |
||||
#include <stdio.h> |
||||
#include <string.h> |
||||
|
||||
#include <grpc/support/alloc.h> |
||||
#include <grpc/support/log.h> |
||||
#include <grpc/support/string_util.h> |
||||
#include <grpc/support/sync.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/backup_poller.h" |
||||
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" |
||||
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" |
||||
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" |
||||
#include "src/core/ext/filters/client_channel/resolver_registry.h" |
||||
#include "src/core/ext/filters/client_channel/retry_throttle.h" |
||||
#include "src/core/ext/filters/client_channel/server_address.h" |
||||
#include "src/core/ext/filters/client_channel/subchannel.h" |
||||
#include "src/core/ext/filters/deadline/deadline_filter.h" |
||||
#include "src/core/lib/backoff/backoff.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/channel/connected_channel.h" |
||||
#include "src/core/lib/channel/status_util.h" |
||||
#include "src/core/lib/gpr/string.h" |
||||
#include "src/core/lib/gprpp/inlined_vector.h" |
||||
#include "src/core/lib/gprpp/manual_constructor.h" |
||||
#include "src/core/lib/iomgr/combiner.h" |
||||
#include "src/core/lib/iomgr/iomgr.h" |
||||
#include "src/core/lib/iomgr/polling_entity.h" |
||||
#include "src/core/lib/profiling/timers.h" |
||||
#include "src/core/lib/slice/slice_internal.h" |
||||
#include "src/core/lib/slice/slice_string_helpers.h" |
||||
#include "src/core/lib/surface/channel.h" |
||||
#include "src/core/lib/transport/connectivity_state.h" |
||||
#include "src/core/lib/transport/error_utils.h" |
||||
#include "src/core/lib/transport/metadata.h" |
||||
#include "src/core/lib/transport/metadata_batch.h" |
||||
#include "src/core/lib/transport/service_config.h" |
||||
#include "src/core/lib/transport/static_metadata.h" |
||||
#include "src/core/lib/transport/status_metadata.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
//
|
||||
// ResolvingLoadBalancingPolicy::ResolvingControlHelper
|
||||
//
|
||||
|
||||
// Channel-control helper handed to the child LB policy.  Forwards every
// operation to the parent ResolvingLoadBalancingPolicy's own helper,
// except that once the parent has started shutting down (observable as
// parent_->resolver_ == nullptr) all operations become no-ops.  Holds a
// ref to the parent, so it may outlive the parent's orphaning.
class ResolvingLoadBalancingPolicy::ResolvingControlHelper
    : public LoadBalancingPolicy::ChannelControlHelper {
 public:
  explicit ResolvingControlHelper(
      RefCountedPtr<ResolvingLoadBalancingPolicy> parent)
      : parent_(std::move(parent)) {}

  Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
    if (parent_->resolver_ == nullptr) return nullptr;  // Shutting down.
    return parent_->channel_control_helper()->CreateSubchannel(args);
  }

  grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
                              const grpc_channel_args& args) override {
    if (parent_->resolver_ == nullptr) return nullptr;  // Shutting down.
    return parent_->channel_control_helper()->CreateChannel(target, type, args);
  }

  // Takes ownership of state_error; unrefs it if dropped during shutdown.
  void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
                   UniquePtr<SubchannelPicker> picker) override {
    if (parent_->resolver_ == nullptr) {
      // shutting down.
      GRPC_ERROR_UNREF(state_error);
      return;
    }
    parent_->channel_control_helper()->UpdateState(state, state_error,
                                                   std::move(picker));
  }

  void RequestReresolution() override {
    if (parent_->tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: started name re-resolving",
              parent_.get());
    }
    if (parent_->resolver_ != nullptr) {
      parent_->resolver_->RequestReresolutionLocked();
    }
  }

 private:
  RefCountedPtr<ResolvingLoadBalancingPolicy> parent_;
};
||||
|
||||
//
|
||||
// ResolvingLoadBalancingPolicy
|
||||
//
|
||||
|
||||
// Constructor for use as a nested LB policy: the child policy name and
// config are fixed at construction time rather than derived from each
// resolver result.  The config is deep-copied by dumping it to a string
// and re-parsing, so the caller retains ownership of child_lb_config.
// On failure, *error is set to a non-OK error by Init().
ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
    Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
    UniquePtr<char> child_policy_name, grpc_json* child_lb_config,
    grpc_error** error)
    : LoadBalancingPolicy(std::move(args)),
      tracer_(tracer),
      target_uri_(std::move(target_uri)),
      child_policy_name_(std::move(child_policy_name)),
      // child_lb_config_str_ must be initialized before child_lb_config_
      // (declaration order), since the latter parses the former.
      child_lb_config_str_(grpc_json_dump_to_string(child_lb_config, 0)),
      child_lb_config_(grpc_json_parse_string(child_lb_config_str_.get())) {
  GPR_ASSERT(child_policy_name_ != nullptr);
  // Don't fetch service config, since this ctor is for use in nested LB
  // policies, not at the top level, and we only fetch the service
  // config at the top level.
  grpc_arg arg = grpc_channel_arg_integer_create(
      const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
  // NOTE(review): args is read here after std::move(args) above; this
  // relies on the moved-from Args struct still holding valid raw
  // pointers — confirm against the Args definition.
  grpc_channel_args* new_args =
      grpc_channel_args_copy_and_add(args.args, &arg, 1);
  *error = Init(*new_args);
  grpc_channel_args_destroy(new_args);
}
||||
|
||||
// Constructor for use at the top level of the channel: the caller
// supplies a callback that processes each resolver result (service
// config handling and LB policy selection).  On failure, *error is set
// to a non-OK error by Init().
ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
    Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
    ProcessResolverResultCallback process_resolver_result,
    void* process_resolver_result_user_data, grpc_error** error)
    : LoadBalancingPolicy(std::move(args)),
      tracer_(tracer),
      target_uri_(std::move(target_uri)),
      process_resolver_result_(process_resolver_result),
      process_resolver_result_user_data_(process_resolver_result_user_data) {
  GPR_ASSERT(process_resolver_result != nullptr);
  // NOTE(review): args is read here after std::move(args) above; this
  // relies on the moved-from Args struct still holding valid raw
  // pointers — confirm against the Args definition.
  *error = Init(*args.args);
}
||||
|
||||
// Shared initialization for both constructors: sets up the
// resolver-result closure, creates the resolver, and reports an initial
// IDLE state with a queueing picker.  Returns a non-OK error if the
// resolver could not be created.
grpc_error* ResolvingLoadBalancingPolicy::Init(const grpc_channel_args& args) {
  GRPC_CLOSURE_INIT(
      &on_resolver_result_changed_,
      &ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked, this,
      grpc_combiner_scheduler(combiner()));
  resolver_ = ResolverRegistry::CreateResolver(
      target_uri_.get(), &args, interested_parties(), combiner());
  if (resolver_ == nullptr) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
  // Return our picker to the channel.  QueuePicker holds a ref to us.
  channel_control_helper()->UpdateState(
      GRPC_CHANNEL_IDLE, GRPC_ERROR_NONE,
      UniquePtr<SubchannelPicker>(New<QueuePicker>(Ref())));
  return GRPC_ERROR_NONE;
}
||||
|
||||
// Destructor.  ShutdownLocked() must already have released the resolver
// and child LB policy; only the copied child config remains to free.
ResolvingLoadBalancingPolicy::~ResolvingLoadBalancingPolicy() {
  GPR_ASSERT(resolver_ == nullptr);
  GPR_ASSERT(lb_policy_ == nullptr);
  grpc_json_destroy(child_lb_config_);
}
||||
|
||||
void ResolvingLoadBalancingPolicy::ShutdownLocked() { |
||||
if (resolver_ != nullptr) { |
||||
resolver_.reset(); |
||||
if (lb_policy_ != nullptr) { |
||||
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(), |
||||
interested_parties()); |
||||
lb_policy_.reset(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
void ResolvingLoadBalancingPolicy::ExitIdleLocked() { |
||||
if (lb_policy_ != nullptr) { |
||||
lb_policy_->ExitIdleLocked(); |
||||
} else { |
||||
if (!started_resolving_ && resolver_ != nullptr) { |
||||
StartResolvingLocked(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
void ResolvingLoadBalancingPolicy::ResetBackoffLocked() { |
||||
if (resolver_ != nullptr) { |
||||
resolver_->ResetBackoffLocked(); |
||||
resolver_->RequestReresolutionLocked(); |
||||
} |
||||
if (lb_policy_ != nullptr) { |
||||
lb_policy_->ResetBackoffLocked(); |
||||
} |
||||
} |
||||
|
||||
// Reports channelz child refs by delegating to the child policy, if any.
void ResolvingLoadBalancingPolicy::FillChildRefsForChannelz(
    channelz::ChildRefsList* child_subchannels,
    channelz::ChildRefsList* child_channels) {
  if (lb_policy_ == nullptr) return;  // Nothing to report yet.
  lb_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
}
||||
|
||||
// Starts name resolution.  Must be called at most once
// (GPR_ASSERT(!started_resolving_)).
void ResolvingLoadBalancingPolicy::StartResolvingLocked() {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "resolving_lb=%p: starting name resolution", this);
  }
  GPR_ASSERT(!started_resolving_);
  started_resolving_ = true;
  // Manual ref held by the pending resolver callback; released in
  // OnResolverShutdownLocked() (Unref()).
  Ref().release();
  resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
||||
|
||||
// Invoked from the resolver NextLocked() callback when the resolver
|
||||
// is shutting down.
|
||||
void ResolvingLoadBalancingPolicy::OnResolverShutdownLocked(grpc_error* error) { |
||||
if (tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down", this); |
||||
} |
||||
if (lb_policy_ != nullptr) { |
||||
if (tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this, |
||||
lb_policy_.get()); |
||||
} |
||||
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(), |
||||
interested_parties()); |
||||
lb_policy_.reset(); |
||||
} |
||||
if (resolver_ != nullptr) { |
||||
// This should never happen; it can only be triggered by a resolver
|
||||
// implementation spotaneously deciding to report shutdown without
|
||||
// being orphaned. This code is included just to be defensive.
|
||||
if (tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, |
||||
"resolving_lb=%p: spontaneous shutdown from resolver %p", this, |
||||
resolver_.get()); |
||||
} |
||||
resolver_.reset(); |
||||
grpc_error* error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( |
||||
"Resolver spontaneous shutdown", &error, 1); |
||||
channel_control_helper()->UpdateState( |
||||
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error), |
||||
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error))); |
||||
} |
||||
grpc_channel_args_destroy(resolver_result_); |
||||
resolver_result_ = nullptr; |
||||
GRPC_ERROR_UNREF(error); |
||||
Unref(); |
||||
} |
||||
|
||||
// Creates a new LB policy, replacing any previous one.
// Updates trace_strings to indicate what was done.
// On failure (unknown policy name), the old policy (if any) is left in
// place; on success the old policy is detached and destroyed and the
// new one is told to exit IDLE.
void ResolvingLoadBalancingPolicy::CreateNewLbPolicyLocked(
    const char* lb_policy_name, grpc_json* lb_config,
    TraceStringVector* trace_strings) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner();
  // The helper holds a ref to this object and forwards to our own helper.
  lb_policy_args.channel_control_helper =
      UniquePtr<ChannelControlHelper>(New<ResolvingControlHelper>(Ref()));
  lb_policy_args.args = resolver_result_;
  lb_policy_args.lb_config = lb_config;
  OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
      LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
          lb_policy_name, std::move(lb_policy_args));
  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
    if (channelz_node() != nullptr) {
      char* str;
      gpr_asprintf(&str, "Could not create LB policy \"%s\"", lb_policy_name);
      trace_strings->push_back(str);  // vector takes ownership of str
    }
  } else {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: created new LB policy \"%s\" (%p)",
              this, lb_policy_name, new_lb_policy.get());
    }
    if (channelz_node() != nullptr) {
      char* str;
      gpr_asprintf(&str, "Created new LB policy \"%s\"", lb_policy_name);
      trace_strings->push_back(str);  // vector takes ownership of str
    }
    // Propagate channelz node.
    auto* channelz = channelz_node();
    if (channelz != nullptr) {
      new_lb_policy->set_channelz_node(channelz->Ref());
    }
    // Swap out the LB policy and update the fds in interested_parties_.
    if (lb_policy_ != nullptr) {
      if (tracer_->enabled()) {
        gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
                lb_policy_.get());
      }
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties());
    }
    lb_policy_ = std::move(new_lb_policy);
    grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties());
    // Ask the new policy to start connecting immediately.
    lb_policy_->ExitIdleLocked();
  }
}
||||
|
||||
// Records a channel trace message when the resolved address list
// transitions between empty and non-empty across consecutive
// resolutions.  Steady states (no change) produce no message.
void ResolvingLoadBalancingPolicy::MaybeAddTraceMessagesForAddressChangesLocked(
    TraceStringVector* trace_strings) {
  const ServerAddressList* address_list =
      FindServerAddressListChannelArg(resolver_result_);
  const bool has_addresses =
      address_list != nullptr && !address_list->empty();
  // Only trace the transition itself.
  if (has_addresses != previous_resolution_contained_addresses_) {
    trace_strings->push_back(
        gpr_strdup(has_addresses ? "Address list became non-empty"
                                 : "Address list became empty"));
  }
  previous_resolution_contained_addresses_ = has_addresses;
}
||||
|
||||
// Joins the accumulated trace strings into one comma-separated message
// prefixed with "Resolution event: " and records it on the channelz
// node as an Info trace event.  Ownership of each string in
// *trace_strings is transferred to the strvec and freed by
// gpr_strvec_destroy(); the flattened buffer is owned by the slice.
void ResolvingLoadBalancingPolicy::ConcatenateAndAddChannelTraceLocked(
    TraceStringVector* trace_strings) const {
  if (!trace_strings->empty()) {
    gpr_strvec v;
    gpr_strvec_init(&v);
    gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
    // Fix: use a bool literal instead of initializing a bool with 1.
    bool is_first = true;
    for (size_t i = 0; i < trace_strings->size(); ++i) {
      if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
      is_first = false;
      gpr_strvec_add(&v, (*trace_strings)[i]);  // takes ownership
    }
    char* flat;
    size_t flat_len = 0;
    flat = gpr_strvec_flatten(&v, &flat_len);
    // grpc_slice_new() takes ownership of flat and frees it via gpr_free.
    channelz_node()->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
                                   grpc_slice_new(flat, flat_len, gpr_free));
    gpr_strvec_destroy(&v);
  }
}
||||
|
||||
// Callback invoked when a resolver result is available.
|
||||
void ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked( |
||||
void* arg, grpc_error* error) { |
||||
auto* self = static_cast<ResolvingLoadBalancingPolicy*>(arg); |
||||
if (self->tracer_->enabled()) { |
||||
const char* disposition = |
||||
self->resolver_result_ != nullptr |
||||
? "" |
||||
: (error == GRPC_ERROR_NONE ? " (transient error)" |
||||
: " (resolver shutdown)"); |
||||
gpr_log(GPR_INFO, |
||||
"resolving_lb=%p: got resolver result: resolver_result=%p " |
||||
"error=%s%s", |
||||
self, self->resolver_result_, grpc_error_string(error), |
||||
disposition); |
||||
} |
||||
// Handle shutdown.
|
||||
if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) { |
||||
self->OnResolverShutdownLocked(GRPC_ERROR_REF(error)); |
||||
return; |
||||
} |
||||
// We only want to trace the address resolution in the follow cases:
|
||||
// (a) Address resolution resulted in service config change.
|
||||
// (b) Address resolution that causes number of backends to go from
|
||||
// zero to non-zero.
|
||||
// (c) Address resolution that causes number of backends to go from
|
||||
// non-zero to zero.
|
||||
// (d) Address resolution that causes a new LB policy to be created.
|
||||
//
|
||||
// we track a list of strings to eventually be concatenated and traced.
|
||||
TraceStringVector trace_strings; |
||||
// resolver_result_ will be null in the case of a transient
|
||||
// resolution error. In that case, we don't have any new result to
|
||||
// process, which means that we keep using the previous result (if any).
|
||||
if (self->resolver_result_ == nullptr) { |
||||
if (self->tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, "resolving_lb=%p: resolver transient failure", self); |
||||
} |
||||
// If we already have an LB policy from a previous resolution
|
||||
// result, then we continue to let it set the connectivity state.
|
||||
// Otherwise, we go into TRANSIENT_FAILURE.
|
||||
if (self->lb_policy_ == nullptr) { |
||||
// TODO(roth): When we change the resolver API to be able to
|
||||
// return transient errors in a cleaner way, we should make it the
|
||||
// resolver's responsibility to attach a status to the error,
|
||||
// rather than doing it centrally here.
|
||||
grpc_error* state_error = grpc_error_set_int( |
||||
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( |
||||
"Resolver transient failure", &error, 1), |
||||
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); |
||||
self->channel_control_helper()->UpdateState( |
||||
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(state_error), |
||||
UniquePtr<SubchannelPicker>( |
||||
New<TransientFailurePicker>(state_error))); |
||||
} |
||||
} else { |
||||
// Parse the resolver result.
|
||||
const char* lb_policy_name = nullptr; |
||||
grpc_json* lb_policy_config = nullptr; |
||||
bool service_config_changed = false; |
||||
if (self->process_resolver_result_ != nullptr) { |
||||
service_config_changed = self->process_resolver_result_( |
||||
self->process_resolver_result_user_data_, *self->resolver_result_, |
||||
&lb_policy_name, &lb_policy_config); |
||||
} else { |
||||
lb_policy_name = self->child_policy_name_.get(); |
||||
lb_policy_config = self->child_lb_config_; |
||||
} |
||||
GPR_ASSERT(lb_policy_name != nullptr); |
||||
// Check to see if we're already using the right LB policy.
|
||||
const bool lb_policy_name_changed = |
||||
self->lb_policy_ == nullptr || |
||||
strcmp(self->lb_policy_->name(), lb_policy_name) != 0; |
||||
if (self->lb_policy_ != nullptr && !lb_policy_name_changed) { |
||||
// Continue using the same LB policy. Update with new addresses.
|
||||
if (self->tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, |
||||
"resolving_lb=%p: updating existing LB policy \"%s\" (%p)", |
||||
self, lb_policy_name, self->lb_policy_.get()); |
||||
} |
||||
self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config); |
||||
} else { |
||||
// Instantiate new LB policy.
|
||||
if (self->tracer_->enabled()) { |
||||
gpr_log(GPR_INFO, "resolving_lb=%p: creating new LB policy \"%s\"", |
||||
self, lb_policy_name); |
||||
} |
||||
self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config, |
||||
&trace_strings); |
||||
} |
||||
// Add channel trace event.
|
||||
if (self->channelz_node() != nullptr) { |
||||
if (service_config_changed) { |
||||
// TODO(ncteisen): might be worth somehow including a snippet of the
|
||||
// config in the trace, at the risk of bloating the trace logs.
|
||||
trace_strings.push_back(gpr_strdup("Service config changed")); |
||||
} |
||||
self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings); |
||||
self->ConcatenateAndAddChannelTraceLocked(&trace_strings); |
||||
} |
||||
// Clean up.
|
||||
grpc_channel_args_destroy(self->resolver_result_); |
||||
self->resolver_result_ = nullptr; |
||||
} |
||||
// Renew resolver callback.
|
||||
self->resolver_->NextLocked(&self->resolver_result_, |
||||
&self->on_resolver_result_changed_); |
||||
} |
||||
|
||||
} // namespace grpc_core
|
@ -0,0 +1,137 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/client_channel_channelz.h" |
||||
#include "src/core/ext/filters/client_channel/lb_policy.h" |
||||
#include "src/core/ext/filters/client_channel/resolver.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/channel/channel_stack.h" |
||||
#include "src/core/lib/debug/trace.h" |
||||
#include "src/core/lib/gprpp/inlined_vector.h" |
||||
#include "src/core/lib/gprpp/orphanable.h" |
||||
#include "src/core/lib/iomgr/call_combiner.h" |
||||
#include "src/core/lib/iomgr/closure.h" |
||||
#include "src/core/lib/iomgr/polling_entity.h" |
||||
#include "src/core/lib/iomgr/pollset_set.h" |
||||
#include "src/core/lib/transport/connectivity_state.h" |
||||
#include "src/core/lib/transport/metadata_batch.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
// An LB policy that wraps a resolver and a child LB policy to make use
|
||||
// of the addresses returned by the resolver.
|
||||
//
|
||||
// When used in the client_channel code, the resolver will attempt to
|
||||
// fetch the service config, and the child LB policy name and config
|
||||
// will be determined based on the service config.
|
||||
//
|
||||
// When used in an LB policy implementation that needs to do another
|
||||
// round of resolution before creating a child policy, the resolver does
|
||||
// not fetch the service config, and the caller must pre-determine the
|
||||
// child LB policy and config to use.
|
||||
class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy { |
||||
public: |
||||
// If error is set when this returns, then construction failed, and
|
||||
// the caller may not use the new object.
|
||||
ResolvingLoadBalancingPolicy(Args args, TraceFlag* tracer, |
||||
UniquePtr<char> target_uri, |
||||
UniquePtr<char> child_policy_name, |
||||
grpc_json* child_lb_config, grpc_error** error); |
||||
|
||||
// Private ctor, to be used by client_channel only!
|
||||
//
|
||||
// Synchronous callback that takes the resolver result and sets
|
||||
// lb_policy_name and lb_policy_config to point to the right data.
|
||||
// Returns true if the service config has changed since the last result.
|
||||
typedef bool (*ProcessResolverResultCallback)(void* user_data, |
||||
const grpc_channel_args& args, |
||||
const char** lb_policy_name, |
||||
grpc_json** lb_policy_config); |
||||
// If error is set when this returns, then construction failed, and
|
||||
// the caller may not use the new object.
|
||||
ResolvingLoadBalancingPolicy( |
||||
Args args, TraceFlag* tracer, UniquePtr<char> target_uri, |
||||
ProcessResolverResultCallback process_resolver_result, |
||||
void* process_resolver_result_user_data, grpc_error** error); |
||||
|
||||
virtual const char* name() const override { return "resolving_lb"; } |
||||
|
||||
// No-op -- should never get updates from the channel.
|
||||
// TODO(roth): Need to support updating child LB policy's config.
|
||||
// For xds policy, will also need to support updating config
|
||||
// independently of args from resolver, since they will be coming from
|
||||
// different places. Maybe change LB policy API to support that?
|
||||
void UpdateLocked(const grpc_channel_args& args, |
||||
grpc_json* lb_config) override {} |
||||
|
||||
void ExitIdleLocked() override; |
||||
|
||||
void ResetBackoffLocked() override; |
||||
|
||||
void FillChildRefsForChannelz( |
||||
channelz::ChildRefsList* child_subchannels, |
||||
channelz::ChildRefsList* child_channels) override; |
||||
|
||||
private: |
||||
using TraceStringVector = InlinedVector<char*, 3>; |
||||
|
||||
class ResolvingControlHelper; |
||||
|
||||
~ResolvingLoadBalancingPolicy(); |
||||
|
||||
grpc_error* Init(const grpc_channel_args& args); |
||||
void ShutdownLocked() override; |
||||
|
||||
void StartResolvingLocked(); |
||||
void OnResolverShutdownLocked(grpc_error* error); |
||||
void CreateNewLbPolicyLocked(const char* lb_policy_name, grpc_json* lb_config, |
||||
TraceStringVector* trace_strings); |
||||
void MaybeAddTraceMessagesForAddressChangesLocked( |
||||
TraceStringVector* trace_strings); |
||||
void ConcatenateAndAddChannelTraceLocked( |
||||
TraceStringVector* trace_strings) const; |
||||
static void OnResolverResultChangedLocked(void* arg, grpc_error* error); |
||||
|
||||
// Passed in from caller at construction time.
|
||||
TraceFlag* tracer_; |
||||
UniquePtr<char> target_uri_; |
||||
ProcessResolverResultCallback process_resolver_result_ = nullptr; |
||||
void* process_resolver_result_user_data_ = nullptr; |
||||
UniquePtr<char> child_policy_name_; |
||||
UniquePtr<char> child_lb_config_str_; |
||||
grpc_json* child_lb_config_ = nullptr; |
||||
|
||||
// Resolver and associated state.
|
||||
OrphanablePtr<Resolver> resolver_; |
||||
bool started_resolving_ = false; |
||||
grpc_channel_args* resolver_result_ = nullptr; |
||||
bool previous_resolution_contained_addresses_ = false; |
||||
grpc_closure on_resolver_result_changed_; |
||||
|
||||
// Child LB policy and associated state.
|
||||
OrphanablePtr<LoadBalancingPolicy> lb_policy_; |
||||
}; |
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H */ |
@ -1,222 +0,0 @@ |
||||
//
|
||||
//
|
||||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
//
|
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/subchannel_index.h" |
||||
|
||||
#include <stdbool.h> |
||||
#include <string.h> |
||||
|
||||
#include <grpc/support/alloc.h> |
||||
#include <grpc/support/string_util.h> |
||||
|
||||
#include "src/core/lib/avl/avl.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/gpr/tls.h" |
||||
|
||||
// a map of subchannel_key --> subchannel, used for detecting connections
|
||||
// to the same destination in order to share them
|
||||
static grpc_avl g_subchannel_index; |
||||
|
||||
static gpr_mu g_mu; |
||||
|
||||
static gpr_refcount g_refcount; |
||||
|
||||
struct grpc_subchannel_key { |
||||
grpc_channel_args* args; |
||||
}; |
||||
|
||||
static grpc_subchannel_key* create_key( |
||||
const grpc_channel_args* args, |
||||
grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) { |
||||
grpc_subchannel_key* k = |
||||
static_cast<grpc_subchannel_key*>(gpr_malloc(sizeof(*k))); |
||||
k->args = copy_channel_args(args); |
||||
return k; |
||||
} |
||||
|
||||
grpc_subchannel_key* grpc_subchannel_key_create(const grpc_channel_args* args) { |
||||
return create_key(args, grpc_channel_args_normalize); |
||||
} |
||||
|
||||
static grpc_subchannel_key* subchannel_key_copy(grpc_subchannel_key* k) { |
||||
return create_key(k->args, grpc_channel_args_copy); |
||||
} |
||||
|
||||
int grpc_subchannel_key_compare(const grpc_subchannel_key* a, |
||||
const grpc_subchannel_key* b) { |
||||
return grpc_channel_args_compare(a->args, b->args); |
||||
} |
||||
|
||||
void grpc_subchannel_key_destroy(grpc_subchannel_key* k) { |
||||
grpc_channel_args_destroy(k->args); |
||||
gpr_free(k); |
||||
} |
||||
|
||||
static void sck_avl_destroy(void* p, void* unused) { |
||||
grpc_subchannel_key_destroy(static_cast<grpc_subchannel_key*>(p)); |
||||
} |
||||
|
||||
static void* sck_avl_copy(void* p, void* unused) { |
||||
return subchannel_key_copy(static_cast<grpc_subchannel_key*>(p)); |
||||
} |
||||
|
||||
static long sck_avl_compare(void* a, void* b, void* unused) { |
||||
return grpc_subchannel_key_compare(static_cast<grpc_subchannel_key*>(a), |
||||
static_cast<grpc_subchannel_key*>(b)); |
||||
} |
||||
|
||||
static void scv_avl_destroy(void* p, void* unused) { |
||||
GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "subchannel_index"); |
||||
} |
||||
|
||||
static void* scv_avl_copy(void* p, void* unused) { |
||||
GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel*)p, "subchannel_index"); |
||||
return p; |
||||
} |
||||
|
||||
static const grpc_avl_vtable subchannel_avl_vtable = { |
||||
sck_avl_destroy, // destroy_key
|
||||
sck_avl_copy, // copy_key
|
||||
sck_avl_compare, // compare_keys
|
||||
scv_avl_destroy, // destroy_value
|
||||
scv_avl_copy // copy_value
|
||||
}; |
||||
|
||||
void grpc_subchannel_index_init(void) { |
||||
g_subchannel_index = grpc_avl_create(&subchannel_avl_vtable); |
||||
gpr_mu_init(&g_mu); |
||||
gpr_ref_init(&g_refcount, 1); |
||||
} |
||||
|
||||
void grpc_subchannel_index_shutdown(void) { |
||||
// TODO(juanlishen): This refcounting mechanism may lead to memory leackage.
|
||||
// To solve that, we should force polling to flush any pending callbacks, then
|
||||
// shutdown safely.
|
||||
grpc_subchannel_index_unref(); |
||||
} |
||||
|
||||
void grpc_subchannel_index_unref(void) { |
||||
if (gpr_unref(&g_refcount)) { |
||||
gpr_mu_destroy(&g_mu); |
||||
grpc_avl_unref(g_subchannel_index, nullptr); |
||||
} |
||||
} |
||||
|
||||
void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); } |
||||
|
||||
grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) { |
||||
// Lock, and take a reference to the subchannel index.
|
||||
// We don't need to do the search under a lock as avl's are immutable.
|
||||
gpr_mu_lock(&g_mu); |
||||
grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr); |
||||
gpr_mu_unlock(&g_mu); |
||||
|
||||
grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF( |
||||
(grpc_subchannel*)grpc_avl_get(index, key, nullptr), "index_find"); |
||||
grpc_avl_unref(index, nullptr); |
||||
|
||||
return c; |
||||
} |
||||
|
||||
grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key, |
||||
grpc_subchannel* constructed) { |
||||
grpc_subchannel* c = nullptr; |
||||
bool need_to_unref_constructed = false; |
||||
|
||||
while (c == nullptr) { |
||||
need_to_unref_constructed = false; |
||||
|
||||
// Compare and swap loop:
|
||||
// - take a reference to the current index
|
||||
gpr_mu_lock(&g_mu); |
||||
grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr); |
||||
gpr_mu_unlock(&g_mu); |
||||
|
||||
// - Check to see if a subchannel already exists
|
||||
c = static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr)); |
||||
if (c != nullptr) { |
||||
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register"); |
||||
} |
||||
if (c != nullptr) { |
||||
// yes -> we're done
|
||||
need_to_unref_constructed = true; |
||||
} else { |
||||
// no -> update the avl and compare/swap
|
||||
grpc_avl updated = grpc_avl_add( |
||||
grpc_avl_ref(index, nullptr), subchannel_key_copy(key), |
||||
GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), nullptr); |
||||
|
||||
// it may happen (but it's expected to be unlikely)
|
||||
// that some other thread has changed the index:
|
||||
// compare/swap here to check that, and retry as necessary
|
||||
gpr_mu_lock(&g_mu); |
||||
if (index.root == g_subchannel_index.root) { |
||||
GPR_SWAP(grpc_avl, updated, g_subchannel_index); |
||||
c = constructed; |
||||
} |
||||
gpr_mu_unlock(&g_mu); |
||||
|
||||
grpc_avl_unref(updated, nullptr); |
||||
} |
||||
grpc_avl_unref(index, nullptr); |
||||
} |
||||
|
||||
if (need_to_unref_constructed) { |
||||
GRPC_SUBCHANNEL_UNREF(constructed, "index_register"); |
||||
} |
||||
|
||||
return c; |
||||
} |
||||
|
||||
void grpc_subchannel_index_unregister(grpc_subchannel_key* key, |
||||
grpc_subchannel* constructed) { |
||||
bool done = false; |
||||
while (!done) { |
||||
// Compare and swap loop:
|
||||
// - take a reference to the current index
|
||||
gpr_mu_lock(&g_mu); |
||||
grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr); |
||||
gpr_mu_unlock(&g_mu); |
||||
|
||||
// Check to see if this key still refers to the previously
|
||||
// registered subchannel
|
||||
grpc_subchannel* c = |
||||
static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr)); |
||||
if (c != constructed) { |
||||
grpc_avl_unref(index, nullptr); |
||||
break; |
||||
} |
||||
|
||||
// compare and swap the update (some other thread may have
|
||||
// mutated the index behind us)
|
||||
grpc_avl updated = |
||||
grpc_avl_remove(grpc_avl_ref(index, nullptr), key, nullptr); |
||||
|
||||
gpr_mu_lock(&g_mu); |
||||
if (index.root == g_subchannel_index.root) { |
||||
GPR_SWAP(grpc_avl, updated, g_subchannel_index); |
||||
done = true; |
||||
} |
||||
gpr_mu_unlock(&g_mu); |
||||
|
||||
grpc_avl_unref(updated, nullptr); |
||||
grpc_avl_unref(index, nullptr); |
||||
} |
||||
} |
@ -1,66 +0,0 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2016 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/subchannel.h" |
||||
|
||||
/** \file Provides an index of active subchannels so that they can be
|
||||
shared amongst channels */ |
||||
|
||||
/** Create a key that can be used to uniquely identify a subchannel */ |
||||
grpc_subchannel_key* grpc_subchannel_key_create(const grpc_channel_args* args); |
||||
|
||||
/** Destroy a subchannel key */ |
||||
void grpc_subchannel_key_destroy(grpc_subchannel_key* key); |
||||
|
||||
/** Given a subchannel key, find the subchannel registered for it.
|
||||
Returns NULL if no such channel exists. |
||||
Thread-safe. */ |
||||
grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key); |
||||
|
||||
/** Register a subchannel against a key.
|
||||
Takes ownership of \a constructed. |
||||
Returns the registered subchannel. This may be different from |
||||
\a constructed in the case of a registration race. */ |
||||
grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key, |
||||
grpc_subchannel* constructed); |
||||
|
||||
/** Remove \a constructed as the registered subchannel for \a key. */ |
||||
void grpc_subchannel_index_unregister(grpc_subchannel_key* key, |
||||
grpc_subchannel* constructed); |
||||
|
||||
int grpc_subchannel_key_compare(const grpc_subchannel_key* a, |
||||
const grpc_subchannel_key* b); |
||||
|
||||
/** Initialize the subchannel index (global) */ |
||||
void grpc_subchannel_index_init(void); |
||||
/** Shutdown the subchannel index (global) */ |
||||
void grpc_subchannel_index_shutdown(void); |
||||
|
||||
/** Increment the refcount (non-zero) of subchannel index (global). */ |
||||
void grpc_subchannel_index_ref(void); |
||||
|
||||
/** Decrement the refcount of subchannel index (global). If the refcount drops
|
||||
to zero, unref the subchannel index and destroy its mutex. */ |
||||
void grpc_subchannel_index_unref(void); |
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */ |
@ -1,7 +1,7 @@ |
||||
<!-- This file is generated --> |
||||
<Project> |
||||
<PropertyGroup> |
||||
<GrpcCsharpVersion>1.19.0-dev</GrpcCsharpVersion> |
||||
<GrpcCsharpVersion>1.20.0-dev</GrpcCsharpVersion> |
||||
<GoogleProtobufVersion>3.6.1</GoogleProtobufVersion> |
||||
</PropertyGroup> |
||||
</Project> |
||||
|
@ -0,0 +1,404 @@ |
||||
|
||||
// Copyright 2019 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// When building for Unity Android with il2cpp backend, Unity tries to link
|
||||
// the __Internal PInvoke definitions (which are required by iOS) even though
|
||||
// the .so/.dll will be actually used. This file provides dummy stubs to
|
||||
// make il2cpp happy.
|
||||
// See https://github.com/grpc/grpc/issues/16012
|
||||
|
||||
#include <stdio.h> |
||||
#include <stdlib.h> |
||||
|
||||
void grpcsharp_init() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_shutdown() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_version_string() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_initial_metadata() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_message_length() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_message_to_buffer() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_status_on_client_status() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_status_on_client_details() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_status_on_client_trailing_metadata() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_recv_close_on_server_cancelled() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_reset() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_batch_context_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_call() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_method() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_host() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_deadline() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_request_metadata() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_reset() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_request_call_context_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_composite_call_credentials_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_credentials_release() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_cancel() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_cancel_with_status() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_start_unary() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_start_client_streaming() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_start_server_streaming() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_start_duplex_streaming() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_send_message() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_send_close_from_client() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_send_status_from_server() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_recv_message() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_recv_initial_metadata() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_start_serverside() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_send_initial_metadata() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_set_credentials() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_get_peer() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_args_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_args_set_string() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_args_set_integer() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_args_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_override_default_ssl_roots() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_ssl_credentials_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_composite_channel_credentials_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_credentials_release() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_insecure_channel_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_secure_channel_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_create_call() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_check_connectivity_state() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_watch_connectivity_state() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_get_target() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_channel_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_sizeof_grpc_event() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_create_async() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_create_sync() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_shutdown() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_next() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_pluck() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_completion_queue_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_free() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_add() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_count() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_get_key() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_get_value() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_array_destroy_full() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_redirect_log() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_credentials_create_from_plugin() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_metadata_credentials_notify_from_plugin() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_ssl_server_credentials_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_credentials_release() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_create() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_register_completion_queue() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_add_insecure_http2_port() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_add_secure_http2_port() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_start() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_request_call() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_cancel_all_calls() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_shutdown_and_notify_callback() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_server_destroy() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_call_auth_context() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_auth_context_peer_identity_property_name() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_auth_context_property_iterator() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_auth_property_iterator_next() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_auth_context_release() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_now() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_inf_future() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_inf_past() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_convert_clock_type() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void gprsharp_sizeof_timespec() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_test_callback() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_test_nop() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
void grpcsharp_test_override_method() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
@ -0,0 +1,93 @@ |
||||
fileFormatVersion: 2 |
||||
guid: 576b78662f1f8af4fa751f709b620f52 |
||||
PluginImporter: |
||||
externalObjects: {} |
||||
serializedVersion: 2 |
||||
iconMap: {} |
||||
executionOrder: {} |
||||
isPreloaded: 0 |
||||
isOverridable: 0 |
||||
platformData: |
||||
- first: |
||||
'': Any |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
Exclude Android: 0 |
||||
Exclude Editor: 1 |
||||
Exclude Linux: 1 |
||||
Exclude Linux64: 1 |
||||
Exclude LinuxUniversal: 1 |
||||
Exclude OSXUniversal: 1 |
||||
Exclude Win: 0 |
||||
Exclude Win64: 0 |
||||
- first: |
||||
Android: Android |
||||
second: |
||||
enabled: 1 |
||||
settings: |
||||
CPU: ARMv7 |
||||
- first: |
||||
Any: |
||||
second: |
||||
enabled: 0 |
||||
settings: {} |
||||
- first: |
||||
Editor: Editor |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: AnyCPU |
||||
DefaultValueInitialized: true |
||||
OS: AnyOS |
||||
- first: |
||||
Facebook: Win |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: AnyCPU |
||||
- first: |
||||
Facebook: Win64 |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: AnyCPU |
||||
- first: |
||||
Standalone: Linux |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: None |
||||
- first: |
||||
Standalone: Linux64 |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: None |
||||
- first: |
||||
Standalone: LinuxUniversal |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: None |
||||
- first: |
||||
Standalone: OSXUniversal |
||||
second: |
||||
enabled: 0 |
||||
settings: |
||||
CPU: None |
||||
- first: |
||||
Standalone: Win |
||||
second: |
||||
enabled: 1 |
||||
settings: |
||||
CPU: AnyCPU |
||||
- first: |
||||
Standalone: Win64 |
||||
second: |
||||
enabled: 1 |
||||
settings: |
||||
CPU: AnyCPU |
||||
userData: |
||||
assetBundleName: |
||||
assetBundleVariant: |
@ -0,0 +1,152 @@ |
||||
# Copyright 2019 gRPC authors. |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
"""Client-side fork interop tests as a unit test.""" |
||||
|
||||
import six |
||||
import subprocess |
||||
import sys |
||||
import threading |
||||
import unittest |
||||
from grpc._cython import cygrpc |
||||
from tests.fork import methods |
||||
|
||||
# New instance of multiprocessing.Process using fork without exec can and will |
||||
# hang if the Python process has any other threads running. This includes the |
||||
# additional thread spawned by our _runner.py class. So in order to test our |
||||
# compatibility with multiprocessing, we first fork+exec a new process to ensure |
||||
# we don't have any conflicting background threads. |
||||
_CLIENT_FORK_SCRIPT_TEMPLATE = """if True: |
||||
import os |
||||
import sys |
||||
from grpc._cython import cygrpc |
||||
from tests.fork import methods |
||||
|
||||
cygrpc._GRPC_ENABLE_FORK_SUPPORT = True |
||||
os.environ['GRPC_POLL_STRATEGY'] = 'epoll1' |
||||
methods.TestCase.%s.run_test({ |
||||
'server_host': 'localhost', |
||||
'server_port': %d, |
||||
'use_tls': False |
||||
}) |
||||
""" |
||||
_SUBPROCESS_TIMEOUT_S = 30 |
||||
|
||||
|
||||
@unittest.skipUnless( |
||||
sys.platform.startswith("linux"), |
||||
"not supported on windows, and fork+exec networking blocked on mac") |
||||
@unittest.skipUnless(six.PY2, "https://github.com/grpc/grpc/issues/18075") |
||||
class ForkInteropTest(unittest.TestCase): |
||||
|
||||
def setUp(self): |
||||
start_server_script = """if True: |
||||
import sys |
||||
import time |
||||
|
||||
import grpc |
||||
from src.proto.grpc.testing import test_pb2_grpc |
||||
from tests.interop import methods as interop_methods |
||||
from tests.unit import test_common |
||||
|
||||
server = test_common.test_server() |
||||
test_pb2_grpc.add_TestServiceServicer_to_server( |
||||
interop_methods.TestService(), server) |
||||
port = server.add_insecure_port('[::]:0') |
||||
server.start() |
||||
print(port) |
||||
sys.stdout.flush() |
||||
while True: |
||||
time.sleep(1) |
||||
""" |
||||
self._server_process = subprocess.Popen( |
||||
[sys.executable, '-c', start_server_script], |
||||
stdout=subprocess.PIPE, |
||||
stderr=subprocess.PIPE) |
||||
timer = threading.Timer(_SUBPROCESS_TIMEOUT_S, |
||||
self._server_process.kill) |
||||
try: |
||||
timer.start() |
||||
self._port = int(self._server_process.stdout.readline()) |
||||
except ValueError: |
||||
raise Exception('Failed to get port from server') |
||||
finally: |
||||
timer.cancel() |
||||
|
||||
def testConnectivityWatch(self): |
||||
self._verifyTestCase(methods.TestCase.CONNECTIVITY_WATCH) |
||||
|
||||
def testCloseChannelBeforeFork(self): |
||||
self._verifyTestCase(methods.TestCase.CLOSE_CHANNEL_BEFORE_FORK) |
||||
|
||||
def testAsyncUnarySameChannel(self): |
||||
self._verifyTestCase(methods.TestCase.ASYNC_UNARY_SAME_CHANNEL) |
||||
|
||||
def testAsyncUnaryNewChannel(self): |
||||
self._verifyTestCase(methods.TestCase.ASYNC_UNARY_NEW_CHANNEL) |
||||
|
||||
def testBlockingUnarySameChannel(self): |
||||
self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_SAME_CHANNEL) |
||||
|
||||
def testBlockingUnaryNewChannel(self): |
||||
self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_NEW_CHANNEL) |
||||
|
||||
def testInProgressBidiContinueCall(self): |
||||
self._verifyTestCase(methods.TestCase.IN_PROGRESS_BIDI_CONTINUE_CALL) |
||||
|
||||
def testInProgressBidiSameChannelAsyncCall(self): |
||||
self._verifyTestCase( |
||||
methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL) |
||||
|
||||
def testInProgressBidiSameChannelBlockingCall(self): |
||||
self._verifyTestCase( |
||||
methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL) |
||||
|
||||
def testInProgressBidiNewChannelAsyncCall(self): |
||||
self._verifyTestCase( |
||||
methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL) |
||||
|
||||
def testInProgressBidiNewChannelBlockingCall(self): |
||||
self._verifyTestCase( |
||||
methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL) |
||||
|
||||
def tearDown(self): |
||||
self._server_process.kill() |
||||
|
||||
def _verifyTestCase(self, test_case): |
||||
script = _CLIENT_FORK_SCRIPT_TEMPLATE % (test_case.name, self._port) |
||||
process = subprocess.Popen( |
||||
[sys.executable, '-c', script], |
||||
stdout=subprocess.PIPE, |
||||
stderr=subprocess.PIPE) |
||||
timer = threading.Timer(_SUBPROCESS_TIMEOUT_S, process.kill) |
||||
try: |
||||
timer.start() |
||||
try: |
||||
out, err = process.communicate(timeout=_SUBPROCESS_TIMEOUT_S) |
||||
except TypeError: |
||||
# The timeout parameter was added in Python 3.3. |
||||
out, err = process.communicate() |
||||
except subprocess.TimeoutExpired: |
||||
process.kill() |
||||
raise RuntimeError('Process failed to terminate') |
||||
finally: |
||||
timer.cancel() |
||||
self.assertEqual( |
||||
0, process.returncode, |
||||
'process failed with exit code %d (stdout: %s, stderr: %s)' % |
||||
(process.returncode, out, err)) |
||||
|
||||
|
||||
if __name__ == '__main__': |
||||
unittest.main(verbosity=2) |
@ -0,0 +1,109 @@ |
||||
<%def name="get_native_methods()"><% |
||||
native_method_signatures = [ |
||||
'void grpcsharp_init()', |
||||
'void grpcsharp_shutdown()', |
||||
'IntPtr grpcsharp_version_string() // returns not-owned const char*', |
||||
'BatchContextSafeHandle grpcsharp_batch_context_create()', |
||||
'IntPtr grpcsharp_batch_context_recv_initial_metadata(BatchContextSafeHandle ctx)', |
||||
'IntPtr grpcsharp_batch_context_recv_message_length(BatchContextSafeHandle ctx)', |
||||
'void grpcsharp_batch_context_recv_message_to_buffer(BatchContextSafeHandle ctx, byte[] buffer, UIntPtr bufferLen)', |
||||
'StatusCode grpcsharp_batch_context_recv_status_on_client_status(BatchContextSafeHandle ctx)', |
||||
'IntPtr grpcsharp_batch_context_recv_status_on_client_details(BatchContextSafeHandle ctx, out UIntPtr detailsLength)', |
||||
'IntPtr grpcsharp_batch_context_recv_status_on_client_trailing_metadata(BatchContextSafeHandle ctx)', |
||||
'int grpcsharp_batch_context_recv_close_on_server_cancelled(BatchContextSafeHandle ctx)', |
||||
'void grpcsharp_batch_context_reset(BatchContextSafeHandle ctx)', |
||||
'void grpcsharp_batch_context_destroy(IntPtr ctx)', |
||||
'RequestCallContextSafeHandle grpcsharp_request_call_context_create()', |
||||
'CallSafeHandle grpcsharp_request_call_context_call(RequestCallContextSafeHandle ctx)', |
||||
'IntPtr grpcsharp_request_call_context_method(RequestCallContextSafeHandle ctx, out UIntPtr methodLength)', |
||||
'IntPtr grpcsharp_request_call_context_host(RequestCallContextSafeHandle ctx, out UIntPtr hostLength)', |
||||
'Timespec grpcsharp_request_call_context_deadline(RequestCallContextSafeHandle ctx)', |
||||
'IntPtr grpcsharp_request_call_context_request_metadata(RequestCallContextSafeHandle ctx)', |
||||
'void grpcsharp_request_call_context_reset(RequestCallContextSafeHandle ctx)', |
||||
'void grpcsharp_request_call_context_destroy(IntPtr ctx)', |
||||
'CallCredentialsSafeHandle grpcsharp_composite_call_credentials_create(CallCredentialsSafeHandle creds1, CallCredentialsSafeHandle creds2)', |
||||
'void grpcsharp_call_credentials_release(IntPtr credentials)', |
||||
'CallError grpcsharp_call_cancel(CallSafeHandle call)', |
||||
'CallError grpcsharp_call_cancel_with_status(CallSafeHandle call, StatusCode status, string description)', |
||||
'CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)', |
||||
'CallError grpcsharp_call_start_client_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)', |
||||
'CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)', |
||||
'CallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)', |
||||
'CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, int sendEmptyInitialMetadata)', |
||||
'CallError grpcsharp_call_send_close_from_client(CallSafeHandle call, BatchContextSafeHandle ctx)', |
||||
'CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, byte[] statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags)', |
||||
'CallError grpcsharp_call_recv_message(CallSafeHandle call, BatchContextSafeHandle ctx)', |
||||
'CallError grpcsharp_call_recv_initial_metadata(CallSafeHandle call, BatchContextSafeHandle ctx)', |
||||
'CallError grpcsharp_call_start_serverside(CallSafeHandle call, BatchContextSafeHandle ctx)', |
||||
'CallError grpcsharp_call_send_initial_metadata(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray)', |
||||
'CallError grpcsharp_call_set_credentials(CallSafeHandle call, CallCredentialsSafeHandle credentials)', |
||||
'CStringSafeHandle grpcsharp_call_get_peer(CallSafeHandle call)', |
||||
'void grpcsharp_call_destroy(IntPtr call)', |
||||
'ChannelArgsSafeHandle grpcsharp_channel_args_create(UIntPtr numArgs)', |
||||
'void grpcsharp_channel_args_set_string(ChannelArgsSafeHandle args, UIntPtr index, string key, string value)', |
||||
'void grpcsharp_channel_args_set_integer(ChannelArgsSafeHandle args, UIntPtr index, string key, int value)', |
||||
'void grpcsharp_channel_args_destroy(IntPtr args)', |
||||
'void grpcsharp_override_default_ssl_roots(string pemRootCerts)', |
||||
'ChannelCredentialsSafeHandle grpcsharp_ssl_credentials_create(string pemRootCerts, string keyCertPairCertChain, string keyCertPairPrivateKey)', |
||||
'ChannelCredentialsSafeHandle grpcsharp_composite_channel_credentials_create(ChannelCredentialsSafeHandle channelCreds, CallCredentialsSafeHandle callCreds)', |
||||
'void grpcsharp_channel_credentials_release(IntPtr credentials)', |
||||
'ChannelSafeHandle grpcsharp_insecure_channel_create(string target, ChannelArgsSafeHandle channelArgs)', |
||||
'ChannelSafeHandle grpcsharp_secure_channel_create(ChannelCredentialsSafeHandle credentials, string target, ChannelArgsSafeHandle channelArgs)', |
||||
'CallSafeHandle grpcsharp_channel_create_call(ChannelSafeHandle channel, CallSafeHandle parentCall, ContextPropagationFlags propagationMask, CompletionQueueSafeHandle cq, string method, string host, Timespec deadline)', |
||||
'ChannelState grpcsharp_channel_check_connectivity_state(ChannelSafeHandle channel, int tryToConnect)', |
||||
'void grpcsharp_channel_watch_connectivity_state(ChannelSafeHandle channel, ChannelState lastObservedState, Timespec deadline, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx)', |
||||
'CStringSafeHandle grpcsharp_channel_get_target(ChannelSafeHandle call)', |
||||
'void grpcsharp_channel_destroy(IntPtr channel)', |
||||
'int grpcsharp_sizeof_grpc_event()', |
||||
'CompletionQueueSafeHandle grpcsharp_completion_queue_create_async()', |
||||
'CompletionQueueSafeHandle grpcsharp_completion_queue_create_sync()', |
||||
'void grpcsharp_completion_queue_shutdown(CompletionQueueSafeHandle cq)', |
||||
'CompletionQueueEvent grpcsharp_completion_queue_next(CompletionQueueSafeHandle cq)', |
||||
'CompletionQueueEvent grpcsharp_completion_queue_pluck(CompletionQueueSafeHandle cq, IntPtr tag)', |
||||
'void grpcsharp_completion_queue_destroy(IntPtr cq)', |
||||
'void gprsharp_free(IntPtr ptr)', |
||||
'MetadataArraySafeHandle grpcsharp_metadata_array_create(UIntPtr capacity)', |
||||
'void grpcsharp_metadata_array_add(MetadataArraySafeHandle array, string key, byte[] value, UIntPtr valueLength)', |
||||
'UIntPtr grpcsharp_metadata_array_count(IntPtr metadataArray)', |
||||
'IntPtr grpcsharp_metadata_array_get_key(IntPtr metadataArray, UIntPtr index, out UIntPtr keyLength)', |
||||
'IntPtr grpcsharp_metadata_array_get_value(IntPtr metadataArray, UIntPtr index, out UIntPtr valueLength)', |
||||
'void grpcsharp_metadata_array_destroy_full(IntPtr array)', |
||||
'void grpcsharp_redirect_log(GprLogDelegate callback)', |
||||
'CallCredentialsSafeHandle grpcsharp_metadata_credentials_create_from_plugin(NativeMetadataInterceptor interceptor)', |
||||
'void grpcsharp_metadata_credentials_notify_from_plugin(IntPtr callbackPtr, IntPtr userData, MetadataArraySafeHandle metadataArray, StatusCode statusCode, string errorDetails)', |
||||
'ServerCredentialsSafeHandle grpcsharp_ssl_server_credentials_create(string pemRootCerts, string[] keyCertPairCertChainArray, string[] keyCertPairPrivateKeyArray, UIntPtr numKeyCertPairs, SslClientCertificateRequestType clientCertificateRequest)', |
||||
'void grpcsharp_server_credentials_release(IntPtr credentials)', |
||||
'ServerSafeHandle grpcsharp_server_create(ChannelArgsSafeHandle args)', |
||||
'void grpcsharp_server_register_completion_queue(ServerSafeHandle server, CompletionQueueSafeHandle cq)', |
||||
'int grpcsharp_server_add_insecure_http2_port(ServerSafeHandle server, string addr)', |
||||
'int grpcsharp_server_add_secure_http2_port(ServerSafeHandle server, string addr, ServerCredentialsSafeHandle creds)', |
||||
'void grpcsharp_server_start(ServerSafeHandle server)', |
||||
'CallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, RequestCallContextSafeHandle ctx)', |
||||
'void grpcsharp_server_cancel_all_calls(ServerSafeHandle server)', |
||||
'void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx)', |
||||
'void grpcsharp_server_destroy(IntPtr server)', |
||||
'AuthContextSafeHandle grpcsharp_call_auth_context(CallSafeHandle call)', |
||||
'IntPtr grpcsharp_auth_context_peer_identity_property_name(AuthContextSafeHandle authContext) // returns const char*', |
||||
'AuthContextSafeHandle.NativeAuthPropertyIterator grpcsharp_auth_context_property_iterator(AuthContextSafeHandle authContext)', |
||||
'IntPtr grpcsharp_auth_property_iterator_next(ref AuthContextSafeHandle.NativeAuthPropertyIterator iterator) // returns const auth_property*', |
||||
'void grpcsharp_auth_context_release(IntPtr authContext)', |
||||
'Timespec gprsharp_now(ClockType clockType)', |
||||
'Timespec gprsharp_inf_future(ClockType clockType)', |
||||
'Timespec gprsharp_inf_past(ClockType clockType)', |
||||
'Timespec gprsharp_convert_clock_type(Timespec t, ClockType targetClock)', |
||||
'int gprsharp_sizeof_timespec()', |
||||
'CallError grpcsharp_test_callback([MarshalAs(UnmanagedType.FunctionPtr)] NativeCallbackTestDelegate callback)', |
||||
'IntPtr grpcsharp_test_nop(IntPtr ptr)', |
||||
'void grpcsharp_test_override_method(string methodName, string variant)', |
||||
] |
||||
|
||||
import re |
||||
native_methods = [] |
||||
for signature in native_method_signatures: |
||||
match = re.match('([A-Za-z0-9_.]+) +([A-Za-z0-9_]+)\\((.*)\\)(.*)', signature) |
||||
if not match: |
||||
raise Exception('Malformed signature "%s"' % signature) |
||||
native_methods.append({'returntype': match.group(1), 'name': match.group(2), 'params': match.group(3), 'comment': match.group(4)}) |
||||
|
||||
return list(native_methods) |
||||
%></%def> |
@ -0,0 +1,32 @@ |
||||
%YAML 1.2 |
||||
--- | |
||||
<%namespace file="../../../../../Grpc.Core/Internal/native_methods.include" import="get_native_methods"/> |
||||
// Copyright 2019 The gRPC Authors |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
// When building for Unity Android with il2cpp backend, Unity tries to link |
||||
// the __Internal PInvoke definitions (which are required by iOS) even though |
||||
// the .so/.dll will be actually used. This file provides dummy stubs to |
||||
// make il2cpp happy. |
||||
// See https://github.com/grpc/grpc/issues/16012 |
||||
|
||||
#include <stdio.h> |
||||
#include <stdlib.h> |
||||
|
||||
% for method in get_native_methods(): |
||||
void ${method['name']}() { |
||||
fprintf(stderr, "Should never reach here"); |
||||
abort(); |
||||
} |
||||
% endfor |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue