diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8eff902f6b0..eed12052685 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4618,6 +4618,7 @@ add_library(end2end_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
+ test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
@@ -4719,6 +4720,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
+ test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
diff --git a/Makefile b/Makefile
index e51882c3640..38b40804d64 100644
--- a/Makefile
+++ b/Makefile
@@ -327,7 +327,7 @@ CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
CXXFLAGS += -stdlib=libc++
endif
-CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
+CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations
COREFLAGS += -fno-rtti -fno-exceptions
LDFLAGS += -g
@@ -8557,6 +8557,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
+ test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
@@ -8655,6 +8656,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
+ test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
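
The new -Wno-deprecated-declarations flag (also added to build.yaml and grpc.gyp below) suppresses the warning emitted when code calls a symbol marked deprecated. A minimal illustration of what it silences, assuming GCC/Clang:

    /* Compiled with -Wall, the call below warns "old_api is deprecated"
       unless -Wno-deprecated-declarations is passed. (Hypothetical names,
       for illustration only.) */
    __attribute__((deprecated("use new_api instead"))) void old_api(void);
    void caller(void) { old_api(); }
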
diff --git a/Rakefile b/Rakefile
index d76b9ff657c..74c8b1fd487 100755
--- a/Rakefile
+++ b/Rakefile
@@ -113,10 +113,10 @@ task 'gem:native' do
if RUBY_PLATFORM =~ /darwin/
FileUtils.touch 'grpc_c.32.ruby'
FileUtils.touch 'grpc_c.64.ruby'
- system "rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
+ system "rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
else
Rake::Task['dlls'].execute
- docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
+ docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
end
end
diff --git a/build.yaml b/build.yaml
index 42d72459811..fef7d6189f7 100644
--- a/build.yaml
+++ b/build.yaml
@@ -5004,6 +5004,7 @@ defaults:
global:
COREFLAGS: -fno-rtti -fno-exceptions
CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
+ -Wno-deprecated-declarations
LDFLAGS: -g
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration
diff --git a/doc/md b/doc/md
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 708c3436abd..c127660dd50 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -1038,6 +1038,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
diff --git a/grpc.gyp b/grpc.gyp
index c34206b1a5d..281fbfa8a6e 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -57,6 +57,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
],
'ldflags': [
'-g',
@@ -134,6 +135,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
@@ -143,6 +145,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
@@ -2378,6 +2381,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
@@ -2450,6 +2454,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index f4bc3eb3e03..ae1f951b499 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -195,12 +195,25 @@
#define GPR_PTHREAD_TLS 1
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */
#define GPR_CPU_POSIX 1
+/* TODO(vjpai): There is a reported issue in the Bazel build for Mac where
+   __thread in a header is currently not working (bazelbuild/bazel#4341).
+   Remove the following conditional and use GPR_GCC_TLS once that is fixed. */
+#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
+#else /* GRPC_BAZEL_BUILD */
+#define GPR_PTHREAD_TLS 1
+#endif /* GRPC_BAZEL_BUILD */
#define GPR_APPLE_PTHREAD_NAME 1
#endif
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
#define GPR_CPU_POSIX 1
+/* TODO(vjpai): Remove the following conditional and use only GPR_GCC_TLS
+ when bazelbuild/bazel#4341 is fixed */
+#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
+#else /* GRPC_BAZEL_BUILD */
+#define GPR_PTHREAD_TLS 1
+#endif /* GRPC_BAZEL_BUILD */
#endif
#define GPR_POSIX_CRASH_HANDLER 1
#endif
@@ -421,6 +434,14 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
+#ifndef GRPC_UNUSED
+#if defined(__GNUC__) && !defined(__MINGW32__)
+#define GRPC_UNUSED __attribute__((unused))
+#else
+#define GRPC_UNUSED
+#endif
+#endif
+
#ifndef GPR_PRINT_FORMAT_CHECK
#ifdef __GNUC__
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
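
For context, a rough sketch (illustrative only, not part of this patch) of what the two TLS modes selected above amount to; real gRPC code goes through the gpr_tls_* wrappers rather than these hypothetical tls_get/tls_set helpers:

    #include <pthread.h>
    #include <stdint.h>

    #if defined(GPR_GCC_TLS)
    /* Compiler-supported TLS: a plain __thread variable, no setup needed. */
    static __thread intptr_t g_value;
    static intptr_t tls_get(void) { return g_value; }
    static void tls_set(intptr_t v) { g_value = v; }
    #elif defined(GPR_PTHREAD_TLS)
    /* Library-supported TLS: a pthread key created once before use. This is
       the mode forced under GRPC_BAZEL_BUILD until bazelbuild/bazel#4341 is
       fixed. */
    static pthread_key_t g_key;
    static void tls_init(void) { pthread_key_create(&g_key, NULL); }
    static intptr_t tls_get(void) {
      return (intptr_t)pthread_getspecific(g_key);
    }
    static void tls_set(intptr_t v) { pthread_setspecific(g_key, (void*)v); }
    #endif

    /* The GRPC_UNUSED macro added above marks symbols referenced only in some
       configurations, silencing -Wunused warnings on GCC/Clang: */
    GRPC_UNUSED static void debug_only_helper(void) {}
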
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index bfc549e709e..4ee5e9c1093 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -33,7 +33,8 @@
#define DEFAULT_POLL_INTERVAL_MS 5000
-typedef struct backup_poller {
+namespace {
+struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
@@ -42,7 +43,8 @@ typedef struct backup_poller {
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
-} backup_poller;
+};
+} // namespace
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;
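
A likely motivation for the typedef-to-unnamed-namespace conversions in this patch: now that these files compile as C++, two translation units each defining a file-local struct with the same name (call_data, state_watcher, ...) would collide under the one-definition rule, while an unnamed namespace gives each translation unit its own distinct type. Illustrative sketch:

    // foo.cc and bar.cc can now each define their own call_data safely:
    namespace {
    struct call_data {  // internal linkage: distinct type per translation unit
      int per_call_state;
    };
    }  // namespace
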
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 20693ba4190..a827aa30ec6 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -58,7 +58,8 @@ typedef enum {
CALLING_BACK_AND_FINISHED,
} callback_phase;
-typedef struct {
+namespace {
+struct state_watcher {
gpr_mu mu;
callback_phase phase;
grpc_closure on_complete;
@@ -71,7 +72,8 @@ typedef struct {
grpc_channel* channel;
grpc_error* error;
void* tag;
-} state_watcher;
+};
+} // namespace
static void delete_state_watcher(state_watcher* w) {
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index fce5f3582b6..36134c6b06e 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -553,6 +553,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
chand->lb_policy = new_lb_policy;
@@ -658,6 +659,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
chand->lb_policy = nullptr;
}
@@ -792,6 +794,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
gpr_free(chand->info_lb_policy_name);
@@ -852,12 +855,10 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* subchannel_call;
grpc_error* error;
- grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
+ grpc_lb_policy_pick_state pick;
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
- grpc_core::ConnectedSubchannel* connected_subchannel;
- grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity* pollent;
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
@@ -866,8 +867,6 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch* initial_metadata_batch;
- grpc_linked_mdelem lb_token_mdelem;
-
grpc_closure on_complete;
grpc_closure* original_on_complete;
} call_data;
@@ -1005,15 +1004,15 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
- calld->pollent, // pollent
- calld->path, // path
- calld->call_start_time, // start_time
- calld->deadline, // deadline
- calld->arena, // arena
- calld->subchannel_call_context, // context
- calld->call_combiner // call_combiner
+ calld->pollent, // pollent
+ calld->path, // path
+ calld->call_start_time, // start_time
+ calld->deadline, // deadline
+ calld->arena, // arena
+ calld->pick.subchannel_call_context, // context
+ calld->call_combiner // call_combiner
};
- grpc_error* new_error = calld->connected_subchannel->CreateCall(
+ grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
@@ -1032,7 +1031,7 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- if (calld->connected_subchannel == nullptr) {
+ if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE
@@ -1071,13 +1070,16 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (calld->lb_policy != nullptr) {
+ // Note: chand->lb_policy may have changed since we started our pick,
+ // in which case we will be cancelling the pick on a policy other than
+ // the one we started it on. However, this will just be a no-op.
+ if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
+ chand, calld, chand->lb_policy);
}
- grpc_lb_policy_cancel_pick_locked(
- calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
+ grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
+ GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
@@ -1092,9 +1094,6 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
- GPR_ASSERT(calld->lb_policy != nullptr);
- GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
}
@@ -1128,26 +1127,21 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
- const grpc_lb_policy_pick_args inputs = {
+ calld->pick.initial_metadata =
calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata,
- initial_metadata_flags, &calld->lb_token_mdelem};
- // Keep a ref to the LB policy in calld while the pick is pending.
- GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
- calld->lb_policy = chand->lb_policy;
+ .send_initial_metadata;
+ calld->pick.initial_metadata_flags = initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
- const bool pick_done = grpc_lb_policy_pick_locked(
- chand->lb_policy, &inputs, &calld->connected_subchannel,
- calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
+ calld->pick.on_complete = &calld->lb_pick_closure;
+ const bool pick_done =
+ grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
- GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@@ -1289,7 +1283,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- GPR_ASSERT(calld->connected_subchannel == nullptr);
+ GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@@ -1467,15 +1461,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
- GPR_ASSERT(calld->lb_policy == nullptr);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
- if (calld->connected_subchannel != nullptr) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
+ if (calld->pick.connected_subchannel != nullptr) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->pick.connected_subchannel, "picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
- if (calld->subchannel_call_context[i].value != nullptr) {
- calld->subchannel_call_context[i].destroy(
- calld->subchannel_call_context[i].value);
+ if (calld->pick.subchannel_call_context[i].value != nullptr) {
+ calld->pick.subchannel_call_context[i].destroy(
+ calld->pick.subchannel_call_context[i].value);
}
}
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
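
One subtlety in pick_callback_cancel_locked above: cancellation now goes through chand->lb_policy, which may not be the policy the pick started on. The comment notes this is a no-op; that holds because cancel_pick_locked implementations match on the pick pointer within their own pending list. A sketch of the pattern (my_policy is hypothetical; compare glb_cancel_pick_locked later in this patch):

    static void cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_lb_policy_pick_state* pick,
                                   grpc_error* error) {
      my_policy* p = (my_policy*)pol;
      grpc_lb_policy_pick_state** pp = &p->pending_picks;
      while (*pp != nullptr) {
        if (*pp == pick) {  // registered here: unlink and fail it
          *pp = pick->next;
          pick->connected_subchannel = nullptr;
          GRPC_CLOSURE_SCHED(pick->on_complete,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "Pick Cancelled", &error, 1));
          break;
        }
        pp = &(*pp)->next;
      }
      // Not found: the pick belongs to some other policy; nothing to do.
      GRPC_ERROR_UNREF(error);
    }
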
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index ebaeaadfc58..cc4fe7ec627 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -19,8 +19,6 @@
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
-#define WEAK_REF_BITS 16
-
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount");
@@ -28,91 +26,60 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) {
policy->vtable = vtable;
- gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
+ gpr_ref_init(&policy->refs, 1);
policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifndef NDEBUG
-#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
-#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
-#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
-#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
+void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
+ const char* reason) {
+ if (grpc_trace_lb_policy_refcount.enabled()) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
+ old_refs, old_refs + 1, reason);
+ }
#else
-#define REF_FUNC_EXTRA_ARGS
-#define REF_MUTATE_EXTRA_ARGS
-#define REF_FUNC_PASS_ARGS(new_reason)
-#define REF_MUTATE_PASS_ARGS(x)
+void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
#endif
+ gpr_ref(&lb_policy->refs);
+}
-static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
- int barrier REF_MUTATE_EXTRA_ARGS) {
- gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
- : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
+void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
+ const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
- purpose, old_val, old_val + delta, reason);
+ "LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
+ old_refs, old_refs - 1, reason);
}
+#else
+void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
#endif
- return old_val;
-}
-
-void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
-}
-
-static void shutdown_locked(void* arg, grpc_error* error) {
- grpc_lb_policy* policy = (grpc_lb_policy*)arg;
- policy->vtable->shutdown_locked(policy);
- GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
-}
-
-void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- gpr_atm old_val =
- ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
- 1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
- gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
- gpr_atm check = 1 << WEAK_REF_BITS;
- if ((old_val & mask) == check) {
- GRPC_CLOSURE_SCHED(
- GRPC_CLOSURE_CREATE(shutdown_locked, policy,
- grpc_combiner_scheduler(policy->combiner)),
- GRPC_ERROR_NONE);
- } else {
- grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
+ if (gpr_unref(&lb_policy->refs)) {
+ grpc_pollset_set_destroy(lb_policy->interested_parties);
+ grpc_combiner* combiner = lb_policy->combiner;
+ lb_policy->vtable->destroy(lb_policy);
+ GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
}
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
-}
-
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- gpr_atm old_val =
- ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
- if (old_val == 1) {
- grpc_pollset_set_destroy(policy->interested_parties);
- grpc_combiner* combiner = policy->combiner;
- policy->vtable->destroy(policy);
- GRPC_COMBINER_UNREF(combiner, "lb_policy");
- }
+void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
+ grpc_lb_policy* new_policy) {
+ policy->vtable->shutdown_locked(policy, new_policy);
}
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context,
- void** user_data, grpc_closure* on_complete) {
- return policy->vtable->pick_locked(policy, pick_args, target, context,
- user_data, on_complete);
+ grpc_lb_policy_pick_state* pick) {
+ return policy->vtable->pick_locked(policy, pick);
}
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- policy->vtable->cancel_pick_locked(policy, target, error);
+ policy->vtable->cancel_pick_locked(policy, pick, error);
}
void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
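
For readers who haven't seen the deleted scheme: ref_pair packed two counters into one gpr_atm, the weak count in the low WEAK_REF_BITS bits and the strong count above them, so a single atomic add could bump either counter, and STRONG_UNREF's delta of 1 - (1 << WEAK_REF_BITS) traded a strong ref for a weak ref held across shutdown. A standalone model of that encoding (illustration only):

    #include <assert.h>
    #include <stdint.h>

    #define WEAK_REF_BITS 16

    static intptr_t strong_count(intptr_t pair) { return pair >> WEAK_REF_BITS; }
    static intptr_t weak_count(intptr_t pair) {
      return pair & ((1 << WEAK_REF_BITS) - 1);
    }

    int main(void) {
      intptr_t v = 1 << WEAK_REF_BITS;          // init: 1 strong, 0 weak
      v += 1 << WEAK_REF_BITS;                  // STRONG_REF
      v += (intptr_t)1 - (1 << WEAK_REF_BITS);  // STRONG_UNREF: -1 strong, +1 weak
      assert(strong_count(v) == 1 && weak_count(v) == 1);
      return 0;
    }

With weak refs gone, shutdown becomes an explicit grpc_lb_policy_shutdown_locked() call rather than a side effect of the strong count reaching zero, and a single gpr_refcount now guards destruction.
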
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 967253418e8..414cdd15796 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -33,7 +33,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable;
- gpr_atm ref_pair;
+ gpr_refcount refs;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
@@ -42,32 +42,42 @@ struct grpc_lb_policy {
grpc_closure* request_reresolution;
};
-/** Extra arguments for an LB pick */
-typedef struct grpc_lb_policy_pick_args {
- /** Initial metadata associated with the picking call. */
+/// State used for an LB pick.
+typedef struct grpc_lb_policy_pick_state {
+ /// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata;
- /** Bitmask used for selective cancelling. See \a
- * grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
- * grpc_types.h */
+ /// Bitmask used for selective cancelling. See \a
+ /// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
+ /// grpc_types.h.
uint32_t initial_metadata_flags;
- /** Storage for LB token in \a initial_metadata, or NULL if not used */
- grpc_linked_mdelem* lb_token_mdelem_storage;
-} grpc_lb_policy_pick_args;
+ /// Storage for LB token in \a initial_metadata, or NULL if not used.
+ grpc_linked_mdelem lb_token_mdelem_storage;
+ /// Closure to run when pick is complete, if not completed synchronously.
+ grpc_closure* on_complete;
+ /// Will be set to the selected subchannel, or NULL on failure or when
+ /// the LB policy decides to drop the call.
+ grpc_core::ConnectedSubchannel* connected_subchannel;
+ /// Will be populated with context to pass to the subchannel call, if needed.
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+ /// Upon success, \a *user_data will be set to whatever opaque information
+ /// may need to be propagated from the LB policy, or NULL if not needed.
+ void** user_data;
+ /// Next pointer. For internal use by LB policy.
+ struct grpc_lb_policy_pick_state* next;
+} grpc_lb_policy_pick_state;
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_lb_policy* policy);
- void (*shutdown_locked)(grpc_lb_policy* policy);
+
+ /// \see grpc_lb_policy_shutdown_locked().
+ void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
/** \see grpc_lb_policy_pick */
- int (*pick_locked)(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete);
+ int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_lb_policy* policy,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
@@ -103,37 +113,19 @@ struct grpc_lb_policy_vtable {
};
#ifndef NDEBUG
-
-/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(p, r) \
grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
-
-/* Weak references: they don't prevent the shutdown of the LB policy. When no
- * strong references are left but there are still weak ones, shutdown is called.
- * Once the weak reference also reaches zero, the LB policy is destroyed. */
-#define GRPC_LB_POLICY_WEAK_REF(p, r) \
- grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
- grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
- const char* reason);
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
- int line, const char* reason);
-#else
+#else // !NDEBUG
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
-#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
-#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_lb_policy* policy);
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
@@ -141,28 +133,24 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner);
-/** Finds an appropriate subchannel for a call, based on \a pick_args.
-
- \a target will be set to the selected subchannel, or NULL on failure
- or when the LB policy decides to drop the call.
+/// Shuts down \a policy.
+/// If \a new_policy is non-null, any pending picks will be restarted
+/// on that policy; otherwise, they will be failed.
+void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
+ grpc_lb_policy* new_policy);
- Upon success, \a user_data will be set to whatever opaque information
- may need to be propagated from the LB policy, or NULL if not needed.
- \a context will be populated with context to pass to the subchannel
- call, if needed.
+/** Finds an appropriate subchannel for a call, based on data in \a pick.
+ \a pick must remain alive until the pick is complete.
If the pick succeeds and a result is known immediately, a non-zero
- value will be returned. Otherwise, \a on_complete will be invoked
+ value will be returned. Otherwise, \a pick->on_complete will be invoked
once the pick is complete with its error argument set to indicate
success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context,
- void** user_data, grpc_closure* on_complete);
+ grpc_lb_policy_pick_state* pick);
/** Perform a connected subchannel ping (see \a
grpc_core::ConnectedSubchannel::Ping)
@@ -171,11 +159,11 @@ void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* on_initiate,
grpc_closure* on_ack);
-/** Cancel picks for \a target.
+/** Cancel picks for \a pick.
-The \a on_complete callback of the pending picks will be invoked with \a
-*target set to NULL. */
+The \a on_complete callback of the pending pick will be invoked with
+\a pick->connected_subchannel set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
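
Putting the new API together, a hedged sketch of a caller driving a pick (my_pick_done and start_my_pick are hypothetical; client_channel.cc above is the real caller):

    static void my_pick_done(void* arg, grpc_error* error) {
      grpc_lb_policy_pick_state* pick = (grpc_lb_policy_pick_state*)arg;
      // pick->connected_subchannel is now set, or null on failure/drop.
    }

    static void start_my_pick(grpc_lb_policy* policy,
                              grpc_lb_policy_pick_state* pick,
                              grpc_metadata_batch* send_initial_metadata,
                              uint32_t flags, grpc_closure* done_closure) {
      pick->initial_metadata = send_initial_metadata;
      pick->initial_metadata_flags = flags;
      pick->on_complete = done_closure;  // wraps my_pick_done
      if (grpc_lb_policy_pick_locked(policy, pick)) {
        // Synchronous result: on_complete is NOT scheduled; inspect
        // pick->connected_subchannel directly.
      } else {
        // Pending: the policy keeps `pick` linked (via pick->next) until the
        // pick completes, so `pick` must remain alive until then.
      }
    }
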
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 3eedb08ecc0..1708d81e61a 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -32,7 +32,8 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {}
-typedef struct {
+namespace {
+struct call_data {
// Stats object to update.
grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
@@ -43,7 +44,8 @@ typedef struct {
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
-} call_data;
+};
+} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
call_data* calld = (call_data*)arg;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index ebc7fdac4c8..5849ac9d2da 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -54,7 +54,7 @@
* operations in progress over the old RR instance. This is done by
* decreasing the reference count on the old policy. The moment no more
* references are held on the old RR policy, it'll be destroyed and \a
- * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
+ * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which
* is done once again via \a rr_handover_locked().
*
@@ -128,185 +128,48 @@
grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
-/* add lb_token of selected subchannel (address) to the call's initial
- * metadata */
-static grpc_error* initial_metadata_add_lb_token(
- grpc_metadata_batch* initial_metadata,
- grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != nullptr);
- GPR_ASSERT(!GRPC_MDISNULL(lb_token));
- return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
- lb_token);
-}
-
-static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
-}
-
-typedef struct wrapped_rr_closure_arg {
- /* the closure instance using this struct as argument */
- grpc_closure wrapper_closure;
-
- /* the original closure. Usually a on_complete/notify cb for pick() and ping()
- * calls against the internal RR instance, respectively. */
- grpc_closure* wrapped_closure;
-
- /* the pick's initial metadata, kept in order to append the LB token for the
- * pick */
- grpc_metadata_batch* initial_metadata;
-
- /* the picked target, used to determine which LB token to add to the pick's
- * initial metadata */
- grpc_core::ConnectedSubchannel** target;
-
- /* the context to be populated for the subchannel call */
- grpc_call_context_element* context;
-
- /* Stats for client-side load reporting. Note that this holds a
- * reference, which must be either passed on via context or unreffed. */
+struct glb_lb_policy;
+
+namespace {
+
+/// Linked list of pending pick requests. It stores all information needed to
+/// eventually call (Round Robin's) pick() on them. They mainly stay pending
+/// waiting for the RR policy to be created.
+///
+/// Note that when a pick is sent to the RR policy, we inject our own
+/// on_complete callback, so that we can intercept the result before
+/// invoking the original on_complete callback. This allows us to set the
+/// LB token metadata and add client_stats to the call context.
+/// See \a pending_pick_complete() for details.
+struct pending_pick {
+ // Our on_complete closure and the original one.
+ grpc_closure on_complete;
+ grpc_closure* original_on_complete;
+ // The original pick.
+ grpc_lb_policy_pick_state* pick;
+ // Stats for client-side load reporting. Note that this holds a
+ // reference, which must be either passed on via context or unreffed.
grpc_grpclb_client_stats* client_stats;
-
- /* the LB token associated with the pick */
+ // The LB token associated with the pick. This is set via user_data in
+ // the pick.
grpc_mdelem lb_token;
-
- /* storage for the lb token initial metadata mdelem */
- grpc_linked_mdelem* lb_token_mdelem_storage;
-
- /* The RR instance related to the closure */
- grpc_lb_policy* rr_policy;
-
- /* The grpclb instance that created the wrapping. This instance is not owned,
- * reference counts are untouched. It's used only for logging purposes. */
- grpc_lb_policy* glb_policy;
-
- /* heap memory to be freed upon closure execution. */
- void* free_when_done;
-} wrapped_rr_closure_arg;
-
-/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(void* arg, grpc_error* error) {
- wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
-
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
-
- if (wc_arg->rr_policy != nullptr) {
- /* if *target is nullptr, no pick has been made by the RR policy (eg, all
- * addresses failed to connect). There won't be any user_data/token
- * available */
- if (*wc_arg->target != nullptr) {
- if (!GRPC_MDISNULL(wc_arg->lb_token)) {
- initial_metadata_add_lb_token(wc_arg->initial_metadata,
- wc_arg->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
- } else {
- gpr_log(
- GPR_ERROR,
- "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
- "instance %p).",
- wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
- abort();
- }
- // Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
- } else {
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- }
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
- }
- GPR_ASSERT(wc_arg->free_when_done != nullptr);
- gpr_free(wc_arg->free_when_done);
-}
-
-/* Linked list of pending pick requests. It stores all information needed to
- * eventually call (Round Robin's) pick() on them. They mainly stay pending
- * waiting for the RR policy to be created/updated.
- *
- * One particularity is the wrapping of the user-provided \a on_complete closure
- * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
- * order to correctly unref the RR policy instance upon completion of the pick.
- * See \a wrapped_rr_closure for details. */
-typedef struct pending_pick {
+ // The grpclb instance that created the wrapping. This instance is not owned;
+ // reference counts are untouched. It's used only for logging purposes.
+ glb_lb_policy* glb_policy;
+ // Next pending pick.
struct pending_pick* next;
+};
- /* original pick()'s arguments */
- grpc_lb_policy_pick_args pick_args;
-
- /* output argument where to store the pick()ed connected subchannel, or
- * nullptr upon error. */
- grpc_core::ConnectedSubchannel** target;
-
- /* args for wrapped_on_complete */
- wrapped_rr_closure_arg wrapped_on_complete_arg;
-} pending_pick;
-
-static void add_pending_pick(pending_pick** root,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context,
- grpc_closure* on_complete) {
- pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
- pp->next = *root;
- pp->pick_args = *pick_args;
- pp->target = target;
- pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
- pp->wrapped_on_complete_arg.target = target;
- pp->wrapped_on_complete_arg.context = context;
- pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
- pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
- pick_args->lb_token_mdelem_storage;
- pp->wrapped_on_complete_arg.free_when_done = pp;
- GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
- wrapped_rr_closure, &pp->wrapped_on_complete_arg,
- grpc_schedule_on_exec_ctx);
- *root = pp;
-}
-
-/* Same as the \a pending_pick struct but for ping operations */
-typedef struct pending_ping {
+/// A linked list of pending pings waiting for the RR policy to be created.
+struct pending_ping {
+ grpc_closure* on_initiate;
+ grpc_closure* on_ack;
struct pending_ping* next;
+};
- /* args for sending the ping */
- wrapped_rr_closure_arg* on_initiate;
- wrapped_rr_closure_arg* on_ack;
-} pending_ping;
-
-static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
- if (on_initiate != nullptr) {
- pping->on_initiate =
- (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
- pping->on_initiate->wrapped_closure = on_initiate;
- pping->on_initiate->free_when_done = pping->on_initiate;
- GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
- &pping->on_initiate, grpc_schedule_on_exec_ctx);
- }
- if (on_ack != nullptr) {
- pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
- pping->on_ack->wrapped_closure = on_ack;
- pping->on_ack->free_when_done = pping->on_ack;
- GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
- &pping->on_ack, grpc_schedule_on_exec_ctx);
- }
- pping->next = *root;
- *root = pping;
-}
-
-/*
- * glb_lb_policy
- */
-typedef struct rr_connectivity_data rr_connectivity_data;
+} // namespace
-typedef struct glb_lb_policy {
+struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -331,6 +194,9 @@ typedef struct glb_lb_policy {
/** the RR policy to use of the backend servers returned by the LB server */
grpc_lb_policy* rr_policy;
+ grpc_closure on_rr_connectivity_changed;
+ grpc_connectivity_state rr_connectivity_state;
+
bool started_picking;
/** our connectivity state tracker */
@@ -365,11 +231,11 @@ typedef struct glb_lb_policy {
/** are we already watching the LB channel's connectivity? */
bool watching_lb_channel;
- /** is \a lb_call_retry_timer active? */
- bool retry_timer_active;
+ /** is the callback associated with \a lb_call_retry_timer pending? */
+ bool retry_timer_callback_pending;
- /** is \a lb_fallback_timer active? */
- bool fallback_timer_active;
+ /** is the callback associated with \a lb_fallback_timer pending? */
+ bool fallback_timer_callback_pending;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -428,22 +294,92 @@ typedef struct glb_lb_policy {
/* Interval and timer for next client load report. */
grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
- bool client_load_report_timer_pending;
+ bool client_load_report_timer_callback_pending;
bool last_client_load_report_counters_were_zero;
/* Closure used for either the load report timer or the callback for
* completion of sending the load report. */
grpc_closure client_load_report_closure;
/* Client load report message payload. */
grpc_byte_buffer* client_load_report_payload;
-} glb_lb_policy;
-
-/* Keeps track and reacts to changes in connectivity of the RR instance */
-struct rr_connectivity_data {
- grpc_closure on_change;
- grpc_connectivity_state state;
- glb_lb_policy* glb_policy;
};
+/* add lb_token of selected subchannel (address) to the call's initial
+ * metadata */
+static grpc_error* initial_metadata_add_lb_token(
+ grpc_metadata_batch* initial_metadata,
+ grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
+ GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+ GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+ return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+ lb_token);
+}
+
+static void destroy_client_stats(void* arg) {
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+}
+
+static void pending_pick_set_metadata_and_context(pending_pick* pp) {
+ /* if connected_subchannel is nullptr, no pick has been made by the RR
+ * policy (e.g., all addresses failed to connect). There won't be any
+ * user_data/token available */
+ if (pp->pick->connected_subchannel != nullptr) {
+ if (!GRPC_MDISNULL(pp->lb_token)) {
+ initial_metadata_add_lb_token(pp->pick->initial_metadata,
+ &pp->pick->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(pp->lb_token));
+ } else {
+ gpr_log(GPR_ERROR,
+ "[grpclb %p] No LB token for connected subchannel pick %p",
+ pp->glb_policy, pp->pick);
+ abort();
+ }
+ // Pass on client stats via context. Passes ownership of the reference.
+ GPR_ASSERT(pp->client_stats != nullptr);
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
+ pp->client_stats;
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
+ destroy_client_stats;
+ } else {
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ }
+}
+
+/* The \a on_complete closure passed as part of the pick requires keeping a
+ * reference to its associated round robin instance. We wrap this closure in
+ * order to unref the round robin instance upon its invocation */
+static void pending_pick_complete(void* arg, grpc_error* error) {
+ pending_pick* pp = (pending_pick*)arg;
+ pending_pick_set_metadata_and_context(pp);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
+ gpr_free(pp);
+}
+
+static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
+ grpc_lb_policy_pick_state* pick) {
+ pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+ pp->pick = pick;
+ pp->glb_policy = glb_policy;
+ GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
+ grpc_schedule_on_exec_ctx);
+ pp->original_on_complete = pick->on_complete;
+ pp->pick->on_complete = &pp->on_complete;
+ return pp;
+}
+
+static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
+ new_pp->next = *root;
+ *root = new_pp;
+}
+
+static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
+ pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+ pping->on_initiate = on_initiate;
+ pping->on_ack = on_ack;
+ pping->next = *root;
+ *root = pping;
+}
+
static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
bool log) {
if (server->drop) return false;
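
Lifecycle of a pending pick under the new scheme, traced with the names from this hunk (the pick entry point itself is elided from this diff):

    // 1. A pick arrives while glb_policy->rr_policy is still null:
    //      pending_pick* pp = pending_pick_create(glb_policy, pick);
    //      pending_pick_add(&glb_policy->pending_picks, pp);
    //    pending_pick_create has already redirected pick->on_complete to
    //    pp->on_complete, stashing the caller's closure in
    //    pp->original_on_complete.
    // 2. Once the RR policy exists, create_rr_locked drains the list:
    //      pick_from_internal_rr_locked(glb_policy, /*force_async=*/true, pp);
    // 3. When RR finishes, pending_pick_complete runs: it adds the LB token to
    //    the call's initial metadata, passes client_stats via the subchannel
    //    call context, invokes original_on_complete, and frees pp.
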
@@ -555,7 +491,6 @@ static grpc_lb_addresses* process_serverlist_locked(
gpr_free(uri);
user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
}
-
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
false /* is_balancer */,
nullptr /* balancer_name */, user_data);
@@ -596,7 +531,6 @@ static void update_lb_connectivity_status_locked(
grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker);
-
/* The new connectivity status is a function of the previous one and the new
* input coming from the status of the RR policy.
*
@@ -626,7 +560,6 @@ static void update_lb_connectivity_status_locked(
*
* (*) This function mustn't be called while shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
-
switch (rr_state) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
@@ -637,7 +570,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_READY:
GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
}
-
if (grpc_lb_glb_trace.enabled()) {
gpr_log(
GPR_INFO,
@@ -655,10 +587,8 @@ static void update_lb_connectivity_status_locked(
* cleanups this callback would otherwise be responsible for.
* If \a force_async is true, then we will manually schedule the
* completion callback even if the pick is available immediately. */
-static bool pick_from_internal_rr_locked(
- glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
- bool force_async, grpc_core::ConnectedSubchannel** target,
- wrapped_rr_closure_arg* wc_arg) {
+static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
+ bool force_async, pending_pick* pp) {
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != nullptr) {
// Look at the index into the serverlist to see if we should drop this call.
@@ -668,57 +598,36 @@ static bool pick_from_internal_rr_locked(
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
- // Not using the RR policy, so unref it.
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(glb_policy->client_stats != nullptr);
grpc_grpclb_client_stats_add_call_dropped_locked(
- server->load_balance_token, wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ server->load_balance_token, glb_policy->client_stats);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
- gpr_free(wc_arg->free_when_done);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ gpr_free(pp);
return false;
}
- gpr_free(wc_arg->free_when_done);
+ gpr_free(pp);
return true;
}
}
+ // Set client_stats and user_data.
+ pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+ GPR_ASSERT(pp->pick->user_data == nullptr);
+ pp->pick->user_data = (void**)&pp->lb_token;
// Pick via the RR policy.
- const bool pick_done = grpc_lb_policy_pick_locked(
- wc_arg->rr_policy, pick_args, target, wc_arg->context,
- (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
- /* add the load reporting initial metadata */
- initial_metadata_add_lb_token(pick_args->initial_metadata,
- pick_args->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
- // Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+ pending_pick_set_metadata_and_context(pp);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
- gpr_free(wc_arg->free_when_done);
- return false;
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ pick_done = false;
}
- gpr_free(wc_arg->free_when_done);
+ gpr_free(pp);
}
/* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy).
@@ -760,7 +669,7 @@ static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
gpr_free(args);
}
-static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
static void create_rr_locked(glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == nullptr);
@@ -782,72 +691,46 @@ static void create_rr_locked(glb_lb_policy* glb_policy,
glb_policy->base.request_reresolution = nullptr;
glb_policy->rr_policy = new_rr_policy;
grpc_error* rr_state_error = nullptr;
- const grpc_connectivity_state rr_state =
- grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
- &rr_state_error);
+ glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
+ glb_policy->rr_policy, &rr_state_error);
/* Connectivity state is a function of the RR policy updated/created */
- update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
+ update_lb_connectivity_status_locked(
+ glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
/* Add the gRPC LB's interested_parties pollset_set to that of the newly
* created RR policy. This will make the RR policy progress upon activity on
* gRPC LB, which in turn is tied to the application's call */
grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
-
- /* Allocate the data for the tracking of the new RR policy's connectivity.
- * It'll be deallocated in glb_rr_connectivity_changed() */
- rr_connectivity_data* rr_connectivity =
- (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
- GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
- glb_rr_connectivity_changed_locked, rr_connectivity,
+ GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
+ on_rr_connectivity_changed_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- rr_connectivity->glb_policy = glb_policy;
- rr_connectivity->state = rr_state;
-
/* Subscribe to changes to the connectivity of the new RR */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
- grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
+ grpc_lb_policy_notify_on_state_change_locked(
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+ &glb_policy->on_rr_connectivity_changed);
grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
-
- /* Update picks and pings in wait */
+ // Send pending picks to RR policy.
pending_pick* pp;
while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next;
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
- pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
- pp->wrapped_on_complete_arg.client_stats =
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Pending pick about to (async) PICK from RR %p",
glb_policy, glb_policy->rr_policy);
}
- pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
- true /* force_async */, pp->target,
- &pp->wrapped_on_complete_arg);
+ pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
}
-
+ // Send pending pings to RR policy.
pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
- grpc_closure* on_initiate = nullptr;
- grpc_closure* on_ack = nullptr;
- if (pping->on_initiate != nullptr) {
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
- pping->on_initiate->rr_policy = glb_policy->rr_policy;
- on_initiate = &pping->on_initiate->wrapper_closure;
- }
- if (pping->on_ack != nullptr) {
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
- pping->on_ack->rr_policy = glb_policy->rr_policy;
- on_ack = &pping->on_ack->wrapper_closure;
- }
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy);
}
- grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
+ grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
+ pping->on_ack);
gpr_free(pping);
}
}
@@ -873,31 +756,28 @@ static void rr_handover_locked(glb_lb_policy* glb_policy) {
lb_policy_args_destroy(args);
}
-static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
- rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
- glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (glb_policy->shutting_down) {
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
- gpr_free(rr_connectivity);
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
return;
}
- if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
+ if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
/* An RR policy that has transitioned into the SHUTDOWN connectivity state
* should not be considered for picks or updates: the SHUTDOWN state is a
* sink, policies can't transition back from it. */
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
glb_policy->rr_policy = nullptr;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
- gpr_free(rr_connectivity);
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
return;
}
/* rr state != SHUTDOWN && !glb_policy->shutting_down: business as usual */
- update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
- GRPC_ERROR_REF(error));
- /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
- grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ update_lb_connectivity_status_locked(
+ glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
+ /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
+ grpc_lb_policy_notify_on_state_change_locked(
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+ &glb_policy->on_rr_connectivity_changed);
}
static void destroy_balancer_name(void* balancer_name) {
@@ -1005,38 +885,27 @@ static void glb_destroy(grpc_lb_policy* pol) {
gpr_free(glb_policy);
}
-static void glb_shutdown_locked(grpc_lb_policy* pol) {
+static void glb_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
glb_policy->shutting_down = true;
-
- /* We need a copy of the lb_call pointer because we can't cancell the call
- * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
- * the cancel, needs to acquire that same lock */
- grpc_call* lb_call = glb_policy->lb_call;
-
-/* glb_policy->lb_call and this local lb_call must be consistent at this point
-* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
-* of query_for_backends_locked, which can only be invoked while
-* glb_policy->shutting_down is false. */
+/* It is safe to read glb_policy->lb_call directly here (no local copy is
+* needed anymore): it is only assigned in lb_call_init_locked as part of
+* query_for_backends_locked, which can only be invoked while
+* glb_policy->shutting_down is false. */
- if (lb_call != nullptr) {
- grpc_call_cancel(lb_call, nullptr);
+ if (glb_policy->lb_call != nullptr) {
+ grpc_call_cancel(glb_policy->lb_call, nullptr);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
- glb_policy->retry_timer_active = false;
}
- if (glb_policy->fallback_timer_active) {
+ if (glb_policy->fallback_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- glb_policy->fallback_timer_active = false;
}
-
- pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- pending_ping* pping = glb_policy->pending_pings;
- glb_policy->pending_pings = nullptr;
if (glb_policy->rr_policy != nullptr) {
+ grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
} else {
grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
@@ -1051,28 +920,33 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
}
grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "glb_shutdown");
-
+ // Clear pending picks.
+ pending_pick* pp = glb_policy->pending_picks;
+ glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pp);
+ if (new_policy != nullptr) {
+ // Hand pick over to new policy.
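+ // Restore the original callback and drop the stats ref; grpclb will not
+ // record load stats for a pick completed by the new policy.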
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ pp->pick->on_complete = pp->original_on_complete;
+ if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
+ // Synchronous return; schedule callback.
+ GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
+ }
+ gpr_free(pp);
+ } else {
+ pp->pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
+ }
pp = next;
}
-
+ // Clear pending pings.
+ pending_ping* pping = glb_policy->pending_pings;
+ glb_policy->pending_pings = nullptr;
while (pping != nullptr) {
pending_ping* next = pping->next;
- if (pping->on_initiate != nullptr) {
- GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pping->on_initiate);
- }
- if (pping->on_ack != nullptr) {
- GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pping->on_ack);
- }
+ GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
gpr_free(pping);
pping = next;
}
@@ -1090,16 +964,16 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and clear pick->connected_subchannel here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
+ if (pp->pick == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1109,7 +983,7 @@ static void glb_cancel_pick_locked(grpc_lb_policy* pol,
pp = next;
}
if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
+ grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
@@ -1134,9 +1008,9 @@ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1158,14 +1032,15 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy);
static void start_picking_locked(glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
+ glb_policy->serverlist == nullptr &&
+ !glb_policy->fallback_timer_callback_pending) {
grpc_millis deadline =
grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->fallback_timer_active = true;
+ glb_policy->fallback_timer_callback_pending = true;
grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback);
}
@@ -1183,19 +1058,9 @@ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
}
static int glb_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
- if (pick_args->lb_token_mdelem_storage == nullptr) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(on_complete,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "No mdelem storage for the LB token. Load reporting "
- "won't work without it. Failing"));
- return 0;
- }
+ grpc_lb_policy_pick_state* pick) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = pending_pick_create(glb_policy, pick);
bool pick_done = false;
if (glb_policy->rr_policy != nullptr) {
const grpc_connectivity_state rr_connectivity_state =
@@ -1203,7 +1068,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
nullptr);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event
- // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
+ // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
// need to make sure we aren't trying to pick from a RR policy instance
// that's in shutdown.
if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
@@ -1213,32 +1078,16 @@ static int glb_pick_locked(grpc_lb_policy* pol,
glb_policy, glb_policy->rr_policy,
grpc_connectivity_state_name(rr_connectivity_state));
}
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
- on_complete);
+ pending_pick_add(&glb_policy->pending_picks, pp);
pick_done = false;
} else { // RR not in shutdown
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
glb_policy->rr_policy);
}
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg* wc_arg =
- (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
- GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
- grpc_schedule_on_exec_ctx);
- wc_arg->rr_policy = glb_policy->rr_policy;
- wc_arg->target = target;
- wc_arg->context = context;
GPR_ASSERT(glb_policy->client_stats != nullptr);
- wc_arg->client_stats =
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
- wc_arg->wrapped_closure = on_complete;
- wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
- wc_arg->initial_metadata = pick_args->initial_metadata;
- wc_arg->free_when_done = wc_arg;
- wc_arg->glb_policy = pol;
- pick_done = pick_from_internal_rr_locked(
- glb_policy, pick_args, false /* force_async */, target, wc_arg);
+ pick_done =
+ pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
}
} else { // glb_policy->rr_policy == NULL
if (grpc_lb_glb_trace.enabled()) {
@@ -1246,8 +1095,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks",
glb_policy);
}
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
- on_complete);
+ pending_pick_add(&glb_policy->pending_picks, pp);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1269,7 +1117,7 @@ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else {
- add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
+ pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1286,7 +1134,7 @@ static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- glb_policy->retry_timer_active = false;
+ glb_policy->retry_timer_callback_pending = false;
if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
error == GRPC_ERROR_NONE) {
if (grpc_lb_glb_trace.enabled()) {
@@ -1294,12 +1142,12 @@ static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
}
query_for_backends_locked(glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
}
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
@@ -1313,23 +1161,23 @@ static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
if (timeout > 0) {
gpr_log(GPR_DEBUG,
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+ "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
glb_policy, timeout);
} else {
- gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+ gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
glb_policy);
}
}
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_active = true;
+ glb_policy->retry_timer_callback_pending = true;
grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_server_status_received_locked");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_server_status_received_locked");
}
static void send_client_load_report_locked(void* arg, grpc_error* error);
@@ -1351,8 +1199,8 @@ static void client_load_report_done_locked(void* arg, grpc_error* error) {
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = nullptr;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
- glb_policy->client_load_report_timer_pending = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
+ glb_policy->client_load_report_timer_callback_pending = false;
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy);
}
@@ -1392,8 +1240,8 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
static void send_client_load_report_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
- glb_policy->client_load_report_timer_pending = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
+ glb_policy->client_load_report_timer_callback_pending = false;
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy);
}
@@ -1503,7 +1351,7 @@ static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(glb_policy->lb_call_status_details);
- if (glb_policy->client_load_report_timer_pending) {
+ if (glb_policy->client_load_report_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->client_load_report_timer);
}
}
@@ -1546,10 +1394,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
+ /* take a ref to be released in lb_on_sent_initial_request_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request);
@@ -1565,10 +1411,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_server_status_received_locked");
+ /* take a ref to be released in lb_on_server_status_received_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_server_status_received);
@@ -1580,9 +1424,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take another weak ref to be unref'd/reused in
- * lb_on_response_received_locked */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
+ /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received);
@@ -1597,8 +1440,7 @@ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
if (glb_policy->client_load_report_payload != nullptr) {
do_send_client_load_report_locked(glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
}
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
@@ -1630,11 +1472,9 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
"client load reporting interval = %" PRIdPTR " milliseconds",
glb_policy, glb_policy->client_stats_report_interval);
}
- /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
- * strong ref count goes to zero) to be unref'd in
- * send_client_load_report_locked() */
- glb_policy->client_load_report_timer_pending = true;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
+ /* take a ref to be unref'd in send_client_load_report_locked() */
+ glb_policy->client_load_report_timer_callback_pending = true;
+ GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(glb_policy);
} else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
@@ -1682,9 +1522,8 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = nullptr;
- if (glb_policy->fallback_timer_active) {
+ if (glb_policy->fallback_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
@@ -1717,27 +1556,27 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* reuse the "lb_on_response_received_locked" weak ref taken in
+ /* reuse the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_response_received_locked_shutdown");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
- /* dispose of the "lb_on_response_received_locked" weak ref taken in
+ /* dispose of the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() and reused in every reception loop */
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_response_received_locked_empty_payload");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_response_received_locked_empty_payload");
}
}
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- glb_policy->fallback_timer_active = false;
+ glb_policy->fallback_timer_callback_pending = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
if (glb_policy->serverlist == nullptr) {
@@ -1751,7 +1590,7 @@ static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
rr_handover_locked(glb_policy);
}
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
@@ -1772,7 +1611,7 @@ static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
// If the load report timer is still pending, we wait for it to be
// called before restarting the call. Otherwise, we restart the call
// here.
- if (!glb_policy->client_load_report_timer_pending) {
+ if (!glb_policy->client_load_report_timer_callback_pending) {
maybe_restart_lb_call(glb_policy);
}
}
@@ -1835,7 +1674,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
glb_policy->watching_lb_channel = true;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
@@ -1882,9 +1721,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
// lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
} else if (glb_policy->started_picking) {
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
- glb_policy->retry_timer_active = false;
}
start_picking_locked(glb_policy);
}
@@ -1892,8 +1730,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
case GRPC_CHANNEL_SHUTDOWN:
done:
glb_policy->watching_lb_channel = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "watch_lb_channel_connectivity_cb_shutdown");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "watch_lb_channel_connectivity_cb_shutdown");
break;
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index e70f2a8c52b..58dcbffb2d8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -31,13 +31,6 @@
grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
-typedef struct pending_pick {
- struct pending_pick* next;
- uint32_t initial_metadata_flags;
- grpc_core::ConnectedSubchannel** target;
- grpc_closure* on_complete;
-} pending_pick;
-
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
@@ -52,7 +45,7 @@ typedef struct {
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
- pending_pick* pending_picks;
+ grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
@@ -70,19 +63,27 @@ static void pf_destroy(grpc_lb_policy* pol) {
}
}
-static void pf_shutdown_locked(grpc_lb_policy* pol) {
+static void pf_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
- pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
- p->pending_picks = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
- gpr_free(pp);
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks) != nullptr) {
+ p->pending_picks = pick->next;
+ if (new_policy != nullptr) {
+ // Hand off to new LB policy.
+ if (grpc_lb_policy_pick_locked(new_policy, pick)) {
+ // Synchronous return, schedule closure.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+ }
+ } else {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ }
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
@@ -102,19 +103,18 @@ static void pf_shutdown_locked(grpc_lb_policy* pol) {
}
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ grpc_lb_policy_pick_state* next = pp->next;
+ if (pp == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
- gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -129,21 +129,20 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+ while (pick != nullptr) {
+ grpc_lb_policy_pick_state* next = pick->next;
+ if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
- gpr_free(pp);
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
}
- pp = next;
+ pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -173,27 +172,20 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
}
static int pf_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
+ grpc_lb_policy_pick_state* pick) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
- "picked");
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "picked");
return 1;
}
// No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(p);
}
- pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->on_complete = on_complete;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
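+ // The pick state is threaded directly onto the pending list, so no
+ // separate node needs to be allocated or freed.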
return 0;
}
@@ -396,8 +388,6 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Handle updates for the currently selected subchannel.
if (p->selected == sd) {
- gpr_log(GPR_INFO, "BAR selected. subchannel %p, conn subchannel %p",
- sd->subchannel, p->selected->connected_subchannel);
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
@@ -464,8 +454,7 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
if (con == nullptr) {
// The subchannel may have become disconnected by the time this callback
// is invoked. Simply ignore and resubscribe: subsequent connectivity
- // states
- // must be in the pipeline and will eventually be invoked.
+ // states must be in the pipeline and will eventually be invoked.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
break;
}
@@ -489,18 +478,17 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(p);
// Update any calls that were waiting for a pick.
- pending_pick* pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks)) {
+ p->pending_picks = pick->next;
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void*)p->selected);
}
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index a6a8fbb3cfe..8d3fd637572 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -41,29 +41,6 @@
grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
-/** List of entities waiting for a pick.
- *
- * Once a pick is available, \a target is updated and \a on_complete called. */
-typedef struct pending_pick {
- struct pending_pick* next;
-
- /* output argument where to store the pick()ed user_data. It'll be NULL if no
- * such data is present or there's an error (the definite test for errors is
- * \a target being NULL). */
- void** user_data;
-
- /* bitmask passed to pick() and used for selective cancelling. See
- * grpc_lb_policy_cancel_picks() */
- uint32_t initial_metadata_flags;
-
- /* output argument where to store the pick()ed connected subchannel, or NULL
- * upon error. */
- grpc_core::ConnectedSubchannel** target;
-
- /* to be invoked once the pick() has completed (regardless of success) */
- grpc_closure* on_complete;
-} pending_pick;
-
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -75,7 +52,7 @@ typedef struct round_robin_lb_policy {
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
- pending_pick* pending_picks;
+ grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -167,19 +144,27 @@ static void rr_destroy(grpc_lb_policy* pol) {
gpr_free(p);
}
-static void rr_shutdown_locked(grpc_lb_policy* pol) {
+static void rr_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
- pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
- p->pending_picks = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
- gpr_free(pp);
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks) != nullptr) {
+ p->pending_picks = pick->next;
+ if (new_policy != nullptr) {
+ // Hand off to new LB policy.
+ if (grpc_lb_policy_pick_locked(new_policy, pick)) {
+ // Synchronous return; schedule callback.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+ }
+ } else {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ }
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
@@ -199,19 +184,18 @@ static void rr_shutdown_locked(grpc_lb_policy* pol) {
}
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_core::ConnectedSubchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ grpc_lb_policy_pick_state* next = pp->next;
+ if (pp == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
- gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -226,22 +210,21 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+ while (pick != nullptr) {
+ grpc_lb_policy_pick_state* next = pick->next;
+ if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
- gpr_free(pp);
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
}
- pp = next;
+ pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -266,13 +249,10 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
}
static int rr_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_core::ConnectedSubchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
+ grpc_lb_policy_pick_state* pick) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
+ gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
@@ -282,18 +262,18 @@ static int rr_pick_locked(grpc_lb_policy* pol,
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
- *target =
+ pick->connected_subchannel =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
- if (user_data != nullptr) {
- *user_data = sd->user_data;
+ if (pick->user_data != nullptr) {
+ *pick->user_data = sd->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
- "index %lu)",
- (void*)p, (void*)sd->subchannel, (void*)*target,
- (void*)sd->subchannel_list, (unsigned long)next_ready_index);
+ "index %" PRIuPTR ")",
+ p, sd->subchannel, pick->connected_subchannel, sd->subchannel_list,
+ next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -304,13 +284,8 @@ static int rr_pick_locked(grpc_lb_policy* pol,
if (!p->started_picking) {
start_picking_locked(p);
}
- pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->on_complete = on_complete;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->user_data = user_data;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
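+ // As in pick_first, the pick state itself serves as the pending-list node.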
return 0;
}
@@ -495,13 +470,13 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
- pending_pick* pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks)) {
+ p->pending_picks = pick->next;
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
- if (pp->user_data != nullptr) {
- *pp->user_data = selected->user_data;
+ if (pick->user_data != nullptr) {
+ *pick->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
@@ -510,8 +485,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
(void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index);
}
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
break;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index a3b4c8e524d..5ce1298afc4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -213,13 +213,13 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
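+ // Connectivity watches now hold strong refs; the weak-ref mechanism for LB
+ // policies has been removed.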
+ GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason);
+ GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 2f1662e63bd..37c43d78ba4 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -60,11 +60,13 @@
((grpc_core::ConnectedSubchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
-typedef struct {
+namespace {
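+// The anonymous namespace gives this type internal linkage, so its name
+// cannot collide with same-named types in other translation units.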
+struct state_watcher {
grpc_closure closure;
grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
-} state_watcher;
+};
+} // namespace
typedef struct external_state_watcher {
grpc_subchannel* subchannel;
@@ -167,13 +169,13 @@ static void connection_destroy(void* arg, grpc_error* error) {
gpr_free(stk);
}
-grpc_core::ConnectedSubchannel* ConnectedSubchannel_ref(
+grpc_core::ConnectedSubchannel* grpc_connected_subchannel_ref(
grpc_core::ConnectedSubchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
c->Ref(DEBUG_LOCATION, REF_REASON);
return c;
}
-void ConnectedSubchannel_unref(
+void grpc_connected_subchannel_unref(
grpc_core::ConnectedSubchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
c->Unref(DEBUG_LOCATION, REF_REASON);
}
@@ -541,10 +543,6 @@ static void on_connected_subchannel_connectivity_changed(void* p,
if (!c->disconnected && con != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(con, "transient_failure");
gpr_atm_no_barrier_store(&c->connected_subchannel, (gpr_atm) nullptr);
- gpr_log(
- GPR_INFO,
- "LOL FORMER Connected subchannel %p of subchannel %p is now NULL.",
- con, c);
grpc_connectivity_state_set(&c->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "reflect_child");
@@ -554,8 +552,9 @@ static void on_connected_subchannel_connectivity_changed(void* p,
gpr_log(GPR_INFO,
"Connected subchannel %p of subchannel %p has gone into %s. "
"Attempting to reconnect.",
- con, c, grpc_connectivity_state_name(
- connected_subchannel_watcher->connectivity_state));
+ con, c,
+ grpc_connectivity_state_name(
+ connected_subchannel_watcher->connectivity_state));
}
maybe_start_connecting_locked(c);
} else {
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index dbc57427873..61fa97a21aa 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -49,9 +49,9 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \
grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
- ConnectedSubchannel_ref((p), __FILE__, __LINE__, (r))
+ grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
- ConnectedSubchannel_unref((p), __FILE__, __LINE__, (r))
+ grpc_connected_subchannel_unref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
@@ -65,8 +65,9 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p))
-#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) ConnectedSubchannel_ref((p))
-#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) ConnectedSubchannel_unref((p))
+#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
+ grpc_connected_subchannel_unref((p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
@@ -110,9 +111,9 @@ grpc_subchannel* grpc_subchannel_weak_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_core::ConnectedSubchannel* ConnectedSubchannel_ref(
+grpc_core::ConnectedSubchannel* grpc_connected_subchannel_ref(
grpc_core::ConnectedSubchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void ConnectedSubchannel_unref(
+void grpc_connected_subchannel_unref(
grpc_core::ConnectedSubchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index a1fb10f5b87..6dbd8c2a6d5 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -35,7 +35,8 @@
/* default maximum size of payload eligible for GET request */
static const size_t kMaxPayloadSizeForGet = 2048;
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@@ -60,13 +61,14 @@ typedef struct call_data {
grpc_closure on_send_message_next_done;
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
grpc_mdelem static_scheme;
grpc_mdelem user_agent;
size_t max_payload_size_for_get;
-} channel_data;
+};
+} // namespace
static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 9ae13d2ed27..92d17162001 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -35,16 +35,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef enum {
+namespace {
+enum initial_metadata_state {
// Initial metadata not yet seen.
INITIAL_METADATA_UNSEEN = 0,
// Initial metadata seen; compression algorithm set.
HAS_COMPRESSION_ALGORITHM,
// Initial metadata seen; no compression algorithm set.
NO_COMPRESSION_ALGORITHM,
-} initial_metadata_state;
+};
-typedef struct call_data {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -62,9 +63,9 @@ typedef struct call_data {
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Bitset of enabled algorithms */
@@ -78,7 +79,8 @@ typedef struct channel_data {
uint32_t enabled_stream_compression_algorithms_bitset;
/** Supported stream compression algorithms */
uint32_t supported_stream_compression_algorithms;
-} channel_data;
+};
+} // namespace
static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index b872dc98f59..508a3bf9fc4 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -31,7 +31,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH (sizeof(EXPECTED_CONTENT_TYPE) - 1)
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
@@ -60,11 +61,12 @@ typedef struct call_data {
grpc_closure hs_on_recv;
grpc_closure hs_on_complete;
grpc_closure hs_recv_message_ready;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
uint8_t unused;
-} channel_data;
+};
+} // namespace
static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index f50a928fcd9..a4142297689 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -31,7 +31,8 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef struct call_data {
+namespace {
+struct call_data {
intptr_t id; /**< an id unique to the call */
bool have_trailing_md_string;
grpc_slice trailing_md_string;
@@ -48,11 +49,12 @@ typedef struct call_data {
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
grpc_metadata_batch* recv_initial_metadata;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
intptr_t id; /**< an id unique to the channel */
-} channel_data;
+};
+} // namespace
static void on_initial_md_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = (grpc_call_element*)user_data;
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index 0499c6ecfc7..7b86e4cd6c1 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -37,7 +37,8 @@
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
{ DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
-typedef struct channel_data {
+namespace {
+struct channel_data {
/* We take a reference to the channel stack for the timer callback */
grpc_channel_stack* channel_stack;
/* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer
@@ -84,7 +85,8 @@ typedef struct channel_data {
grpc_connectivity_state connectivity_state;
/* Number of active calls */
gpr_atm call_count;
-} channel_data;
+};
+} // namespace
/* Increase the number of active calls. Before the increase, if there are no
calls, the max_idle_timer should be cancelled. */
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index f8487f9a9e9..3cb7b136c00 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -86,7 +86,8 @@ static void* refcounted_message_size_limits_create_from_json(
return value;
}
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@@ -97,13 +98,14 @@ typedef struct call_data {
grpc_byte_stream** recv_message;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
grpc_slice_hash_table* method_limit_table;
-} channel_data;
+};
+} // namespace
// Callback invoked when we receive a message. Here we check the max
// receive message size.
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
index 555a9134a21..88bb8c71cc6 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@@ -25,7 +25,8 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/metadata.h"
-typedef struct call_data {
+namespace {
+struct call_data {
// Receive closures are chained: we inject this closure as the
// recv_initial_metadata_ready up-call on transport_stream_op, and remember to
// call our next_recv_initial_metadata_ready member after handling it.
@@ -37,7 +38,8 @@ typedef struct call_data {
// Marks whether the workaround is active
bool workaround_active;
-} call_data;
+};
+} // namespace
// Find the user agent metadata element in the batch
static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index 043ca9bb83e..9a6f5e9bcf7 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -138,10 +138,11 @@ static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
gpr_log(
GPR_DEBUG,
- "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
+ "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR
+ ":pending-compressed=%" PRIdPTR ":flowed=%" PRId64
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
- s->flow_controlled_bytes_flowed,
+ s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 5fa02017f06..08edff5159b 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -71,6 +71,7 @@ struct grpc_fd {
int shutdown;
int closed;
int released;
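+ /* set to 1 when poll() reports POLLHUP, so the fd can be excluded from
+ future poll sets (see pollset_work below) */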
+ gpr_atm pollhup;
grpc_error* shutdown_error;
/* The watcher list.
@@ -335,6 +336,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
r->on_done_closure = nullptr;
r->closed = 0;
r->released = 0;
+ gpr_atm_no_barrier_store(&r->pollhup, 0);
r->read_notifier_pollset = nullptr;
char* name2;
@@ -462,7 +464,7 @@ static grpc_error* fd_shutdown_error(grpc_fd* fd) {
static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
grpc_closure* closure) {
- if (fd->shutdown) {
+ if (fd->shutdown || gpr_atm_no_barrier_load(&fd->pollhup)) {
GRPC_CLOSURE_SCHED(closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
@@ -950,7 +952,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[0].events = POLLIN;
pfds[0].revents = 0;
for (i = 0; i < pollset->fd_count; i++) {
- if (fd_is_orphaned(pollset->fds[i])) {
+ if (fd_is_orphaned(pollset->fds[i]) ||
+ gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
} else {
pollset->fds[fd_count++] = pollset->fds[i];
@@ -1017,6 +1020,12 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
+ /* This is a mitigation to prevent poll() from spinning on a POLLHUP:
+ * https://github.com/grpc/grpc/pull/13665 */
+ if (pfds[i].revents & POLLHUP) {
+ gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1);
+ }
fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 8f8d518def7..5c6007c3c70 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -111,7 +111,7 @@ class ExecCtx {
/** Checks if there is work to be done */
bool HasWork() {
- return combiner_data_.active_combiner != NULL ||
+ return combiner_data_.active_combiner != nullptr ||
!grpc_closure_list_empty(closure_list_);
}
diff --git a/src/core/lib/iomgr/gethostname_sysconf.cc b/src/core/lib/iomgr/gethostname_sysconf.cc
index e099fbd3888..3d74e033388 100644
--- a/src/core/lib/iomgr/gethostname_sysconf.cc
+++ b/src/core/lib/iomgr/gethostname_sysconf.cc
@@ -30,7 +30,7 @@ char* grpc_gethostname() {
char* hostname = (char*)gpr_malloc(host_name_max);
if (gethostname(hostname, host_name_max) != 0) {
gpr_free(hostname);
- return NULL;
+ return nullptr;
}
return hostname;
}
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 816acf2a233..d47a077251a 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -63,7 +63,8 @@ typedef size_t msg_iovlen_type;
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
-typedef struct {
+namespace {
+struct grpc_tcp {
grpc_endpoint base;
grpc_fd* em_fd;
int fd;
@@ -96,12 +97,13 @@ typedef struct {
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
-} grpc_tcp;
+};
-typedef struct backup_poller {
+struct backup_poller {
gpr_mu* pollset_mu;
grpc_closure run_poller;
-} backup_poller;
+};
+} // namespace
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 2c26b60511d..baa49d5cc5d 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -65,6 +65,17 @@ typedef struct {
grpc_pollset* pollset;
} grpc_tcp;
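+/* Wraps a TCP error with GRPC_STATUS_UNAVAILABLE and the peer address so
+ * that callers surface a retryable status to the application. */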
+static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
+ return grpc_error_set_str(
+ grpc_error_set_int(
+ src_error,
+ /* All TCP errors are marked with UNAVAILABLE so that the application
+ * may choose to retry. */
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(tcp->peer_string));
+}
+
static void tcp_free(grpc_tcp* tcp) {
grpc_resource_user_unref(tcp->resource_user);
gpr_free(tcp->handle);
@@ -162,7 +173,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
+ error =
+ tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else if (nread > 0) {
// Successful read
@@ -177,7 +189,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
}
} else {
// nread < 0: Error
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
}
call_read_cb(tcp, error);
@@ -194,7 +207,9 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
status =
uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"),
+ tcp);
error = grpc_error_set_str(
error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
@@ -235,7 +250,8 @@ static void write_callback(uv_write_t* req, int status) {
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"), tcp);
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
@@ -268,8 +284,10 @@ static void uv_endpoint_write(grpc_endpoint* ep,
}
if (tcp->shutting_down) {
- GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "TCP socket is shutting down"));
+ GRPC_CLOSURE_SCHED(cb,
+ tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "TCP socket is shutting down"),
+ tcp));
return;
}
diff --git a/src/core/lib/iomgr/wakeup_fd_nospecial.cc b/src/core/lib/iomgr/wakeup_fd_nospecial.cc
index 4c20b8c1b71..c2b525a2544 100644
--- a/src/core/lib/iomgr/wakeup_fd_nospecial.cc
+++ b/src/core/lib/iomgr/wakeup_fd_nospecial.cc
@@ -31,6 +31,6 @@
static int check_availability_invalid(void) { return 0; }
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
- NULL, NULL, NULL, NULL, check_availability_invalid};
+ nullptr, nullptr, nullptr, nullptr, check_availability_invalid};
#endif /* GRPC_POSIX_NO_SPECIAL_WAKEUP_FD */
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index cd3c2e3f19d..6a3641f112b 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -37,8 +37,9 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
+namespace {
/* We can have a per-call credentials. */
-typedef struct {
+struct call_data {
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_credentials* creds;
@@ -57,13 +58,14 @@ typedef struct {
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
-} call_data;
+};
/* We can have a per-channel credentials. */
-typedef struct {
+struct channel_data {
grpc_channel_security_connector* security_connector;
grpc_auth_context* auth_context;
-} channel_data;
+};
+} // namespace
void grpc_auth_metadata_context_reset(
grpc_auth_metadata_context* auth_md_context) {
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index 73653f2a668..f82971dc567 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -26,13 +26,14 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
-typedef enum {
+namespace {
+enum async_state {
STATE_INIT = 0,
STATE_DONE,
STATE_CANCELLED,
-} async_state;
+};
-typedef struct call_data {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
@@ -44,12 +45,13 @@ typedef struct call_data {
grpc_auth_context* auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
grpc_auth_context* auth_context;
grpc_server_credentials* creds;
-} channel_data;
+};
+} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
const grpc_metadata_batch* batch) {
diff --git a/src/core/lib/support/env_posix.cc b/src/core/lib/support/env_posix.cc
index 7bea31ca550..8146330555c 100644
--- a/src/core/lib/support/env_posix.cc
+++ b/src/core/lib/support/env_posix.cc
@@ -31,12 +31,12 @@
const char* gpr_getenv_silent(const char* name, char** dst) {
*dst = gpr_getenv(name);
- return NULL;
+ return nullptr;
}
char* gpr_getenv(const char* name) {
char* result = getenv(name);
- return result == NULL ? result : gpr_strdup(result);
+ return result == nullptr ? result : gpr_strdup(result);
}
void gpr_setenv(const char* name, const char* value) {
diff --git a/src/core/lib/support/fork.cc b/src/core/lib/support/fork.cc
index d59ca5584cb..dc291c40804 100644
--- a/src/core/lib/support/fork.cc
+++ b/src/core/lib/support/fork.cc
@@ -39,7 +39,7 @@ void grpc_fork_support_init() {
#else
fork_support_enabled = 0;
char* env = gpr_getenv("GRPC_ENABLE_FORK_SUPPORT");
- if (env != NULL) {
+ if (env != nullptr) {
static const char* truthy[] = {"yes", "Yes", "YES", "true",
"True", "TRUE", "1"};
for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
diff --git a/src/core/lib/support/log_posix.cc b/src/core/lib/support/log_posix.cc
index 9fab480a8d4..6f93cdefcdf 100644
--- a/src/core/lib/support/log_posix.cc
+++ b/src/core/lib/support/log_posix.cc
@@ -35,15 +35,15 @@ static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
void gpr_log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) {
char buf[64];
- char* allocated = NULL;
- char* message = NULL;
+ char* allocated = nullptr;
+ char* message = nullptr;
int ret;
va_list args;
va_start(args, format);
ret = vsnprintf(buf, sizeof(buf), format, args);
va_end(args);
if (ret < 0) {
- message = NULL;
+ message = nullptr;
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
@@ -66,7 +66,7 @@ void gpr_default_log(gpr_log_func_args* args) {
timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
- if (final_slash == NULL)
+ if (final_slash == nullptr)
display_file = args->file;
else
display_file = final_slash + 1;
diff --git a/src/core/lib/support/time_posix.cc b/src/core/lib/support/time_posix.cc
index 47a849480f5..b2087c93cfe 100644
--- a/src/core/lib/support/time_posix.cc
+++ b/src/core/lib/support/time_posix.cc
@@ -107,7 +107,7 @@ static gpr_timespec now_impl(gpr_clock_type clock) {
now.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
- gettimeofday(&now_tv, NULL);
+ gettimeofday(&now_tv, nullptr);
now.tv_sec = now_tv.tv_sec;
now.tv_nsec = now_tv.tv_usec * 1000;
break;
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a457aaa7a2f..d677576c145 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -1851,8 +1851,9 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
{
grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
- override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Error from server send status");
+ override_error =
+ error_from_status(op->data.send_status_from_server.status,
+ "Returned non-ok status");
}
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
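
call_start_batch previously overrode the batch error with a fixed "Error from server send status" message; it now builds the error from the status the application actually passed, so the final error carries the real code for filters to observe, which is what the new filter_status_code test below asserts. A hedged sketch of what an error_from_status() helper has to do; the exact body lives elsewhere in call.cc and is not part of this hunk, while grpc_error_set_int() and GRPC_ERROR_INT_GRPC_STATUS are the existing knobs:

    // Sketch under those assumptions: record the status code and message on
    // a fresh error so grpc_error_get_status() can recover them later.
    static grpc_error* error_from_status(grpc_status_code status,
                                         const char* description) {
      return grpc_error_set_int(
          grpc_error_set_str(GRPC_ERROR_CREATE_FROM_COPIED_STRING(description),
                             GRPC_ERROR_STR_GRPC_MESSAGE,
                             grpc_slice_from_copied_string(description)),
          GRPC_ERROR_INT_GRPC_STATUS, status);
    }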
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index f1d428f0a1b..ee98cf2693a 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -44,24 +44,23 @@
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef struct listener {
+grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
+
+namespace {
+struct listener {
void* arg;
void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
size_t pollset_count);
void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
struct listener* next;
grpc_closure destroy_done;
-} listener;
+};
-typedef struct call_data call_data;
-typedef struct channel_data channel_data;
-typedef struct registered_method registered_method;
+enum requested_call_type { BATCH_CALL, REGISTERED_CALL };
-typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
+struct registered_method;
-grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
-
-typedef struct requested_call {
+struct requested_call {
gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
@@ -81,15 +80,15 @@ typedef struct requested_call {
grpc_byte_buffer** optional_payload;
} registered;
} data;
-} requested_call;
+};
-typedef struct channel_registered_method {
+struct channel_registered_method {
registered_method* server_registered_method;
uint32_t flags;
bool has_host;
grpc_slice method;
grpc_slice host;
-} channel_registered_method;
+};
struct channel_data {
grpc_server* server;
@@ -176,6 +175,7 @@ typedef struct {
grpc_channel** channels;
size_t num_channels;
} channel_broadcaster;
+} // namespace
struct grpc_server {
grpc_channel_args* channel_args;
diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc
index ffaf327081f..891576f4baf 100644
--- a/src/core/lib/transport/error_utils.cc
+++ b/src/core/lib/transport/error_utils.cc
@@ -70,7 +70,7 @@ void grpc_error_get_status(grpc_error* error, grpc_millis deadline,
}
if (code != nullptr) *code = status;
- if (error_string != NULL && status != GRPC_STATUS_OK) {
+ if (error_string != nullptr && status != GRPC_STATUS_OK) {
*error_string = gpr_strdup(grpc_error_string(error));
}
diff --git a/src/core/tsi/ssl_transport_security.cc b/src/core/tsi/ssl_transport_security.cc
index f35caef6402..229f7efd373 100644
--- a/src/core/tsi/ssl_transport_security.cc
+++ b/src/core/tsi/ssl_transport_security.cc
@@ -116,6 +116,9 @@ typedef struct {
static gpr_once init_openssl_once = GPR_ONCE_INIT;
static gpr_mu* openssl_mutexes = nullptr;
+static void openssl_locking_cb(int mode, int type, const char* file,
+ int line) GRPC_UNUSED;
+static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED;
static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {
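
The two forward declarations tagged GRPC_UNUSED keep -Werror builds quiet: the locking and thread-id callbacks are only installed for older OpenSSL versions (1.1+ manages its own locking), so on newer OpenSSL the definitions would otherwise trip -Wunused-function. Generic form of the pattern, with an illustrative helper name:

    // Declaring the symbol GRPC_UNUSED (__attribute__((unused)) on GCC/Clang)
    // tells the compiler the definition may legitimately go unreferenced.
    static void conditionally_used_cb(void) GRPC_UNUSED;
    static void conditionally_used_cb(void) {
      // only reached when built against an older OpenSSL
    }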
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
index bbcbd95be5f..5bbff389487 100755
--- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
@@ -15,12 +15,12 @@
gRPC RPC Protocol HTTP/2 Auth OAuth2
https://github.com/grpc/grpc
https://github.com/grpc/grpc/blob/master/LICENSE
- true
- true
true
true
+
+
diff --git a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
index 4d6767fa985..40840d4da3e 100755
--- a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
+++ b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
@@ -15,12 +15,12 @@
gRPC test testing
https://github.com/grpc/grpc
https://github.com/grpc/grpc/blob/master/LICENSE
- true
- true
true
true
+
+
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index d9950b2f201..6d44be7ddd6 100755
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -14,12 +14,12 @@
gRPC RPC Protocol HTTP/2
https://github.com/grpc/grpc
https://github.com/grpc/grpc/blob/master/LICENSE
- true
- true
true
true
+
+
diff --git a/src/csharp/Grpc.Core/SourceLink.csproj.include b/src/csharp/Grpc.Core/SourceLink.csproj.include
new file mode 100755
index 00000000000..02ae79fb893
--- /dev/null
+++ b/src/csharp/Grpc.Core/SourceLink.csproj.include
@@ -0,0 +1,19 @@
+
+
+
+
+
+ true
+ lib/netstandard1.5
+
+
+ true
+ lib/net45
+
+
+
+
+
+
+
+
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
index 681719d124a..da61253455a 100755
--- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
@@ -14,12 +14,12 @@
gRPC health check
https://github.com/grpc/grpc
https://github.com/grpc/grpc/blob/master/LICENSE
- true
- true
true
true
+
+
diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
index 704eea5c17a..862ecda5fd9 100755
--- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
+++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
@@ -14,12 +14,12 @@
gRPC reflection
https://github.com/grpc/grpc
https://github.com/grpc/grpc/blob/master/LICENSE
- true
- true
true
true
+
+
diff --git a/test/core/end2end/end2end_nosec_tests.cc b/test/core/end2end/end2end_nosec_tests.cc
index 3236feea56e..6318550ad86 100644
--- a/test/core/end2end/end2end_nosec_tests.cc
+++ b/test/core/end2end/end2end_nosec_tests.cc
@@ -68,6 +68,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -170,6 +172,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
+ filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@@ -237,6 +240,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
+ filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@@ -356,6 +360,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
+ if (0 == strcmp("filter_status_code", argv[i])) {
+ filter_status_code(config);
+ continue;
+ }
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;
diff --git a/test/core/end2end/end2end_tests.cc b/test/core/end2end/end2end_tests.cc
index ca9443b642d..9d8dfd67238 100644
--- a/test/core/end2end/end2end_tests.cc
+++ b/test/core/end2end/end2end_tests.cc
@@ -70,6 +70,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -173,6 +175,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
+ filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@@ -241,6 +244,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
+ filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@@ -364,6 +368,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
+ if (0 == strcmp("filter_status_code", argv[i])) {
+ filter_status_code(config);
+ continue;
+ }
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;
diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py
index 7c8e7f420ae..e7cf97b2d02 100755
--- a/test/core/end2end/gen_build_yaml.py
+++ b/test/core/end2end/gen_build_yaml.py
@@ -101,6 +101,7 @@ END2END_TESTS = {
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
+ 'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,
diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl
index b9a42bdb885..1d759e1ecbf 100755
--- a/test/core/end2end/generate_tests.bzl
+++ b/test/core/end2end/generate_tests.bzl
@@ -146,6 +146,7 @@ END2END_TESTS = {
'trailing_metadata': test_options(),
'authority_not_supported': test_options(),
'filter_latency': test_options(),
+ 'filter_status_code': test_options(),
'workaround_cronet_compression': test_options(),
'write_buffering': test_options(needs_write_buffering=True),
'write_buffering_at_end': test_options(needs_write_buffering=True),
diff --git a/test/core/end2end/tests/filter_status_code.cc b/test/core/end2end/tests/filter_status_code.cc
new file mode 100644
index 00000000000..261ddd93ec5
--- /dev/null
+++ b/test/core/end2end/tests/filter_status_code.cc
@@ -0,0 +1,353 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/end2end/end2end_tests.h"
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "test/core/end2end/cq_verifier.h"
+
+static bool g_enable_filter = false;
+static gpr_mu g_mu;
+static bool g_client_code_recv;
+static bool g_server_code_recv;
+static gpr_cv g_client_code_cv;
+static gpr_cv g_server_code_cv;
+static grpc_status_code g_client_status_code;
+static grpc_status_code g_server_status_code;
+
+static void* tag(intptr_t t) { return (void*)t; }
+
+static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
+ const char* test_name,
+ grpc_channel_args* client_args,
+ grpc_channel_args* server_args) {
+ grpc_end2end_test_fixture f;
+ gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
+ f = config.create_fixture(client_args, server_args);
+ config.init_server(&f, server_args);
+ config.init_client(&f, client_args);
+ return f;
+}
+
+static gpr_timespec n_seconds_from_now(int n) {
+ return grpc_timeout_seconds_to_deadline(n);
+}
+
+static gpr_timespec five_seconds_from_now(void) {
+ return n_seconds_from_now(5);
+}
+
+static void drain_cq(grpc_completion_queue* cq) {
+ grpc_event ev;
+ do {
+ ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
+ } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void shutdown_server(grpc_end2end_test_fixture* f) {
+ if (!f->server) return;
+ grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
+ GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
+ grpc_timeout_seconds_to_deadline(5),
+ nullptr)
+ .type == GRPC_OP_COMPLETE);
+ grpc_server_destroy(f->server);
+ f->server = nullptr;
+}
+
+static void shutdown_client(grpc_end2end_test_fixture* f) {
+ if (!f->client) return;
+ grpc_channel_destroy(f->client);
+ f->client = nullptr;
+}
+
+static void end_test(grpc_end2end_test_fixture* f) {
+ shutdown_server(f);
+ shutdown_client(f);
+
+ grpc_completion_queue_shutdown(f->cq);
+ drain_cq(f->cq);
+ grpc_completion_queue_destroy(f->cq);
+ grpc_completion_queue_destroy(f->shutdown_cq);
+}
+
+// Simple request/response round trip, with client and server filters
+// installed that record the final status code reported for the call.
+static void test_request(grpc_end2end_test_config config) {
+ grpc_call* c;
+ grpc_call* s;
+ grpc_end2end_test_fixture f =
+ begin_test(config, "filter_status_code", nullptr, nullptr);
+ cq_verifier* cqv = cq_verifier_create(f.cq);
+ grpc_op ops[6];
+ grpc_op* op;
+ grpc_metadata_array initial_metadata_recv;
+ grpc_metadata_array trailing_metadata_recv;
+ grpc_metadata_array request_metadata_recv;
+ grpc_call_details call_details;
+ grpc_status_code status;
+ grpc_call_error error;
+ grpc_slice details;
+ int was_cancelled = 2;
+
+ gpr_mu_lock(&g_mu);
+ g_client_status_code = GRPC_STATUS_OK;
+ g_server_status_code = GRPC_STATUS_OK;
+ gpr_mu_unlock(&g_mu);
+
+ gpr_timespec deadline = five_seconds_from_now();
+ c = grpc_channel_create_call(
+ f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
+ grpc_slice_from_static_string("/foo"),
+ get_host_override_slice("foo.test.google.fr", config), deadline, nullptr);
+ GPR_ASSERT(c);
+
+ grpc_metadata_array_init(&initial_metadata_recv);
+ grpc_metadata_array_init(&trailing_metadata_recv);
+ grpc_metadata_array_init(&request_metadata_recv);
+ grpc_call_details_init(&call_details);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->data.send_initial_metadata.metadata = nullptr;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &status;
+ op->data.recv_status_on_client.status_details = &details;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ error =
+ grpc_server_request_call(f.server, &s, &call_details,
+ &request_metadata_recv, f.cq, f.cq, tag(101));
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+ cq_verify(cqv);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+ op->data.send_status_from_server.trailing_metadata_count = 0;
+ op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
+ grpc_slice status_string = grpc_slice_from_static_string("xyz");
+ op->data.send_status_from_server.status_details = &status_string;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+ op->data.recv_close_on_server.cancelled = &was_cancelled;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+ CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+ cq_verify(cqv);
+
+ GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
+ GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
+
+ grpc_slice_unref(details);
+ grpc_metadata_array_destroy(&initial_metadata_recv);
+ grpc_metadata_array_destroy(&trailing_metadata_recv);
+ grpc_metadata_array_destroy(&request_metadata_recv);
+ grpc_call_details_destroy(&call_details);
+
+ grpc_call_unref(s);
+ grpc_call_unref(c);
+
+ cq_verifier_destroy(cqv);
+
+ end_test(&f);
+ config.tear_down_data(&f);
+
+ // Perform the checks only after test tear-down; this guards against the
+ // case where channel-related work is still outstanding on a call when the
+ // recorded status codes are verified.
+ // TODO(https://github.com/grpc/grpc/issues/13915) enable this for windows
+#ifndef GPR_WINDOWS
+ gpr_mu_lock(&g_mu);
+ if (!g_client_code_recv) {
+ // gpr_cv_wait() returns non-zero iff the deadline passed without a signal.
+ GPR_ASSERT(gpr_cv_wait(&g_client_code_cv, &g_mu,
+ grpc_timeout_seconds_to_deadline(3)) == 0);
+ }
+ if (!g_server_code_recv) {
+ GPR_ASSERT(gpr_cv_wait(&g_server_code_cv, &g_mu,
+ grpc_timeout_seconds_to_deadline(3)) == 0);
+ }
+ GPR_ASSERT(g_client_status_code == GRPC_STATUS_UNIMPLEMENTED);
+ GPR_ASSERT(g_server_status_code == GRPC_STATUS_UNIMPLEMENTED);
+ gpr_mu_unlock(&g_mu);
+#endif // GPR_WINDOWS
+}
+
+/*******************************************************************************
+ * Test status_code filter
+ */
+
+static grpc_error* init_call_elem(grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ return GRPC_ERROR_NONE;
+}
+
+static void client_destroy_call_elem(grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ gpr_mu_lock(&g_mu);
+ g_client_status_code = final_info->final_status;
+ g_client_code_recv = true;
+ gpr_cv_signal(&g_client_code_cv);
+ gpr_mu_unlock(&g_mu);
+}
+
+static void server_destroy_call_elem(grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ gpr_mu_lock(&g_mu);
+ g_server_status_code = final_info->final_status;
+ g_server_code_recv = true;
+ gpr_cv_signal(&g_server_code_cv);
+ gpr_mu_unlock(&g_mu);
+}
+
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ return GRPC_ERROR_NONE;
+}
+
+static void destroy_channel_elem(grpc_channel_element* elem) {}
+
+static const grpc_channel_filter test_client_filter = {
+ grpc_call_next_op,
+ grpc_channel_next_op,
+ 0,
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ client_destroy_call_elem,
+ 0,
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_channel_next_get_info,
+ "client_filter_status_code"};
+
+static const grpc_channel_filter test_server_filter = {
+ grpc_call_next_op,
+ grpc_channel_next_op,
+ 0,
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ server_destroy_call_elem,
+ 0,
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_channel_next_get_info,
+ "server_filter_status_code"};
+
+/*******************************************************************************
+ * Registration
+ */
+
+static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
+ grpc_channel_filter* filter = (grpc_channel_filter*)arg;
+ if (g_enable_filter) {
+ // Want to add the filter as close to the end as possible, to make
+ // sure that all of the filters work well together. However, we
+ // can't add it at the very end, because the
+ // connected_channel/client_channel filter must be the last one.
+ // So we add it right before the last one.
+ grpc_channel_stack_builder_iterator* it =
+ grpc_channel_stack_builder_create_iterator_at_last(builder);
+ GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
+ const bool retval = grpc_channel_stack_builder_add_filter_before(
+ it, filter, nullptr, nullptr);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return retval;
+ } else {
+ return true;
+ }
+}
+
+static void init_plugin(void) {
+ gpr_mu_init(&g_mu);
+ gpr_cv_init(&g_client_code_cv);
+ gpr_cv_init(&g_server_code_cv);
+ g_client_code_recv = false;
+ g_server_code_recv = false;
+
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_client_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_server_filter);
+}
+
+static void destroy_plugin(void) {
+ gpr_cv_destroy(&g_client_code_cv);
+ gpr_cv_destroy(&g_server_code_cv);
+ gpr_mu_destroy(&g_mu);
+}
+
+void filter_status_code(grpc_end2end_test_config config) {
+ g_enable_filter = true;
+ test_request(config);
+ g_enable_filter = false;
+}
+
+void filter_status_code_pre_init(void) {
+ grpc_register_plugin(init_plugin, destroy_plugin);
+}
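
The new test wires into the generated runners through the argv dispatch added to end2end_tests.cc and end2end_nosec_tests.cc above, so it can be run in isolation against any fixture; with the conventional Makefile output layout that is, e.g., `bins/opt/h2_full_test filter_status_code` (the path and fixture choice here are illustrative). Note the status-code assertions deliberately run after config.tear_down_data(): destroy_call_elem fires asynchronously, and the condition variables are what make the checks safe to perform on the main thread.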
diff --git a/test/core/iomgr/fd_conservation_posix_test.cc b/test/core/iomgr/fd_conservation_posix_test.cc
index aaa14010f8d..4866e350d53 100644
--- a/test/core/iomgr/fd_conservation_posix_test.cc
+++ b/test/core/iomgr/fd_conservation_posix_test.cc
@@ -43,7 +43,7 @@ int main(int argc, char** argv) {
grpc_resource_quota_create("fd_conservation_posix_test");
for (i = 0; i < 100; i++) {
- p = grpc_iomgr_create_endpoint_pair("test", NULL);
+ p = grpc_iomgr_create_endpoint_pair("test", nullptr);
grpc_endpoint_destroy(p.client);
grpc_endpoint_destroy(p.server);
grpc_core::ExecCtx::Get()->Flush();
diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc
index ae26f72701b..07682d26308 100644
--- a/test/core/iomgr/resource_quota_test.cc
+++ b/test/core/iomgr/resource_quota_test.cc
@@ -118,7 +118,7 @@ static void test_instant_alloc_then_free(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
- grpc_resource_user_alloc(usr, 1024, NULL);
+ grpc_resource_user_alloc(usr, 1024, nullptr);
}
{
grpc_core::ExecCtx exec_ctx;
@@ -136,7 +136,7 @@ static void test_instant_alloc_free_pair(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
- grpc_resource_user_alloc(usr, 1024, NULL);
+ grpc_resource_user_alloc(usr, 1024, nullptr);
grpc_resource_user_free(usr, 1024);
}
grpc_resource_quota_unref(q);
@@ -565,7 +565,7 @@ static void test_resource_user_stays_allocated_until_memory_released(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
- grpc_resource_user_alloc(usr, 1024, NULL);
+ grpc_resource_user_alloc(usr, 1024, nullptr);
}
{
grpc_core::ExecCtx exec_ctx;
@@ -608,8 +608,8 @@ test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released(
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
grpc_core::ExecCtx::Get()->Flush();
- GPR_ASSERT(gpr_event_wait(&allocated,
- grpc_timeout_seconds_to_deadline(5)) != NULL);
+ GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
+ 5)) != nullptr);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_milliseconds_to_deadline(100)) ==
nullptr);
@@ -667,8 +667,8 @@ static void test_reclaimers_can_be_posted_repeatedly(void) {
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
grpc_core::ExecCtx::Get()->Flush();
- GPR_ASSERT(gpr_event_wait(&allocated,
- grpc_timeout_seconds_to_deadline(5)) != NULL);
+ GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
+ 5)) != nullptr);
GPR_ASSERT(gpr_event_wait(&reclaimer_done,
grpc_timeout_seconds_to_deadline(5)) !=
nullptr);
diff --git a/test/core/transport/chttp2/settings_timeout_test.cc b/test/core/transport/chttp2/settings_timeout_test.cc
index 08473c72b68..d7d6ee75082 100644
--- a/test/core/transport/chttp2/settings_timeout_test.cc
+++ b/test/core/transport/chttp2/settings_timeout_test.cc
@@ -21,6 +21,7 @@
#include
#include
+#include
#include
#include
diff --git a/third_party/rake-compiler-dock/Dockerfile b/third_party/rake-compiler-dock/Dockerfile
index b4a5158535f..06c721c39ba 100644
--- a/third_party/rake-compiler-dock/Dockerfile
+++ b/third_party/rake-compiler-dock/Dockerfile
@@ -1,182 +1,4 @@
-FROM ubuntu:17.04
-
-RUN apt-get -y update && \
- apt-get install -y curl git-core xz-utils build-essential wget unzip sudo gpg dirmngr
-
-# Add "rvm" as system group, to avoid conflicts with host GIDs typically starting with 1000
-RUN groupadd -r rvm && useradd -r -g rvm -G sudo -p "" --create-home rvm && \
- echo "source /etc/profile.d/rvm.sh" >> /etc/rubybashrc
-
-USER root
-RUN apt-get -y update && \
- apt-get install -y gcc-mingw-w64-x86-64 gcc-mingw-w64-i686 g++-mingw-w64-x86-64 g++-mingw-w64-i686 \
- gcc-multilib moreutils
-USER rvm
-
-# install rvm, RVM 1.26.0+ has signed releases, source rvm for usage outside of package scripts
-RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3 && \
- (curl -L http://get.rvm.io | sudo bash -s stable) && \
- bash -c " \
- source /etc/rubybashrc && \
- rvmsudo rvm cleanup all "
-
-# Regenerate the following using build.sh if the build folder changes.
-RUN echo \\
-H4sIANslcFgAA+08a3fbxo75uvwVqOxTSbFIidTDVhKnTW0n8d28ju2ku6fO5R2RQ4kVRXJnSMve\
-OPe3LzAk9fIrvk3UuylxUnk0DwADYDADzKiD1A/c5oNvCi2E7W7rgdlpdbe3Tfxumj2zpepzeGC2\
-W+1Oy7TaZvcBNpvYHbrflq0MUpkwAfCAxVHgXIQ39uPhcB3srBsGSv8ydSMu5DeiQQrudW7Wv9Xq\
-rejf6nZ7D6D1jfhZgr+4/ve5x9IgkZADD8/sMecxbO1CZZQksR2L6PwCqCjzsjerPdrbt1++PT6x\
-j97/8t/2u1fPTp6/PXq9Uv3h4Oj48O0bVXv4+tmLA1DVe3tFS0X7s6Xw14Vs/Ys0ZN9q9d/t/zsd\
-c3X9W1a3XP/rgI0fmgM/bA6YHGnaUERpzFwX9CFUNl8c7lfoz9Hb9+8q2mTs+gL0GJrJJG6OognX\
-UsnFcm89xdJ7VXoB4mzSoI2FBlWwZjAfCvqEOh4fHFU07eXb1we7mzViAZ48eQIV7owi+GfWXNeC\
-EHQJzVSKZhA5LGgKNua6E01iP+AC0dD4StNYqta0jHJakAFdlzzRFfFTjTzdL8+OX9oHbz7sNnni\
-4AoYXBAHwsmbdR2H/vzdu6Z8//eH3tRwvhGNO/Z/s2etnv+sTrtdrv91QPOhBg/hZORLwC19KNgE\
-Rix0Ay7h+PDF4ZsTwG/gRWLKhCvBTyCJsCpKRrj2cITDpTQIxWECiMMPEx663KVeAw64rwCT8O5w\
-H0zqRR33I2eMY0nsiSxQILmpn4yg4matOLCyNBLehvDKD9PzBiBpcLNjC6DhhizIWRbEZzYEsD4S\
-iJWFF3mnjM0T5JtjNw57iQj0PWBjVsyU+FfDXIgRWU6DhjU1bcMPnSB1OTzJ0Bmjp4t1F7KZXMRc\
-Xq2eMj+hWg1lA7Hvwi60HmvaWeS7Wsa3jRipsUYfhD2d1LVP6IV8r4YD6sofjf0goG+NnNv6Y+1z\
-hnPC/FANZWLoNMAZoT0/xPLZbx/rhEUmInUUYuYkfhRCyKd2VnxMRIhowpJUIme6iawBNB/CMU8g\
-jZWwMwQpygy1KmPu+N6FakBEkCEySEawgNmQzC60sgtL8ySiWOKTOLlAnwy1H5eHTZgc1x9fweYF\
-bChz4cHCbGqZPBqwgKYBNZLvw3qrrnpnYke1j2sKcy7YT0qypKDaj5kIVCuA4DjdEH49+K/Dk+OT\
-Zyfvj2sL7Z95IHk2diY4fs6ds7imxG5+RGbyUo4Q7UlEolahbpVlIhkKhRc1uu71n/n/mCXOiMtv\
-lAf48vjfsnpdk/x/r9Uu4/91wLL+6RSkm8aOsa3H7e3OV7KH++d/OttWp9T/OuB2/YdRbKi2P0Tj\
-rvyP2VqN/3rdtlWe/9YBru95GOsM8WDHmk4Uev6QNvrBvIxnDJefQ7drdreZZxi9XrvFBwxMWtTd\
-7z0++t5hZf0vxtB6y+gb3a+wB9zf//eoqvT/a4C79U9fWaJTeKYPUnWiN8hrfDmN2/2/2eq2V/L/\
-ltUt83/rgecimkDH9HbanU6n7zmdttUeWF6/3+U722aXse5Ox3L7nttvtzvwGuOtYx6DuQ2t1iP1\
-DyxUoUZoHsErJiT8JwsxkH4SYPnnoeD+/+r4EUo3Ep7h8qfaPkv4IzhmSQPaLfgbCxGD2YPWziP6\
-tw1baBEt7Tgd/M6d5BH89u7Zyd7Lj7CHMeSQq7DTQyuFCcdAFylN8lRDZqb+IOBZJqEwVU3DkB9i\
-wc/8CCO1lbFTDAkx3B9cwCiKxn44hChEfEhFqwj+P6mPe2FVDLLNsFoBhwWBoal0SZYxQXoTJOsg\
-Cw5LJS/oqkyICLH7BeSIJBSIGtgrATfiMqwmGvGA7AuMB7GzHwJTXLp0F+OLKJzwMDG0w4S4CrHJ\
-DyTNOYiYCxhF45cRo+CfAwtkBDETCUReHrlj4M2EC4E/EExcNLAw5trvEvVIeR3hRk4+HeRP3QMh\
-fV/KFLmlrtm9z6NmEw8Io3RgoJSXncSKy6D0TTMb39zJRL8icSQVRlPKUqA6VZ6IJD8TO8zFPhmL\
-QS7yhjYd+c6IBicXse8osS5OlJ8nJFrwkIuGmpqv8lFTSvfMjMaFOGCJF4mJdsYCmqOSmpOkCmHI\
-uctdkgfKc8ouchwkaBzKPFRood6mxOA9boCMMukThVzGyFcyUpg1ym4wB1XrkmXlE3zBJ8SlEaCo\
-GkVliDZ0xrMMV47I5xLtROWqtJnVL5gEsUbZjjOONv4u4AyNL4wS3phzpJw7uChNJ4mQMxQWgyGO\
-CMlw1HxlsXwQgeBuA097gPziTLF+qHJALi5YNyNPjEsDjiM0XkRVEbj6JjHE4yHowqsYmq7rGvGv\
-zKKJWuGh9FG3TI4NMYBLaHdhawn060ADU9EqzKQBZgctU3JB2R1Z26o3wDLB5QHPKvS6pi0dZW/m\
-YXBzW37UbbVbzOv3DMPrm52+2aejbq/TocndhlnD2dyK/eefQTd7/cYObGV/sOJv7IzBrCfIUZSi\
-ygbKY3kcfQLaY3E3Cxtq6dCVy6LNS0f4cTLv5UxcePIE2EDaeRdD8EAZmE22aXvorGvUjLpTNXUN\
-NH1OpHBTJK2pHwQ5O1IZnQdTrpaNIyIpIVv5aN5zBNglMbC/WsjST9Bi8oToT7XKxqeC6OfmAp1K\
-XXk3bWvOBRnc/TjYujcHOY2COswhipVhGXEqR7WHipKdcWvnTTXlS+r1+SgeuihI0nK71220+7Cl\
-/rbuqecNyjz7Ic9nxlSWk2xIqvx2NuvCkf1QjMrG2LkntlX/Gvawi54NoOjePuNiQd0btML8sBB2\
-Y0n35EYn44lHZfQGr7GPWpRDHnKh2CpEvoLmS0bmbKvqJZ0UnSuw+xR+u1Zdjbmx3Q43mltDu3vw\
-VQz5lCoflwQYxVk2XE2F1pb6ptSkk8iBri3VnPWb5rzImpp2UWFT34/kbS+TywVpFzSLfrfSJXXM\
-x5IOcOfkM2WRUZHvxOWCy0uNRCMhM5XFqKscF6q4hduZDJ/TdhfFPKwlRsgmuENVp9W66uZdLqrS\
-M6YCV+vCGSAnUz0NT8PKfIHPu1IHWwxutvSrQxQ7gjO3tuokPF/IZHVBLxTnK1QpgMwhE/x1yi6o\
-XhFdYUVKdPRlWWwzeTjxFS+mGMQNXolxyeMWM4YnUDGNfgV+/DEvPNmFxWc4i/K+RjFiK7lWNchO\
-RHd7Cd2jKOnNtXG7freuQ3GnDq6OMmQ6+KHW/HttZhz1wu5q1fpmE6menppkD6enVnUJw73kszjn\
-K7Sz2gFyPob66cNafkcoZ/SRdjaFav1uTPkN0B9FpM6Tp0Y9Fhh01KDI3CkzPK1J4dAe45+f1mfI\
-4zSRmZSWVEyKneIZaLU2Wzc54dXGRKQhmjyvYZEHwRK3fBHXwhdVnLvQe7mkG53ozCWtLqR7+5+7\
-nMo1fqgQztLUF4vqXNDpd+n019npNcz7HgtIUBWHhXjGz2I/mWJEpMSA51JUW9tq4hEpCs4qFPaE\
-efSSrxYMARnFL1G4hPCYZ1Hjo+sCveR36YxSDOhWYjx+pg+i8yLM687xzSNmxUd1LqirsfTcm6JJ\
-BwyPNG9VxL8oXMEn0RmdaUKZwKOjwZ4aumhgKz0WXyHe0e2a5X5Nr/2D472jw3cn9H4RvUh2yHJ/\
-qq024kLSQbOMrtEyaAVFU2mY2v+D5Pjd+T/KpkRpoqOVynsm/nK46/5n27JW8n9m22qV+b91wFLQ\
-rIIG9R5wfqBR7+owrL2xLQ+ae8xxdkwMmnuMWayzEjTfPDoLmm9uV0FzW7lN+rNNXrPw/fSyz6bn\
-f5+byo5lc+PT68M3L35Vr48/47eVl8bXRBeEI3MCFV1XWHbzYb+8P3y1/7kIFaq6zkM2CLguR5Qp\
-qc7rXV+qBh/9BgsCSoJV8xAFW4v1k4yvq3SC3EvmlbTMdquE+iM9XEEX/TwSDodfEXnkjC21NR7R\
-hmgaOw386BfvlFQmx0/u6XOuvf/tG+2v+RTkX7j/t6xeef+zDrhR/1/n6l/Bv3D/j19K/78OKO//\
-/9pwy/q3JQYu6qriD/qBu95/W2ZnZf1vd8v33+uBL17/O/2d7XbLMQzH4z2r2y7Wf3a+m/fOznPz\
-7+r81un16ei2VRToDIfnGObYXho6dOfoTSJXvXGOh+pBbkd90rtderULMmbT8AzkhaS7P7DtouSM\
-RIQB8ZBr8B9ZFCd5InEY5SLoFyuSc/XLFc8ZEQknkiOQfjiChOFHEA0tEFEauqeLCFJEgH+G+Mdl\
-GBViZM4DDE7tgIkhtz2XbvbwP448pWFWOF1NKU/GiT/hQB/DCQwnVLAFBrt4jrOHPFGt+d/Ic9kF\
-oAkGENPn6WqGO6ZkGRQrEuQoTTDIDNU7aDz7JcwZgxvYfkL5dW7HI1csRtILKL5kJOVGHtF8mLRR\
-SWdM7P5jE8vqlzkVrHHOlObszVyFFbgE6pAIW47+oSnNhjb2G3GcrFJyZfPV4ZuDN2/pN0WzUZXN\
-GYXv/qc2/5Zwjf+3jLbR+rPP/2Z5/l8L3Kj/9e3/rU6vfXX/L/M/a4Ev3v/5gLfdHdz/WbvdG2zf\
-b//f6Xfaav9XhR7t/7Md5tadojDBytKuM6vV1FU/lwlUzjev6wC7cH7B5ePs7ZQi6LAEnj4lBl3u\
-SYN+c2o/2zt4+1zTN/KL95fPPhzYxwdv9p8fvjoAU4O8A2jg+d/XNnXt+u/8Wf7fsjC4zPI/5fvf\
-tcCN+v9T/L9lYQfl/ztl/mctUPr/0v+7TZkIP7angsUxF1+dxh3+3zTN7ZXzX3sb3UC5/tcAGz+o\
-/7MG3bxRMoM2AE3bgGMyiOyeLQrVm6YAvChw1cN2mVAiQT0m/uALejw9iM4huxbLeyGKJFKv64Gp\
-9AouQF/CIB3OXxpMp1PjbDbeiMSwmfjOmCfNHdwRNG32ZiDhk5hYqc6r6Gua+IGsapoyXlzolY1P\
-6qXHgOHyZxNe22zVPxs4s4qmqYfL2OfZ0YsPhuD00ODTpXcJ3u4/m3/Xm/DZmLBYvQhhYkhP1+hZ\
-8y6c5JQN+qV7VRGqZo2GE0SSa9njkvfEieHE9Av8hmqlFynY+BuWG1T7UaNHIFr+dLRgJEsqLTMC\
-W/kra2To02XSiC7pFRiig8+alobYIikNhjKpgWKoAQ8LtPT2hp+jKzetbUXQiVyOxDZ/Mqg6+6V5\
-LgyDM2eUPZUhHiPhq3nPpyMmqnJ1isXssiHZtBRNovV9+cYSSiihhBJKKKGEEkoooYQSSiihhBJK\
-KKGEEkoooYQSSiihhBJKKKGEEkoooYQSSiihhH9H+D9i0BbqAHgAAA==\
-| base64 -d | tar xzC /tmp
-
-# Import patch files for ruby and gems
-RUN cp -r /tmp/build/patches /home/rvm/patches/
-ENV BASH_ENV /etc/rubybashrc
-
-# install rubies and fix permissions on
-RUN bash -c " \
- export CFLAGS='-s -O3 -fno-fast-math -fPIC' && \
- echo 'about to install patches for ruby 2.4.0 from:' && \
- ls -r ~/patches && \
- for v in 2.4.0 ; do \
- rvm install \$v --patch \$(echo ~/patches/ruby-\$v/* | tr ' ' ','); \
- done && \
- rvm cleanup all && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-# Install rake-compiler and typical gems in all Rubies
-# do not generate documentation for gems
-RUN echo "gem: --no-ri --no-rdoc" >> ~/.gemrc && \
- bash -c " \
- rvm all do gem install bundler rake-compiler hoe mini_portile rubygems-tasks && \
- rvm 2.4.0 do gem install mini_portile2 && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c "gem env"
-RUN bash -c "gem list rake-compiler"
-
-# Install rake-compiler's cross rubies in global dir instead of /root
-RUN sudo mkdir -p /usr/local/rake-compiler && \
- sudo chown rvm.rvm /usr/local/rake-compiler && \
- ln -s /usr/local/rake-compiler ~/.rake-compiler
-
-# Patch rake-compiler to avoid build of ruby extensions
-RUN cd /usr/local/rvm/gems/ruby-2.4.0/gems/rake-compiler-0.9.5 && git apply /home/rvm/patches/rake-compiler-0.9.5/*.diff ; \
- true
-
-RUN bash -c "rvm use 2.4.0 --default && \
- export MAKE=\"make -j`nproc`\" CFLAGS='-s -O1 -fno-omit-frame-pointer -fno-fast-math' && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-linux-gnu && \
- rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c "rvm use 2.4.0 --default && \
- export MAKE=\"make -j`nproc`\" CFLAGS='-m32 -s -O1 -fno-omit-frame-pointer -fno-fast-math' LDFLAGS='-m32' && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-linux-gnu && \
- rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c " \
- rvm alias create 2.4 2.4.0 "
-
-USER root
-
-# Fix paths in rake-compiler/config.yml and add rvm and mingw-tools to the global bashrc
-RUN sed -i -- "s:/root/.rake-compiler:/usr/local/rake-compiler:g" /usr/local/rake-compiler/config.yml && \
- echo "source /etc/profile.d/rvm.sh" >> /etc/bash.bashrc && \
- echo "export PATH=\$PATH:/opt/mingw/mingw32/bin" >> /etc/bash.bashrc && \
- echo "export PATH=\$PATH:/opt/mingw/mingw64/bin" >> /etc/bash.bashrc
-
-# Install wrappers for strip commands as a workaround for "Protocol error" in boot2docker.
-RUN cp /tmp/build/strip_wrapper /root/
-RUN sudo chmod +rx /root/strip_wrapper
-RUN mv /usr/bin/i686-w64-mingw32-strip /usr/bin/i686-w64-mingw32-strip.bin && \
- mv /usr/bin/x86_64-w64-mingw32-strip /usr/bin/x86_64-w64-mingw32-strip.bin && \
- ln /root/strip_wrapper /usr/bin/i686-w64-mingw32-strip && \
- ln /root/strip_wrapper /usr/bin/x86_64-w64-mingw32-strip
+FROM larskanis/rake-compiler-dock:0.6.2
RUN find / -name rbconfig.rb | while read f ; do sed -i 's/0x0501/0x0600/' $f ; done
RUN find / -name win32.h | while read f ; do sed -i 's/gettimeofday/rb_gettimeofday/' $f ; done
@@ -184,26 +6,6 @@ RUN sed -i 's/defined.__MINGW64__.$/1/' /usr/local/rake-compiler/ruby/i686-w64-m
RUN find / -name libwinpthread.dll.a | xargs rm
RUN find / -name libwinpthread-1.dll | xargs rm
RUN find / -name *msvcrt-ruby*.dll.a | while read f ; do n=`echo $f | sed s/.dll//` ; mv $f $n ; done
-RUN find /usr/local/rake-compiler/ruby -name libruby.so | xargs rm
-RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do ar t $f | xargs ar d $f ; done
-RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do mv $f `echo $f | sed s/-static//` ; done
-
-# Install SIGINT forwarder
-RUN cp /tmp/build/sigfw.c /root/
-RUN gcc $HOME/sigfw.c -o /usr/local/bin/sigfw
-
-# Install user mapper
-RUN cp /tmp/build/runas /usr/local/bin/
-
-# Install sudoers configuration
-RUN cp /tmp/build/sudoers /etc/sudoers.d/rake-compiler-dock
-
-# Fixup Ruby 2.4 'static' compilation issue.
-RUN echo '!' > /usr/local/rake-compiler/ruby/x86_64-linux-gnu/ruby-2.4.0/lib/libruby.a
-RUN echo '!' > /usr/local/rake-compiler/ruby/i686-linux-gnu/ruby-2.4.0/lib/libruby.a
-
-ENV RUBY_CC_VERSION 2.4.0:2.3.0:2.2.2:2.1.5:2.0.0
-
RUN apt-get install -y g++-multilib
CMD bash
diff --git a/third_party/rake-compiler-dock/build.sh b/third_party/rake-compiler-dock/build.sh
deleted file mode 100755
index ca01fa6d4db..00000000000
--- a/third_party/rake-compiler-dock/build.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Run this to produce the snipplet of data to insert in the Dockerfile.
-
-echo 'RUN echo \\'
-tar cz build | base64 | sed 's/$/\\/'
-echo '| base64 -d | tar xzC /tmp'
diff --git a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff b/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff
deleted file mode 100644
index ea22bd928ec..00000000000
--- a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff
+++ /dev/null
@@ -1,105 +0,0 @@
-From 41f834449fc4323b2f995e8715aa5842d9fd9334 Mon Sep 17 00:00:00 2001
-From: Lars Kanis
-Date: Sat, 30 Jan 2016 08:08:07 +0100
-Subject: [PATCH] Change the fake mechanism to be compatible with bundler.
-
-The previous fake mechanism worked by hooking onto the
-"require 'rbconfig'" call.
-This is problematic because bundler internally requires rbconfig, but doesn't
-work corretly in a faked environment.
-It then fails to load gems that are also part of the standard library, like
-json and rdoc.
-This results in issues like https://github.com/rake-compiler/rake-compiler-dock/issues/8
-
-The fake mechanism is now changed to hook onto the "require 'mkrb'" call,
-which is typically part of the extconf file, and it is where the faked platform
-values are actually needed.
-That way it is loaded after bundler/setup, so that the library paths are
-set according to the Gemfile.lock, to the native Linux libraries, before
-the fake environment is active.
-
-Please note, that the build directory of a given gem needs to be cleared,
-in order to get updated fake files. So do a "rm tmp pkg -rf".
----
- lib/rake/extensiontask.rb | 35 ++++++++++++++---------------------
- 1 file changed, 14 insertions(+), 21 deletions(-)
-
-diff --git a/lib/rake/extensiontask.rb b/lib/rake/extensiontask.rb
-index 030af96..f914919 100644
---- a/lib/rake/extensiontask.rb
-+++ b/lib/rake/extensiontask.rb
-@@ -169,8 +169,8 @@ Java extension should be preferred.
- # now add the extconf script
- cmd << abs_extconf.relative_path_from(abs_tmp_path)
-
-- # rbconfig.rb will be present if we are cross compiling
-- if t.prerequisites.include?("#{tmp_path}/rbconfig.rb") then
-+ # fake.rb will be present if we are cross compiling
-+ if t.prerequisites.include?("#{tmp_path}/fake.rb") then
- options.push(*cross_config_options(platf))
- end
-
-@@ -365,39 +365,30 @@ Java extension should be preferred.
- # define compilation tasks for cross platform!
- define_compile_tasks(for_platform, ruby_ver)
-
-- # chain fake.rb, rbconfig.rb and mkmf.rb to Makefile generation
-+ # chain fake.rb and mkmf.rb to Makefile generation
- file "#{tmp_path}/Makefile" => ["#{tmp_path}/fake.rb",
-- "#{tmp_path}/rbconfig.rb",
- "#{tmp_path}/mkmf.rb"]
-
-- # copy the file from the cross-ruby location
-- file "#{tmp_path}/rbconfig.rb" => [rbconfig_file] do |t|
-+ # copy the rbconfig from the cross-ruby location and
-+ # genearte fake.rb for different ruby versions
-+ file "#{tmp_path}/fake.rb" => [rbconfig_file] do |t|
- File.open(t.name, 'w') do |f|
-- f.write "require 'fake.rb'\n\n"
-+ f.write fake_rb(for_platform, ruby_ver)
- f.write File.read(t.prerequisites.first)
- end
- end
-
- # copy mkmf from cross-ruby location
- file "#{tmp_path}/mkmf.rb" => [mkmf_file] do |t|
-- cp t.prerequisites.first, t.name
-- if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
-- File.open(t.name, 'r+t') do |f|
-- content = f.read
-+ File.open(t.name, 'w') do |f|
-+ content = File.read(t.prerequisites.first)
-+ content.sub!(/^(require ')rbconfig(')$/, '\\1fake\\2')
-+ if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
- content.sub!(/^( break )\*(defaults)$/, '\\1\\2.first')
- content.sub!(/^( return )\*(defaults)$/, '\\1\\2.first')
- content.sub!(/^( mfile\.)print( configuration\(srcprefix\))$/, '\\1puts\\2')
-- f.rewind
-- f.write content
-- f.truncate(f.tell)
- end
-- end
-- end
--
-- # genearte fake.rb for different ruby versions
-- file "#{tmp_path}/fake.rb" do |t|
-- File.open(t.name, 'w') do |f|
-- f.write fake_rb(for_platform, ruby_ver)
-+ f.write content
- end
- end
-
-@@ -495,8 +486,10 @@ Java extension should be preferred.
- # "cannot load such file -- win32/resolv" when it is required later on.
- # See also: https://github.com/tjschuck/rake-compiler-dev-box/issues/5
- require 'resolv'
-+ require 'rbconfig'
-
- class Object
-+ remove_const :RbConfig
- remove_const :RUBY_PLATFORM
- remove_const :RUBY_VERSION
- remove_const :RUBY_DESCRIPTION if defined?(RUBY_DESCRIPTION)
---
-2.5.0.windows.1
-
diff --git a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff b/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff
deleted file mode 100644
index 07739d33ecc..00000000000
--- a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/tasks/bin/cross-ruby.rake b/tasks/bin/cross-ruby.rake
-index 6acc816..6aa2a49 100644
---- a/tasks/bin/cross-ruby.rake
-+++ b/tasks/bin/cross-ruby.rake
-@@ -135,8 +135,7 @@ file "#{USER_HOME}/builds/#{MINGW_HOST}/#{RUBY_CC_VERSION}/Makefile" => ["#{USER
- "--build=#{RUBY_BUILD}",
- '--enable-shared',
- '--disable-install-doc',
-- '--without-tk',
-- '--without-tcl'
-+ '--with-ext='
- ]
-
- # Force Winsock2 for Ruby 1.8, 1.9 defaults to it
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch
deleted file mode 100644
index fac8525da60..00000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch
+++ /dev/null
@@ -1,2 +0,0 @@
-diff --git a/configure b/configure
-index 55157af..6630eba 100755
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch
deleted file mode 100644
index d8f339e8140..00000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/configure b/configure
-index 898730c..cfe6253 100755
---- a/configure
-+++ b/configure
-@@ -14695,7 +14695,7 @@ for ac_func in fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
- setsid telldir seekdir fchmod cosh sinh tanh log2 round\
- setuid setgid daemon select_large_fdset setenv unsetenv\
- mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
-- pread sendfile shutdown sigaltstack dl_iterate_phdr
-+ pread shutdown sigaltstack dl_iterate_phdr
- do :
- as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
- ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch
deleted file mode 100644
index fac8525da60..00000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch
+++ /dev/null
@@ -1,2 +0,0 @@
-diff --git a/configure b/configure
-index 55157af..6630eba 100755
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch
deleted file mode 100644
index 915fc7b790f..00000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/configure b/configure
-index ebe3d8c..a336b73 100755
---- a/configure
-+++ b/configure
-@@ -18943,7 +18943,6 @@ do :
- ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
- if test "x$ac_cv_func_sendfile" = xyes; then :
- cat >>confdefs.h <<_ACEOF
--#define HAVE_SENDFILE 1
- _ACEOF
-
- fi
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch
deleted file mode 100644
index 915fc7b790f..00000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/configure b/configure
-index ebe3d8c..a336b73 100755
---- a/configure
-+++ b/configure
-@@ -18943,7 +18943,6 @@ do :
- ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
- if test "x$ac_cv_func_sendfile" = xyes; then :
- cat >>confdefs.h <<_ACEOF
--#define HAVE_SENDFILE 1
- _ACEOF
-
- fi
diff --git a/third_party/rake-compiler-dock/build/runas b/third_party/rake-compiler-dock/build/runas
deleted file mode 100755
index b29ce31fcce..00000000000
--- a/third_party/rake-compiler-dock/build/runas
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-groupadd -g "$GID" "$GROUP"
-mkdir -p /tmp/home
-useradd -g "$GID" -u "$UID" -G rvm,sudo -p "" -b /tmp/home -m "$USER"
-
-HOME=$(bash <<< "echo ~$USER")
-ln -s /usr/local/rake-compiler "$HOME"/.rake-compiler
-
-sudo -u "$USER" --set-home \
- BASH_ENV=/etc/rubybashrc \
- -- "$@"
diff --git a/third_party/rake-compiler-dock/build/sigfw.c b/third_party/rake-compiler-dock/build/sigfw.c
deleted file mode 100644
index 291d76cec8c..00000000000
--- a/third_party/rake-compiler-dock/build/sigfw.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This program handles SIGINT and forwards it to another process.
- * It is intended to be run as PID 1.
- *
- * Docker starts processes with "docker run" as PID 1.
- * On Linux, the default signal handler for PID 1 ignores any signals.
- * Therefore Ctrl-C aka SIGINT is ignored per default.
- */
-
-#include
-#include
-#include
-
-int pid = 0;
-
-void
-handle_sigint (int signum)
-{
- if(pid)
- kill(pid, SIGINT);
-}
-
-int main(int argc, char *argv[]){
- struct sigaction new_action;
- int status = -1;
-
- /* Set up the structure to specify the new action. */
- new_action.sa_handler = handle_sigint;
- sigemptyset (&new_action.sa_mask);
- new_action.sa_flags = 0;
-
- sigaction (SIGINT, &new_action, (void*)0);
-
- pid = fork();
- if(pid){
- wait(&status);
- return WEXITSTATUS(status);
- }else{
- status = execvp(argv[1], &argv[1]);
- perror("exec");
- return status;
- }
-}
diff --git a/third_party/rake-compiler-dock/build/strip_wrapper b/third_party/rake-compiler-dock/build/strip_wrapper
deleted file mode 100755
index 7f8a1346a1d..00000000000
--- a/third_party/rake-compiler-dock/build/strip_wrapper
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env ruby
-
-# Strip file on local folder instead of a Virtualbox shared folder
-# to work around this bug: https://www.virtualbox.org/ticket/8463
-
-require 'tempfile'
-require 'fileutils'
-
-strip = "#{File.basename($0)}.bin"
-
-files = ARGV.reject{|f| f=~/^-/ }.map do |arg|
- tmp = Tempfile.new 'strip'
- tmp.close
- FileUtils.cp arg, tmp.path
- [tmp, arg]
-end
-
-options = ARGV.select{|f| f=~/^-/ } + files.map{|t,o| t.path }
-
-unless system( strip, *options )
- exit 127
-end
-code = $?.exitstatus
-
-files.each do |tmp, orig|
- FileUtils.rm orig
- FileUtils.cp tmp.path, orig
-end
-
-exit code
diff --git a/third_party/rake-compiler-dock/build/sudoers b/third_party/rake-compiler-dock/build/sudoers
deleted file mode 100644
index f9f9b97c951..00000000000
--- a/third_party/rake-compiler-dock/build/sudoers
+++ /dev/null
@@ -1 +0,0 @@
-Defaults env_keep += "http_proxy https_proxy ftp_proxy RCD_HOST_RUBY_PLATFORM RCD_HOST_RUBY_VERSION RCD_IMAGE RUBY_CC_VERSION"
diff --git a/tools/bazel.rc b/tools/bazel.rc
index c554f039713..8af2fc981d7 100644
--- a/tools/bazel.rc
+++ b/tools/bazel.rc
@@ -1,4 +1,5 @@
build --client_env=CC=clang
+build --copt -DGRPC_BAZEL_BUILD
build:asan --strip=never
build:asan --copt -fsanitize-coverage=edge
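
The new --copt line defines GRPC_BAZEL_BUILD for every Bazel compile action, giving sources a preprocessor-time way to detect a Bazel build. A hypothetical use site; the macro name is real, but nothing in this diff adds such a branch:

    // Illustrative only: branch on the Bazel build, e.g. for runfiles-style
    // test data paths.
    #ifdef GRPC_BAZEL_BUILD
    static const char* const kTestDataPrefix = "com_github_grpc_grpc/";
    #else
    static const char* const kTestDataPrefix = "";
    #endif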
diff --git a/tools/distrib/build_ruby_environment_macos.sh b/tools/distrib/build_ruby_environment_macos.sh
index fe0c5a4d70b..af367402554 100644
--- a/tools/distrib/build_ruby_environment_macos.sh
+++ b/tools/distrib/build_ruby_environment_macos.sh
@@ -47,7 +47,7 @@ EOF
MAKE="make -j8"
-for v in 2.4.0 2.3.0 2.2.2 2.1.5 2.0.0-p645 ; do
+for v in 2.5.0 2.4.0 2.3.0 2.2.2 2.1.6 2.0.0-p645 ; do
ccache -c
rake -f $CROSS_RUBY cross-ruby VERSION=$v HOST=x86_64-darwin11
done
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index 8f782e07c25..0eb2cbe1a25 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -48,6 +48,7 @@ LICENSE_PREFIX = {
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
+ '.mm': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 0fc5a25afd3..d432bd0e537 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -7616,6 +7616,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
+ "test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",
@@ -7697,6 +7698,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
+ "test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 5cf371190c9..98517cba2e7 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -6797,6 +6797,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -8135,6 +8158,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -9430,6 +9476,28 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fakesec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -10636,6 +10704,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fd_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -11903,6 +11994,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -13159,6 +13273,25 @@
"linux"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_test",
+ "platforms": [
+ "linux"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -14343,6 +14476,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -15635,6 +15791,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+workarounds_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -16991,6 +17170,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_http_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -18389,6 +18592,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_load_reporting_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -19745,6 +19971,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_oauth2_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -21067,7 +21317,7 @@
},
{
"args": [
- "graceful_server_shutdown"
+ "filter_status_code"
],
"ci_platforms": [
"windows",
@@ -21091,7 +21341,7 @@
},
{
"args": [
- "high_initial_seqno"
+ "graceful_server_shutdown"
],
"ci_platforms": [
"windows",
@@ -21115,14 +21365,14 @@
},
{
"args": [
- "idempotent_request"
+ "high_initial_seqno"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
- "cpu_cost": 1.0,
+ "cpu_cost": 0.1,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -21139,7 +21389,7 @@
},
{
"args": [
- "invoke_large_request"
+ "idempotent_request"
],
"ci_platforms": [
"windows",
@@ -21163,7 +21413,7 @@
},
{
"args": [
- "large_metadata"
+ "invoke_large_request"
],
"ci_platforms": [
"windows",
@@ -21187,7 +21437,7 @@
},
{
"args": [
- "load_reporting_hook"
+ "large_metadata"
],
"ci_platforms": [
"windows",
@@ -21211,14 +21461,14 @@
},
{
"args": [
- "max_connection_age"
+ "load_reporting_hook"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
- "cpu_cost": 0.1,
+ "cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -21235,7 +21485,31 @@
},
{
"args": [
- "max_message_length"
+ "max_connection_age"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
+ "max_message_length"
],
"ci_platforms": [
"windows",
@@ -22169,6 +22443,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -23393,6 +23691,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -24577,6 +24899,32 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "msan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -25923,6 +26271,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -27207,6 +27578,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -28364,6 +28759,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -29560,6 +29978,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "inproc_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"high_initial_seqno"
@@ -30597,6 +31038,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -31912,6 +32376,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -33112,7 +33599,7 @@
},
{
"args": [
- "graceful_server_shutdown"
+ "filter_status_code"
],
"ci_platforms": [
"linux",
@@ -33135,7 +33622,7 @@
},
{
"args": [
- "high_initial_seqno"
+ "graceful_server_shutdown"
],
"ci_platforms": [
"linux",
@@ -33158,7 +33645,7 @@
},
{
"args": [
- "hpack_size"
+ "high_initial_seqno"
],
"ci_platforms": [
"linux",
@@ -33181,14 +33668,14 @@
},
{
"args": [
- "idempotent_request"
+ "hpack_size"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
- "cpu_cost": 1.0,
+ "cpu_cost": 0.1,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -33204,7 +33691,7 @@
},
{
"args": [
- "invoke_large_request"
+ "idempotent_request"
],
"ci_platforms": [
"linux",
@@ -33227,14 +33714,14 @@
},
{
"args": [
- "keepalive_timeout"
+ "invoke_large_request"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
- "cpu_cost": 0.1,
+ "cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -33250,14 +33737,14 @@
},
{
"args": [
- "large_metadata"
+ "keepalive_timeout"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
- "cpu_cost": 1.0,
+ "cpu_cost": 0.1,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -33273,7 +33760,7 @@
},
{
"args": [
- "load_reporting_hook"
+ "large_metadata"
],
"ci_platforms": [
"linux",
@@ -33296,14 +33783,14 @@
},
{
"args": [
- "max_concurrent_streams"
+ "load_reporting_hook"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
- "cpu_cost": 0.1,
+ "cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
@@ -33319,7 +33806,7 @@
},
{
"args": [
- "max_connection_age"
+ "max_concurrent_streams"
],
"ci_platforms": [
"linux",
@@ -33342,7 +33829,30 @@
},
{
"args": [
- "max_message_length"
+ "max_connection_age"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fd_nosec_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
+ "max_message_length"
],
"ci_platforms": [
"linux",
@@ -34354,6 +34864,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -35591,6 +36124,25 @@
"linux"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -36752,6 +37304,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -38021,6 +38596,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+workarounds_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -39353,6 +39951,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_http_proxy_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -40728,6 +41350,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_load_reporting_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -41988,6 +42633,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -43068,6 +43737,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -44268,6 +44961,30 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -45426,6 +46143,32 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "msan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -46724,6 +47467,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_nosec_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"graceful_server_shutdown"
@@ -47897,6 +48663,29 @@
"posix"
]
},
+ {
+ "args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "inproc_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
{
"args": [
"high_initial_seqno"