Merge branch 'master' of github.com:grpc/grpc into conn_subchannel

commit 70fbe62613 (reviewable/pr13932/r3)
David Garcia Quintas, 7 years ago
76 files changed (number = lines changed):

  CMakeLists.txt | 2
  Makefile | 4
  Rakefile | 4
  build.yaml | 1
  doc/md | 0
  gRPC-Core.podspec | 1
  grpc.gyp | 5
  include/grpc/impl/codegen/port_platform.h | 21
  src/core/ext/filters/client_channel/backup_poller.cc | 6
  src/core/ext/filters/client_channel/channel_connectivity.cc | 6
  src/core/ext/filters/client_channel/client_channel.cc | 71
  src/core/ext/filters/client_channel/lb_policy.cc | 95
  src/core/ext/filters/client_channel/lb_policy.h | 90
  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 6
  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 660
  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 94
  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 122
  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 4
  src/core/ext/filters/client_channel/subchannel.cc | 19
  src/core/ext/filters/client_channel/subchannel.h | 13
  src/core/ext/filters/http/client/http_client_filter.cc | 10
  src/core/ext/filters/http/message_compress/message_compress_filter.cc | 14
  src/core/ext/filters/http/server/http_server_filter.cc | 10
  src/core/ext/filters/load_reporting/server_load_reporting_filter.cc | 10
  src/core/ext/filters/max_age/max_age_filter.cc | 6
  src/core/ext/filters/message_size/message_size_filter.cc | 10
  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc | 6
  src/core/ext/transport/chttp2/transport/writing.cc | 5
  src/core/lib/iomgr/ev_poll_posix.cc | 13
  src/core/lib/iomgr/exec_ctx.h | 2
  src/core/lib/iomgr/gethostname_sysconf.cc | 2
  src/core/lib/iomgr/tcp_posix.cc | 10
  src/core/lib/iomgr/tcp_uv.cc | 30
  src/core/lib/iomgr/wakeup_fd_nospecial.cc | 2
  src/core/lib/security/transport/client_auth_filter.cc | 10
  src/core/lib/security/transport/server_auth_filter.cc | 14
  src/core/lib/support/env_posix.cc | 4
  src/core/lib/support/fork.cc | 2
  src/core/lib/support/log_posix.cc | 8
  src/core/lib/support/time_posix.cc | 2
  src/core/lib/surface/call.cc | 5
  src/core/lib/surface/server.cc | 24
  src/core/lib/transport/error_utils.cc | 2
  src/core/tsi/ssl_transport_security.cc | 3
  src/csharp/Grpc.Auth/Grpc.Auth.csproj | 4
  src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj | 4
  src/csharp/Grpc.Core/Grpc.Core.csproj | 4
  src/csharp/Grpc.Core/SourceLink.csproj.include | 19
  src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj | 4
  src/csharp/Grpc.Reflection/Grpc.Reflection.csproj | 4
  test/core/end2end/end2end_nosec_tests.cc | 8
  test/core/end2end/end2end_tests.cc | 8
  test/core/end2end/gen_build_yaml.py | 1
  test/core/end2end/generate_tests.bzl | 1
  test/core/end2end/tests/filter_status_code.cc | 353
  test/core/iomgr/fd_conservation_posix_test.cc | 2
  test/core/iomgr/resource_quota_test.cc | 14
  test/core/transport/chttp2/settings_timeout_test.cc | 1
  third_party/rake-compiler-dock/Dockerfile | 200
  third_party/rake-compiler-dock/build.sh | 7
  third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff | 105
  third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff | 14
  third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch | 2
  third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch | 13
  third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch | 2
  third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch | 12
  third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch | 12
  third_party/rake-compiler-dock/build/runas | 12
  third_party/rake-compiler-dock/build/sigfw.c | 43
  third_party/rake-compiler-dock/build/strip_wrapper | 30
  third_party/rake-compiler-dock/build/sudoers | 1
  tools/bazel.rc | 1
  tools/distrib/build_ruby_environment_macos.sh | 2
  tools/distrib/check_copyright.py | 1
  tools/run_tests/generated/sources_and_headers.json | 2
  tools/run_tests/generated/tests.json | 839

@@ -4618,6 +4618,7 @@ add_library(end2end_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
@@ -4719,6 +4720,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc

@@ -327,7 +327,7 @@ CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
CXXFLAGS += -stdlib=libc++
endif
CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations
COREFLAGS += -fno-rtti -fno-exceptions
LDFLAGS += -g
@@ -8557,6 +8557,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
@@ -8655,6 +8656,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \

@@ -113,10 +113,10 @@ task 'gem:native' do
if RUBY_PLATFORM =~ /darwin/
FileUtils.touch 'grpc_c.32.ruby'
FileUtils.touch 'grpc_c.64.ruby'
system "rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
system "rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
else
Rake::Task['dlls'].execute
docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
end
end

@@ -5004,6 +5004,7 @@ defaults:
global:
COREFLAGS: -fno-rtti -fno-exceptions
CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
-Wno-deprecated-declarations
LDFLAGS: -g
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration

@@ -1038,6 +1038,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',

@@ -57,6 +57,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
],
'ldflags': [
'-g',
@@ -134,6 +135,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
@@ -143,6 +145,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
@@ -2378,6 +2381,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
@@ -2450,6 +2454,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',

@@ -195,12 +195,25 @@
#define GPR_PTHREAD_TLS 1
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */
#define GPR_CPU_POSIX 1
/* TODO(vjpai): there is a reported issue in bazel build for Mac where __thread
in a header is currently not working (bazelbuild/bazel#4341). Remove
the following conditional and use GPR_GCC_TLS when that is fixed */
#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
#else /* GRPC_BAZEL_BUILD */
#define GPR_PTHREAD_TLS 1
#endif /* GRPC_BAZEL_BUILD */
#define GPR_APPLE_PTHREAD_NAME 1
#endif
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
#define GPR_CPU_POSIX 1
/* TODO(vjpai): Remove the following conditional and use only GPR_GCC_TLS
when bazelbuild/bazel#4341 is fixed */
#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
#else /* GRPC_BAZEL_BUILD */
#define GPR_PTHREAD_TLS 1
#endif /* GRPC_BAZEL_BUILD */
#endif
#define GPR_POSIX_CRASH_HANDLER 1
#endif
@@ -421,6 +434,14 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
#ifndef GRPC_UNUSED
#if defined(__GNUC__) && !defined(__MINGW32__)
#define GRPC_UNUSED __attribute__((unused))
#else
#define GRPC_UNUSED
#endif
#endif
#ifndef GPR_PRINT_FORMAT_CHECK
#ifdef __GNUC__
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \

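The port_platform.h hunk above introduces a portable GRPC_UNUSED attribute.
A minimal usage sketch (the function and variable names below are
hypothetical, not from this commit):

    #include <grpc/impl/codegen/port_platform.h>

    // GRPC_UNUSED expands to __attribute__((unused)) on GCC/Clang (but not
    // MinGW) and to nothing elsewhere, silencing -Wunused warnings for
    // debug-only code.
    GRPC_UNUSED static void dump_state_for_debugging() {
      // ... debug-only logic ...
    }

    int main() {
      GRPC_UNUSED int only_checked_in_asserts = 42;
      return 0;
    }
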
@@ -33,7 +33,8 @@
#define DEFAULT_POLL_INTERVAL_MS 5000
typedef struct backup_poller {
namespace {
struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
@@ -42,7 +43,8 @@ typedef struct backup_poller {
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
} backup_poller;
};
} // namespace
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;

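This backup_poller.cc change shows a pattern repeated throughout the commit:
now that these files compile as C++, file-private C-style typedefs become
named structs inside an anonymous namespace, which gives the types internal
linkage. An illustrative sketch (the widget type is invented):

    // Before (C idiom):
    //   typedef struct { int x; } widget;
    // After (C++): a named struct in an anonymous namespace, private to
    // this translation unit:
    namespace {
    struct widget {
      int x;
    };
    }  // namespace

    static widget g_widget;  // used exactly as before within the file
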
@@ -58,7 +58,8 @@ typedef enum {
CALLING_BACK_AND_FINISHED,
} callback_phase;
typedef struct {
namespace {
struct state_watcher {
gpr_mu mu;
callback_phase phase;
grpc_closure on_complete;
@@ -71,7 +72,8 @@ typedef struct {
grpc_channel* channel;
grpc_error* error;
void* tag;
} state_watcher;
};
} // namespace
static void delete_state_watcher(state_watcher* w) {
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(

@@ -553,6 +553,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
chand->lb_policy = new_lb_policy;
@@ -658,6 +659,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
chand->lb_policy = nullptr;
}
@@ -792,6 +794,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
gpr_free(chand->info_lb_policy_name);
@@ -852,12 +855,10 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* subchannel_call;
grpc_error* error;
grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
grpc_lb_policy_pick_state pick;
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
grpc_core::ConnectedSubchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity* pollent;
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
@@ -866,8 +867,6 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete;
grpc_closure* original_on_complete;
} call_data;
@@ -1005,15 +1004,15 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->subchannel_call_context, // context
calld->call_combiner // call_combiner
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->pick.subchannel_call_context, // context
calld->call_combiner // call_combiner
};
grpc_error* new_error = calld->connected_subchannel->CreateCall(
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
@@ -1032,7 +1031,7 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == nullptr) {
if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE
@@ -1071,13 +1070,16 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != nullptr) {
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
chand, calld, chand->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(
calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
@@ -1092,9 +1094,6 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
GPR_ASSERT(calld->lb_policy != nullptr);
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
}
@@ -1128,26 +1127,21 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
const grpc_lb_policy_pick_args inputs = {
calld->pick.initial_metadata =
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
.send_initial_metadata;
calld->pick.initial_metadata_flags = initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
chand->lb_policy, &inputs, &calld->connected_subchannel,
calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
calld->pick.on_complete = &calld->lb_pick_closure;
const bool pick_done =
grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@@ -1289,7 +1283,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == nullptr);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@@ -1467,15 +1461,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == nullptr);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
if (calld->pick.connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->pick.connected_subchannel, "picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (calld->subchannel_call_context[i].value != nullptr) {
calld->subchannel_call_context[i].destroy(
calld->subchannel_call_context[i].value);
if (calld->pick.subchannel_call_context[i].value != nullptr) {
calld->pick.subchannel_call_context[i].destroy(
calld->pick.subchannel_call_context[i].value);
}
}
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);

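The client_channel.cc hunks above replace the separate pick in/out arguments
(target, context, user_data, on_complete) with a single caller-owned
grpc_lb_policy_pick_state. A condensed sketch of the new calling convention,
mirroring pick_callback_start_locked(); the start_pick wrapper itself is
illustrative, not verbatim gRPC code:

    #include <string.h>

    #include "src/core/ext/filters/client_channel/lb_policy.h"

    // Returns true if the pick completed synchronously.
    static bool start_pick(grpc_lb_policy* policy, grpc_metadata_batch* md,
                           uint32_t flags, grpc_closure* on_pick_done,
                           grpc_lb_policy_pick_state* pick) {
      memset(pick, 0, sizeof(*pick));
      pick->initial_metadata = md;
      pick->initial_metadata_flags = flags;
      pick->on_complete = on_pick_done;  // run only on async completion
      if (grpc_lb_policy_pick_locked(policy, pick)) {
        // Synchronous: pick->connected_subchannel (and the subchannel call
        // context) are already populated, or null if the call was dropped.
        return true;
      }
      // Asynchronous: 'pick' must stay alive until on_pick_done runs; it
      // can be cancelled with
      // grpc_lb_policy_cancel_pick_locked(policy, pick, error).
      return false;
    }
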
@@ -19,8 +19,6 @@
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount");
@@ -28,91 +26,60 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
gpr_ref_init(&policy->refs, 1);
policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
old_refs, old_refs + 1, reason);
}
#else
#define REF_FUNC_EXTRA_ARGS
#define REF_MUTATE_EXTRA_ARGS
#define REF_FUNC_PASS_ARGS(new_reason)
#define REF_MUTATE_PASS_ARGS(x)
void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
#endif
gpr_ref(&lb_policy->refs);
}
static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
"LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
old_refs, old_refs - 1, reason);
}
#else
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
#endif
return old_val;
}
void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
static void shutdown_locked(void* arg, grpc_error* error) {
grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(policy);
GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
}
void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
if (gpr_unref(&lb_policy->refs)) {
grpc_pollset_set_destroy(lb_policy->interested_parties);
grpc_combiner* combiner = lb_policy->combiner;
lb_policy->vtable->destroy(lb_policy);
GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
}
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(policy->interested_parties);
grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(policy);
GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
grpc_lb_policy* new_policy) {
policy->vtable->shutdown_locked(policy, new_policy);
}
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_core::ConnectedSubchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(policy, pick_args, target, context,
user_data, on_complete);
grpc_lb_policy_pick_state* pick) {
return policy->vtable->pick_locked(policy, pick);
}
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_core::ConnectedSubchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
policy->vtable->cancel_pick_locked(policy, target, error);
policy->vtable->cancel_pick_locked(policy, pick, error);
}
void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,

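lb_policy.cc above drops the packed strong/weak ref_pair in favor of a
single gpr_refcount; shutdown is now an explicit call rather than a side
effect of the last strong unref. Ref transitions remain traceable. A sketch
of caller usage (the reason string and helper are arbitrary):

    #include "src/core/ext/filters/client_channel/lb_policy.h"

    // In !NDEBUG builds the macros pass __FILE__/__LINE__/reason through,
    // and each transition is logged when the 'lb_policy_refcount' tracer
    // is enabled (e.g. GRPC_TRACE=lb_policy_refcount).
    static void use_policy(grpc_lb_policy* policy) {
      GRPC_LB_POLICY_REF(policy, "my_feature");
      // ... use the policy ...
      GRPC_LB_POLICY_UNREF(policy, "my_feature");  // destroys on last unref
    }
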
@@ -33,7 +33,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair;
gpr_refcount refs;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
@@ -42,32 +42,42 @@ struct grpc_lb_policy {
grpc_closure* request_reresolution;
};
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Initial metadata associated with the picking call. */
/// State used for an LB pick.
typedef struct grpc_lb_policy_pick_state {
/// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */
/// Bitmask used for selective cancelling. See \a
/// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
grpc_linked_mdelem* lb_token_mdelem_storage;
} grpc_lb_policy_pick_args;
/// Storage for LB token in \a initial_metadata, or NULL if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
grpc_closure* on_complete;
/// Will be set to the selected subchannel, or NULL on failure or when
/// the LB policy decides to drop the call.
grpc_core::ConnectedSubchannel* connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if needed.
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
/// Upon success, \a *user_data will be set to whatever opaque information
/// may need to be propagated from the LB policy, or NULL if not needed.
void** user_data;
/// Next pointer. For internal use by LB policy.
struct grpc_lb_policy_pick_state* next;
} grpc_lb_policy_pick_state;
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_lb_policy* policy);
void (*shutdown_locked)(grpc_lb_policy* policy);
/// \see grpc_lb_policy_shutdown_locked().
void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
/** \see grpc_lb_policy_pick */
int (*pick_locked)(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_core::ConnectedSubchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete);
int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_lb_policy* policy,
grpc_core::ConnectedSubchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
@@ -103,37 +113,19 @@ struct grpc_lb_policy_vtable {
};
#ifndef NDEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(p, r) \
grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
/* Weak references: they don't prevent the shutdown of the LB policy. When no
* strong references are left but there are still weak ones, shutdown is called.
* Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
int line, const char* reason);
#else
#else // !NDEBUG
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
@@ -141,28 +133,24 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
\a target will be set to the selected subchannel, or NULL on failure
or when the LB policy decides to drop the call.
/// Shuts down \a policy.
/// If \a new_policy is non-null, any pending picks will be restarted
/// on that policy; otherwise, they will be failed.
void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
grpc_lb_policy* new_policy);
Upon success, \a user_data will be set to whatever opaque information
may need to be propagated from the LB policy, or NULL if not needed.
\a context will be populated with context to pass to the subchannel
call, if needed.
/** Finds an appropriate subchannel for a call, based on data in \a pick.
\a pick must remain alive until the pick is complete.
If the pick succeeds and a result is known immediately, a non-zero
value will be returned. Otherwise, \a on_complete will be invoked
value will be returned. Otherwise, \a pick->on_complete will be invoked
once the pick is complete with its error argument set to indicate
success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_core::ConnectedSubchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete);
grpc_lb_policy_pick_state* pick);
/** Perform a connected subchannel ping (see \a
grpc_core::ConnectedSubchannel::Ping)
@@ -171,11 +159,11 @@ void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* on_initiate,
grpc_closure* on_ack);
/** Cancel picks for \a target.
/** Cancel picks for \a pick.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_core::ConnectedSubchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given

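The new shutdown contract above lets pending picks survive an LB policy
swap. A condensed sketch of the caller side, matching the
on_resolver_result_changed_locked() hunk earlier (channel_data fields
abbreviated; not verbatim):

    // Swap in a new policy; pending picks on the old one are re-queued on
    // new_lb_policy instead of failing. Passing nullptr (as in channel
    // shutdown) fails them instead.
    static void swap_lb_policy(channel_data* chand,
                               grpc_lb_policy* new_lb_policy) {
      if (chand->lb_policy != nullptr) {
        grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
        GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
      }
      chand->lb_policy = new_lb_policy;
    }
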
@@ -32,7 +32,8 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {}
typedef struct {
namespace {
struct call_data {
// Stats object to update.
grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
@@ -43,7 +44,8 @@ typedef struct {
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
};
} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
call_data* calld = (call_data*)arg;

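The call_data layout above shows the closure-interception idiom that recurs
in the filters touched by this commit: the filter saves the batch's original
callback, substitutes its own, and chains through after doing its work. A
generic sketch (struct and function names invented):

    #include "src/core/lib/iomgr/closure.h"

    struct intercept_state {
      grpc_closure wrapper;    // our injected callback
      grpc_closure* original;  // the callback we displaced
    };

    static void wrapper_cb(void* arg, grpc_error* error) {
      intercept_state* state = (intercept_state*)arg;
      // ... e.g. record client stats or inspect metadata here ...
      GRPC_CLOSURE_RUN(state->original, GRPC_ERROR_REF(error));
    }

    static void intercept(intercept_state* state, grpc_closure** slot) {
      state->original = *slot;
      GRPC_CLOSURE_INIT(&state->wrapper, wrapper_cb, state,
                        grpc_schedule_on_exec_ctx);
      *slot = &state->wrapper;
    }
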
@@ -31,13 +31,6 @@
grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
typedef struct pending_pick {
struct pending_pick* next;
uint32_t initial_metadata_flags;
grpc_core::ConnectedSubchannel** target;
grpc_closure* on_complete;
} pending_pick;
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
@@ -52,7 +45,7 @@ typedef struct {
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
pending_pick* pending_picks;
grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
@@ -70,19 +63,27 @@ static void pf_destroy(grpc_lb_policy* pol) {
}
}
static void pf_shutdown_locked(grpc_lb_policy* pol) {
static void pf_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pick->next;
if (new_policy != nullptr) {
// Hand off to new LB policy.
if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return, schedule closure.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
@@ -102,19 +103,18 @@ static void pf_shutdown_locked(grpc_lb_policy* pol) {
}
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
grpc_core::ConnectedSubchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
*target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
grpc_lb_policy_pick_state* next = pp->next;
if (pp == pick) {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -129,21 +129,20 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
while (pick != nullptr) {
grpc_lb_policy_pick_state* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pp->on_complete,
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
}
pp = next;
pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -173,27 +172,20 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
}
static int pf_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_core::ConnectedSubchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
grpc_lb_policy_pick_state* pick) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
return 1;
}
// No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(p);
}
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
return 0;
}
@@ -396,8 +388,6 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Handle updates for the currently selected subchannel.
if (p->selected == sd) {
gpr_log(GPR_INFO, "BAR selected. subchannel %p, conn subchannel %p",
sd->subchannel, p->selected->connected_subchannel);
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
@@ -464,8 +454,7 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
if (con == nullptr) {
// The subchannel may have become disconnected by the time this callback
// is invoked. Simply ignore and resubscribe: ulterior connectivity
// states
// must be in the pipeline and will eventually be invoked.
// states must be in the pipeline and will eventually be invoked.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
break;
}
@@ -489,18 +478,17 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(p);
// Update any calls that were waiting for a pick.
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks)) {
p->pending_picks = pick->next;
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void*)p->selected);
}
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);

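pick_first (and round_robin below) now thread the caller-owned pick state
directly onto the pending list via pick->next, eliminating the per-pick
gpr_malloc/gpr_free of the old pending_pick wrapper. The two list
operations, distilled (helper names invented):

    // Push: the policy allocates nothing and takes no ownership.
    static void push_pending(grpc_lb_policy_pick_state** list,
                             grpc_lb_policy_pick_state* pick) {
      pick->next = *list;
      *list = pick;
    }

    // Filtered drain, as in pf_cancel_pick_locked() above: detach the
    // list, then re-link every survivor (note the order reverses).
    static void remove_pending(grpc_lb_policy_pick_state** list,
                               grpc_lb_policy_pick_state* target) {
      grpc_lb_policy_pick_state* pick = *list;
      *list = nullptr;
      while (pick != nullptr) {
        grpc_lb_policy_pick_state* next = pick->next;
        if (pick != target) {
          pick->next = *list;
          *list = pick;
        }
        pick = next;
      }
    }
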
@@ -41,29 +41,6 @@
grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
/** List of entities waiting for a pick.
*
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
struct pending_pick* next;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
uint32_t initial_metadata_flags;
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
grpc_core::ConnectedSubchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
grpc_closure* on_complete;
} pending_pick;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -75,7 +52,7 @@ typedef struct round_robin_lb_policy {
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
pending_pick* pending_picks;
grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -167,19 +144,27 @@ static void rr_destroy(grpc_lb_policy* pol) {
gpr_free(p);
}
static void rr_shutdown_locked(grpc_lb_policy* pol) {
static void rr_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pick->next;
if (new_policy != nullptr) {
// Hand off to new LB policy.
if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return; schedule callback.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
@@ -199,19 +184,18 @@ static void rr_shutdown_locked(grpc_lb_policy* pol) {
}
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
grpc_core::ConnectedSubchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
*target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
grpc_lb_policy_pick_state* next = pp->next;
if (pp == pick) {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -226,22 +210,21 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
while (pick != nullptr) {
grpc_lb_policy_pick_state* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
}
pp = next;
pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -266,13 +249,10 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
}
static int rr_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_core::ConnectedSubchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
grpc_lb_policy_pick_state* pick) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
@@ -282,18 +262,18 @@ static int rr_pick_locked(grpc_lb_policy* pol,
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
pick->connected_subchannel =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != nullptr) {
*user_data = sd->user_data;
if (pick->user_data != nullptr) {
*pick->user_data = sd->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)",
(void*)p, (void*)sd->subchannel, (void*)*target,
(void*)sd->subchannel_list, (unsigned long)next_ready_index);
"index %" PRIuPTR ")",
p, sd->subchannel, pick->connected_subchannel, sd->subchannel_list,
next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -304,13 +284,8 @@ static int rr_pick_locked(grpc_lb_policy* pol,
if (!p->started_picking) {
start_picking_locked(p);
}
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
return 0;
}
@@ -495,13 +470,13 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks)) {
p->pending_picks = pick->next;
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
if (pp->user_data != nullptr) {
*pp->user_data = selected->user_data;
if (pick->user_data != nullptr) {
*pick->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
@@ -510,8 +485,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
(void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index);
}
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
break;
}

@@ -213,13 +213,13 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason);
GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}

@@ -60,11 +60,13 @@
((grpc_core::ConnectedSubchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
typedef struct {
namespace {
struct state_watcher {
grpc_closure closure;
grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
} state_watcher;
};
} // namespace
typedef struct external_state_watcher {
grpc_subchannel* subchannel;
@@ -167,13 +169,13 @@ static void connection_destroy(void* arg, grpc_error* error) {
gpr_free(stk);
}
grpc_core::ConnectedSubchannel* ConnectedSubchannel_ref(
grpc_core::ConnectedSubchannel* grpc_connected_subchannel_ref(
grpc_core::ConnectedSubchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
c->Ref(DEBUG_LOCATION, REF_REASON);
return c;
}
void ConnectedSubchannel_unref(
void grpc_connected_subchannel_unref(
grpc_core::ConnectedSubchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
c->Unref(DEBUG_LOCATION, REF_REASON);
}
@@ -541,10 +543,6 @@ static void on_connected_subchannel_connectivity_changed(void* p,
if (!c->disconnected && con != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(con, "transient_failure");
gpr_atm_no_barrier_store(&c->connected_subchannel, (gpr_atm) nullptr);
gpr_log(
GPR_INFO,
"LOL FORMER Connected subchannel %p of subchannel %p is now NULL.",
con, c);
grpc_connectivity_state_set(&c->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "reflect_child");
@@ -554,8 +552,9 @@ static void on_connected_subchannel_connectivity_changed(void* p,
gpr_log(GPR_INFO,
"Connected subchannel %p of subchannel %p has gone into %s. "
"Attempting to reconnect.",
con, c, grpc_connectivity_state_name(
connected_subchannel_watcher->connectivity_state));
con, c,
grpc_connectivity_state_name(
connected_subchannel_watcher->connectivity_state));
}
maybe_start_connecting_locked(c);
} else {

@@ -49,9 +49,9 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \
grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
ConnectedSubchannel_ref((p), __FILE__, __LINE__, (r))
grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
ConnectedSubchannel_unref((p), __FILE__, __LINE__, (r))
grpc_connected_subchannel_unref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
@@ -65,8 +65,9 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) ConnectedSubchannel_ref((p))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) ConnectedSubchannel_unref((p))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
grpc_connected_subchannel_unref((p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
@@ -110,9 +111,9 @@ grpc_subchannel* grpc_subchannel_weak_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
grpc_core::ConnectedSubchannel* ConnectedSubchannel_ref(
grpc_core::ConnectedSubchannel* grpc_connected_subchannel_ref(
grpc_core::ConnectedSubchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void ConnectedSubchannel_unref(
void grpc_connected_subchannel_unref(
grpc_core::ConnectedSubchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);

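The renamed grpc_connected_subchannel_ref/unref above keep the
GRPC_SUBCHANNEL_REF_EXTRA_ARGS trick: one function definition serves both
build modes, with file/line/reason parameters appearing only when NDEBUG is
unset. A stripped-down, self-contained sketch of the pattern (all names
invented):

    #ifndef NDEBUG
    #define REF_EXTRA_ARGS , const char *file, int line, const char *reason
    #define MY_OBJ_REF(p, r) my_obj_ref((p), __FILE__, __LINE__, (r))
    #else
    #define REF_EXTRA_ARGS
    #define MY_OBJ_REF(p, r) my_obj_ref((p))
    #endif

    struct my_obj { int refs; };

    // Debug builds see the extra parameters (useful for ref tracing);
    // release builds compile the same body without them.
    void my_obj_ref(my_obj* obj REF_EXTRA_ARGS) { ++obj->refs; }
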
@@ -35,7 +35,8 @@
/* default maximum size of payload eligible for GET request */
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@@ -60,13 +61,14 @@ typedef struct call_data {
grpc_closure on_send_message_next_done;
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
} call_data;
};
typedef struct channel_data {
struct channel_data {
grpc_mdelem static_scheme;
grpc_mdelem user_agent;
size_t max_payload_size_for_get;
} channel_data;
};
} // namespace
static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {

@@ -35,16 +35,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
typedef enum {
namespace {
enum initial_metadata_state {
// Initial metadata not yet seen.
INITIAL_METADATA_UNSEEN = 0,
// Initial metadata seen; compression algorithm set.
HAS_COMPRESSION_ALGORITHM,
// Initial metadata seen; no compression algorithm set.
NO_COMPRESSION_ALGORITHM,
} initial_metadata_state;
};
typedef struct call_data {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -62,9 +63,9 @@ typedef struct call_data {
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
} call_data;
};
typedef struct channel_data {
struct channel_data {
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Bitset of enabled algorithms */
@@ -78,7 +79,8 @@ typedef struct channel_data {
uint32_t enabled_stream_compression_algorithms_bitset;
/** Supported stream compression algorithms */
uint32_t supported_stream_compression_algorithms;
} channel_data;
};
} // namespace
static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {

@@ -31,7 +31,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
@@ -60,11 +61,12 @@ typedef struct call_data {
grpc_closure hs_on_recv;
grpc_closure hs_on_complete;
grpc_closure hs_recv_message_ready;
} call_data;
};
typedef struct channel_data {
struct channel_data {
uint8_t unused;
} channel_data;
};
} // namespace
static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {

@@ -31,7 +31,8 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct call_data {
namespace {
struct call_data {
intptr_t id; /**< an id unique to the call */
bool have_trailing_md_string;
grpc_slice trailing_md_string;
@@ -48,11 +49,12 @@ typedef struct call_data {
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
grpc_metadata_batch* recv_initial_metadata;
} call_data;
};
typedef struct channel_data {
struct channel_data {
intptr_t id; /**< an id unique to the channel */
} channel_data;
};
} // namespace
static void on_initial_md_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = (grpc_call_element*)user_data;

@@ -37,7 +37,8 @@
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
{ DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
typedef struct channel_data {
namespace {
struct channel_data {
/* We take a reference to the channel stack for the timer callback */
grpc_channel_stack* channel_stack;
/* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer
@@ -84,7 +85,8 @@ typedef struct channel_data {
grpc_connectivity_state connectivity_state;
/* Number of active calls */
gpr_atm call_count;
} channel_data;
};
} // namespace
/* Increase the number of active calls. Before the increase, if there are no
   calls, the max_idle_timer should be cancelled. */

@@ -86,7 +86,8 @@ static void* refcounted_message_size_limits_create_from_json(
return value;
}
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@@ -97,13 +98,14 @@ typedef struct call_data {
grpc_byte_stream** recv_message;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready;
} call_data;
};
typedef struct channel_data {
struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
grpc_slice_hash_table* method_limit_table;
} channel_data;
};
} // namespace
// Callback invoked when we receive a message. Here we check the max
// receive message size.

@@ -25,7 +25,8 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/metadata.h"
typedef struct call_data {
namespace {
struct call_data {
// Receive closures are chained: we inject this closure as the
// recv_initial_metadata_ready up-call on transport_stream_op, and remember to
// call our next_recv_initial_metadata_ready member after handling it.
@@ -37,7 +38,8 @@ typedef struct call_data {
// Marks whether the workaround is active
bool workaround_active;
} call_data;
};
} // namespace
// Find the user agent metadata element in the batch
static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,

@@ -138,10 +138,11 @@ static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR
":pending-compressed=%" PRIdPTR ":flowed=%" PRId64
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
s->flow_controlled_bytes_flowed,
s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),

@@ -71,6 +71,7 @@ struct grpc_fd {
int shutdown;
int closed;
int released;
gpr_atm pollhup;
grpc_error* shutdown_error;
/* The watcher list.
@@ -335,6 +336,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
r->on_done_closure = nullptr;
r->closed = 0;
r->released = 0;
gpr_atm_no_barrier_store(&r->pollhup, 0);
r->read_notifier_pollset = nullptr;
char* name2;
@@ -462,7 +464,7 @@ static grpc_error* fd_shutdown_error(grpc_fd* fd) {
static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
grpc_closure* closure) {
if (fd->shutdown) {
if (fd->shutdown || gpr_atm_no_barrier_load(&fd->pollhup)) {
GRPC_CLOSURE_SCHED(closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
@@ -950,7 +952,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[0].events = POLLIN;
pfds[0].revents = 0;
for (i = 0; i < pollset->fd_count; i++) {
if (fd_is_orphaned(pollset->fds[i])) {
if (fd_is_orphaned(pollset->fds[i]) ||
gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
} else {
pollset->fds[fd_count++] = pollset->fds[i];
@@ -1017,6 +1020,12 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
/* This is a mitigation to prevent poll() from spinning on a
** POLLHUP https://github.com/grpc/grpc/pull/13665
*/
if (pfds[i].revents & POLLHUP) {
gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1);
}
fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
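Why the latch is needed: poll() reports POLLHUP whether or not it was requested, and the condition is level-triggered, so a hung-up fd that is never removed from the set makes every poll() return immediately and the loop spin at full CPU. A standalone illustration (not gRPC code) of the behavior the flag guards against:

#include <poll.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fds[2];
  if (pipe(fds) != 0) return 1;
  close(fds[1]);  // writer goes away: the read end now reports POLLHUP
  struct pollfd pfd = {fds[0], POLLIN, 0};
  bool hup_latched = false;
  for (int i = 0; i < 3 && !hup_latched; i++) {
    int n = poll(&pfd, 1, 1000);  // returns immediately on every iteration
    printf("poll() = %d, revents = 0x%x\n", n, (unsigned)pfd.revents);
    if (pfd.revents & POLLHUP) hup_latched = true;  // like fd->pollhup above
  }
  close(fds[0]);
  return 0;
}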

@ -111,7 +111,7 @@ class ExecCtx {
/** Checks if there is work to be done */
bool HasWork() {
return combiner_data_.active_combiner != NULL ||
return combiner_data_.active_combiner != nullptr ||
!grpc_closure_list_empty(closure_list_);
}

@ -30,7 +30,7 @@ char* grpc_gethostname() {
char* hostname = (char*)gpr_malloc(host_name_max);
if (gethostname(hostname, host_name_max) != 0) {
gpr_free(hostname);
return NULL;
return nullptr;
}
return hostname;
}

@ -63,7 +63,8 @@ typedef size_t msg_iovlen_type;
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
typedef struct {
namespace {
struct grpc_tcp {
grpc_endpoint base;
grpc_fd* em_fd;
int fd;
@ -96,12 +97,13 @@ typedef struct {
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
};
typedef struct backup_poller {
struct backup_poller {
gpr_mu* pollset_mu;
grpc_closure run_poller;
} backup_poller;
};
} // namespace
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
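The macro relies on the backup_poller struct and its pollset sharing a single allocation, with the pollset placed immediately after the struct. A hedged sketch of the matching allocation (the actual call site may differ):

backup_poller* p =
    (backup_poller*)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);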

@ -65,6 +65,17 @@ typedef struct {
grpc_pollset* pollset;
} grpc_tcp;
static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
return grpc_error_set_str(
grpc_error_set_int(
src_error,
/* All tcp errors are marked with UNAVAILABLE so that application may
* choose to retry. */
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(tcp->peer_string));
}
static void tcp_free(grpc_tcp* tcp) {
grpc_resource_user_unref(tcp->resource_user);
gpr_free(tcp->handle);
@ -162,7 +173,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
error =
tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else if (nread > 0) {
// Successful read
@ -177,7 +189,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
}
} else {
// nread < 0: Error
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
}
call_read_cb(tcp, error);
@ -194,7 +207,9 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
status =
uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"),
tcp);
error = grpc_error_set_str(
error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
@ -235,7 +250,8 @@ static void write_callback(uv_write_t* req, int status) {
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"), tcp);
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
@ -268,8 +284,10 @@ static void uv_endpoint_write(grpc_endpoint* ep,
}
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"));
GRPC_CLOSURE_SCHED(cb,
tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"),
tcp));
return;
}
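Tagging every transport-level failure with GRPC_STATUS_UNAVAILABLE gives callers a uniform signal that the failure is transient and potentially retryable. A hedged sketch of consuming the annotation (grpc_error_get_int is the existing accessor; the retry decision itself is illustrative):

intptr_t status = GRPC_STATUS_OK;
if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status) &&
    status == GRPC_STATUS_UNAVAILABLE) {
  // Transient transport failure: a retry policy may reasonably re-attempt.
}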

@ -31,6 +31,6 @@
static int check_availability_invalid(void) { return 0; }
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
NULL, NULL, NULL, NULL, check_availability_invalid};
nullptr, nullptr, nullptr, nullptr, check_availability_invalid};
#endif /* GRPC_POSIX_NO_SPECIAL_WAKEUP_FD */

@ -37,8 +37,9 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
namespace {
/* We can have a per-call credentials. */
typedef struct {
struct call_data {
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_credentials* creds;
@ -57,13 +58,14 @@ typedef struct {
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
} call_data;
};
/* We can have a per-channel credentials. */
typedef struct {
struct channel_data {
grpc_channel_security_connector* security_connector;
grpc_auth_context* auth_context;
} channel_data;
};
} // namespace
void grpc_auth_metadata_context_reset(
grpc_auth_metadata_context* auth_md_context) {

@ -26,13 +26,14 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
typedef enum {
namespace {
enum async_state {
STATE_INIT = 0,
STATE_DONE,
STATE_CANCELLED,
} async_state;
};
typedef struct call_data {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
@ -44,12 +45,13 @@ typedef struct call_data {
grpc_auth_context* auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
} call_data;
};
typedef struct channel_data {
struct channel_data {
grpc_auth_context* auth_context;
grpc_server_credentials* creds;
} channel_data;
};
} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
const grpc_metadata_batch* batch) {
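The gpr_atm state field implements a small lock-free state machine: the completion path and the cancellation path race, and whichever CASes out of STATE_INIT first wins. A minimal sketch under that assumption (function name and flow are illustrative):

static void on_md_done(call_data* calld) {
  if (gpr_atm_full_cas(&calld->state, STATE_INIT, STATE_DONE)) {
    // We won the race against cancellation: deliver the result.
  }
  // else: the cancel path already moved state to STATE_CANCELLED and is
  // responsible for failing the batch.
}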

@ -31,12 +31,12 @@
const char* gpr_getenv_silent(const char* name, char** dst) {
*dst = gpr_getenv(name);
return NULL;
return nullptr;
}
char* gpr_getenv(const char* name) {
char* result = getenv(name);
return result == NULL ? result : gpr_strdup(result);
return result == nullptr ? result : gpr_strdup(result);
}
void gpr_setenv(const char* name, const char* value) {

@ -39,7 +39,7 @@ void grpc_fork_support_init() {
#else
fork_support_enabled = 0;
char* env = gpr_getenv("GRPC_ENABLE_FORK_SUPPORT");
if (env != NULL) {
if (env != nullptr) {
static const char* truthy[] = {"yes", "Yes", "YES", "true",
"True", "TRUE", "1"};
for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {

@ -35,15 +35,15 @@ static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
void gpr_log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) {
char buf[64];
char* allocated = NULL;
char* message = NULL;
char* allocated = nullptr;
char* message = nullptr;
int ret;
va_list args;
va_start(args, format);
ret = vsnprintf(buf, sizeof(buf), format, args);
va_end(args);
if (ret < 0) {
message = NULL;
message = nullptr;
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
@ -66,7 +66,7 @@ void gpr_default_log(gpr_log_func_args* args) {
timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
if (final_slash == nullptr)
display_file = args->file;
else
display_file = final_slash + 1;
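The first log_posix.cc hunk above uses a classic two-pass vsnprintf pattern: format into a small stack buffer, and pay for a heap allocation only when the message does not fit. A self-contained illustration of the pattern (not the gpr_log source):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

void log_message(const char* format, ...) {
  char buf[64];
  char* allocated = nullptr;
  const char* message = buf;
  va_list args;
  va_start(args, format);
  int ret = vsnprintf(buf, sizeof(buf), format, args);
  va_end(args);
  if (ret < 0) {
    message = "(formatting error)";
  } else if ((size_t)ret > sizeof(buf) - 1) {
    // Truncated: ret is the length the full message needs.
    allocated = (char*)malloc((size_t)ret + 1);
    va_start(args, format);
    vsnprintf(allocated, (size_t)ret + 1, format, args);
    va_end(args);
    message = allocated;
  }
  fprintf(stderr, "%s\n", message);
  free(allocated);
}

int main() {
  log_message("short");
  log_message("long enough to overflow the 64-byte stack buffer: %0100d", 7);
}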

@ -107,7 +107,7 @@ static gpr_timespec now_impl(gpr_clock_type clock) {
now.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
gettimeofday(&now_tv, NULL);
gettimeofday(&now_tv, nullptr);
now.tv_sec = now_tv.tv_sec;
now.tv_nsec = now_tv.tv_usec * 1000;
break;

@ -1851,8 +1851,9 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
{
grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Error from server send status");
override_error =
error_from_status(op->data.send_status_from_server.status,
"Returned non-ok status");
}
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(

@ -44,24 +44,23 @@
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct listener {
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
namespace {
struct listener {
void* arg;
void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
size_t pollset_count);
void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
struct listener* next;
grpc_closure destroy_done;
} listener;
};
typedef struct call_data call_data;
typedef struct channel_data channel_data;
typedef struct registered_method registered_method;
enum requested_call_type { BATCH_CALL, REGISTERED_CALL };
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
struct registered_method;
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
typedef struct requested_call {
struct requested_call {
gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
@ -81,15 +80,15 @@ typedef struct requested_call {
grpc_byte_buffer** optional_payload;
} registered;
} data;
} requested_call;
};
typedef struct channel_registered_method {
struct channel_registered_method {
registered_method* server_registered_method;
uint32_t flags;
bool has_host;
grpc_slice method;
grpc_slice host;
} channel_registered_method;
};
struct channel_data {
grpc_server* server;
@ -176,6 +175,7 @@ typedef struct {
grpc_channel** channels;
size_t num_channels;
} channel_broadcaster;
} // namespace
struct grpc_server {
grpc_channel_args* channel_args;
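The "must be first" constraint on request_link exists because the MPSC queue traffics in bare gpr_mpscq_node pointers and consumers cast them straight back to the enclosing requested_call; that cast is only sound when the node sits at offset zero. An illustrative pop side (the queue field name here is assumed):

gpr_mpscq_node* node = gpr_mpscq_pop(&rm->requests);
if (node != nullptr) {
  requested_call* rc = (requested_call*)node;  // valid: node is at offset 0
  // ... dispatch rc ...
}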

@ -70,7 +70,7 @@ void grpc_error_get_status(grpc_error* error, grpc_millis deadline,
}
if (code != nullptr) *code = status;
if (error_string != NULL && status != GRPC_STATUS_OK) {
if (error_string != nullptr && status != GRPC_STATUS_OK) {
*error_string = gpr_strdup(grpc_error_string(error));
}

@ -116,6 +116,9 @@ typedef struct {
static gpr_once init_openssl_once = GPR_ONCE_INIT;
static gpr_mu* openssl_mutexes = nullptr;
static void openssl_locking_cb(int mode, int type, const char* file,
int line) GRPC_UNUSED;
static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED;
static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {
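The GRPC_UNUSED forward declarations are presumably there because newer OpenSSL (1.1.0+) is internally thread-safe and never registers these callbacks, which would otherwise trip -Wunused-function. A sketch of the conditional registration this implies (illustrative):

#if OPENSSL_VERSION_NUMBER < 0x10100000
  CRYPTO_set_locking_callback(openssl_locking_cb);
  CRYPTO_set_id_callback(openssl_thread_id_cb);
#endif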

@ -15,12 +15,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -15,12 +15,12 @@
<PackageTags>gRPC test testing</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -14,12 +14,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="SourceLink.csproj.include" />
<ItemGroup>
<EmbeddedResource Include="..\..\..\etc\roots.pem" />
<Content Include="..\nativelibs\csharp_ext_macos_x64\libgrpc_csharp_ext.dylib">

@ -0,0 +1,19 @@
<!-- Ensure that debugging of the resulting NuGet packages work (we're using SourceLink). -->
<Project>
<ItemGroup Label="dotnet pack instructions">
<Content Include="$(OutputPath)netstandard1.5\$(PackageId).pdb">
<Pack>true</Pack>
<PackagePath>lib/netstandard1.5</PackagePath>
</Content>
<Content Include="$(OutputPath)net45\$(PackageId).pdb">
<Pack>true</Pack>
<PackagePath>lib/net45</PackagePath>
</Content>
</ItemGroup>
<ItemGroup>
<PackageReference Include="SourceLink.Embed.AllSourceFiles" Version="2.7.3" PrivateAssets="all" />
</ItemGroup>
</Project>

@ -14,12 +14,12 @@
<PackageTags>gRPC health check</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -14,12 +14,12 @@
<PackageTags>gRPC reflection</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -68,6 +68,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@ -170,6 +172,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@ -237,6 +240,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@ -356,6 +360,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
if (0 == strcmp("filter_status_code", argv[i])) {
filter_status_code(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;

@ -70,6 +70,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@ -173,6 +175,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@ -241,6 +244,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@ -364,6 +368,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
if (0 == strcmp("filter_status_code", argv[i])) {
filter_status_code(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;

@ -101,6 +101,7 @@ END2END_TESTS = {
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,

@ -146,6 +146,7 @@ END2END_TESTS = {
'trailing_metadata': test_options(),
'authority_not_supported': test_options(),
'filter_latency': test_options(),
'filter_status_code': test_options(),
'workaround_cronet_compression': test_options(),
'write_buffering': test_options(needs_write_buffering=True),
'write_buffering_at_end': test_options(needs_write_buffering=True),

@ -0,0 +1,353 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_init.h"
#include "test/core/end2end/cq_verifier.h"
static bool g_enable_filter = false;
static gpr_mu g_mu;
static bool g_client_code_recv;
static bool g_server_code_recv;
static gpr_cv g_client_code_cv;
static gpr_cv g_server_code_cv;
static grpc_status_code g_client_status_code;
static grpc_status_code g_server_status_code;
static void* tag(intptr_t t) { return (void*)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char* test_name,
grpc_channel_args* client_args,
grpc_channel_args* server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_from_now(int n) {
return grpc_timeout_seconds_to_deadline(n);
}
static gpr_timespec five_seconds_from_now(void) {
return n_seconds_from_now(5);
}
static void drain_cq(grpc_completion_queue* cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture* f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),
nullptr)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = nullptr;
}
static void shutdown_client(grpc_end2end_test_fixture* f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = nullptr;
}
static void end_test(grpc_end2end_test_fixture* f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
grpc_completion_queue_destroy(f->shutdown_cq);
}
// Simple request via a server filter that saves the reported status code.
static void test_request(grpc_end2end_test_config config) {
grpc_call* c;
grpc_call* s;
grpc_end2end_test_fixture f =
begin_test(config, "filter_status_code", nullptr, nullptr);
cq_verifier* cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op* op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
int was_cancelled = 2;
gpr_mu_lock(&g_mu);
g_client_status_code = GRPC_STATUS_OK;
g_server_status_code = GRPC_STATUS_OK;
gpr_mu_unlock(&g_mu);
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(
f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
grpc_slice_from_static_string("/foo"),
get_host_override_slice("foo.test.google.fr", config), deadline, nullptr);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = nullptr;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = nullptr;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
grpc_slice status_string = grpc_slice_from_static_string("xyz");
op->data.send_status_from_server.status_details = &status_string;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = nullptr;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_unref(s);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);
config.tear_down_data(&f);
// Perform checks after test tear-down
// Guards against the case that there's outstanding channel-related work on a
// call prior to verification
// TODO(https://github.com/grpc/grpc/issues/13915) enable this for windows
#ifndef GPR_WINDOWS
gpr_mu_lock(&g_mu);
  // Wait (bounded) for each filter to report its status code. Note that the
  // server branch must wait on g_server_code_cv, and that gpr_cv_wait
  // returns non-zero only when the deadline is exceeded.
  while (!g_client_code_recv) {
    GPR_ASSERT(!gpr_cv_wait(&g_client_code_cv, &g_mu,
                            grpc_timeout_seconds_to_deadline(3)));
  }
  while (!g_server_code_recv) {
    GPR_ASSERT(!gpr_cv_wait(&g_server_code_cv, &g_mu,
                            grpc_timeout_seconds_to_deadline(3)));
  }
GPR_ASSERT(g_client_status_code == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(g_server_status_code == GRPC_STATUS_UNIMPLEMENTED);
gpr_mu_unlock(&g_mu);
#endif // GPR_WINDOWS
}
/*******************************************************************************
* Test status_code filter
*/
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
return GRPC_ERROR_NONE;
}
static void client_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
gpr_mu_lock(&g_mu);
g_client_status_code = final_info->final_status;
g_client_code_recv = true;
gpr_cv_signal(&g_client_code_cv);
gpr_mu_unlock(&g_mu);
}
static void server_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
gpr_mu_lock(&g_mu);
g_server_status_code = final_info->final_status;
g_server_code_recv = true;
gpr_cv_signal(&g_server_code_cv);
gpr_mu_unlock(&g_mu);
}
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_channel_element* elem) {}
static const grpc_channel_filter test_client_filter = {
grpc_call_next_op,
grpc_channel_next_op,
0,
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
client_destroy_call_elem,
0,
init_channel_elem,
destroy_channel_elem,
grpc_channel_next_get_info,
"client_filter_status_code"};
static const grpc_channel_filter test_server_filter = {
grpc_call_next_op,
grpc_channel_next_op,
0,
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
server_destroy_call_elem,
0,
init_channel_elem,
destroy_channel_elem,
grpc_channel_next_get_info,
"server_filter_status_code"};
/*******************************************************************************
* Registration
*/
static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
grpc_channel_filter* filter = (grpc_channel_filter*)arg;
if (g_enable_filter) {
// Want to add the filter as close to the end as possible, to make
// sure that all of the filters work well together. However, we
// can't add it at the very end, because the
// connected_channel/client_channel filter must be the last one.
// So we add it right before the last one.
grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_create_iterator_at_last(builder);
GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
const bool retval = grpc_channel_stack_builder_add_filter_before(
it, filter, nullptr, nullptr);
grpc_channel_stack_builder_iterator_destroy(it);
return retval;
} else {
return true;
}
}
static void init_plugin(void) {
gpr_mu_init(&g_mu);
gpr_cv_init(&g_client_code_cv);
gpr_cv_init(&g_server_code_cv);
g_client_code_recv = false;
g_server_code_recv = false;
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_client_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_client_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_server_filter);
}
static void destroy_plugin(void) {
gpr_cv_destroy(&g_client_code_cv);
gpr_cv_destroy(&g_server_code_cv);
gpr_mu_destroy(&g_mu);
}
void filter_status_code(grpc_end2end_test_config config) {
g_enable_filter = true;
test_request(config);
g_enable_filter = false;
}
void filter_status_code_pre_init(void) {
grpc_register_plugin(init_plugin, destroy_plugin);
}

@ -43,7 +43,7 @@ int main(int argc, char** argv) {
grpc_resource_quota_create("fd_conservation_posix_test");
for (i = 0; i < 100; i++) {
p = grpc_iomgr_create_endpoint_pair("test", NULL);
p = grpc_iomgr_create_endpoint_pair("test", nullptr);
grpc_endpoint_destroy(p.client);
grpc_endpoint_destroy(p.server);
grpc_core::ExecCtx::Get()->Flush();

@ -118,7 +118,7 @@ static void test_instant_alloc_then_free(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, NULL);
grpc_resource_user_alloc(usr, 1024, nullptr);
}
{
grpc_core::ExecCtx exec_ctx;
@ -136,7 +136,7 @@ static void test_instant_alloc_free_pair(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, NULL);
grpc_resource_user_alloc(usr, 1024, nullptr);
grpc_resource_user_free(usr, 1024);
}
grpc_resource_quota_unref(q);
@ -565,7 +565,7 @@ static void test_resource_user_stays_allocated_until_memory_released(void) {
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, NULL);
grpc_resource_user_alloc(usr, 1024, nullptr);
}
{
grpc_core::ExecCtx exec_ctx;
@ -608,8 +608,8 @@ test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released(
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
grpc_core::ExecCtx::Get()->Flush();
GPR_ASSERT(gpr_event_wait(&allocated,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
5)) != nullptr);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_milliseconds_to_deadline(100)) ==
nullptr);
@ -667,8 +667,8 @@ static void test_reclaimers_can_be_posted_repeatedly(void) {
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
grpc_core::ExecCtx::Get()->Flush();
GPR_ASSERT(gpr_event_wait(&allocated,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
5)) != nullptr);
GPR_ASSERT(gpr_event_wait(&reclaimer_done,
grpc_timeout_seconds_to_deadline(5)) !=
nullptr);

@ -21,6 +21,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <functional>
#include <memory>
#include <thread>

@ -1,182 +1,4 @@
FROM ubuntu:17.04
RUN apt-get -y update && \
apt-get install -y curl git-core xz-utils build-essential wget unzip sudo gpg dirmngr
# Add "rvm" as system group, to avoid conflicts with host GIDs typically starting with 1000
RUN groupadd -r rvm && useradd -r -g rvm -G sudo -p "" --create-home rvm && \
echo "source /etc/profile.d/rvm.sh" >> /etc/rubybashrc
USER root
RUN apt-get -y update && \
apt-get install -y gcc-mingw-w64-x86-64 gcc-mingw-w64-i686 g++-mingw-w64-x86-64 g++-mingw-w64-i686 \
gcc-multilib moreutils
USER rvm
# install rvm, RVM 1.26.0+ has signed releases, source rvm for usage outside of package scripts
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3 && \
(curl -L http://get.rvm.io | sudo bash -s stable) && \
bash -c " \
source /etc/rubybashrc && \
rvmsudo rvm cleanup all "
# Regenerate the following using build.sh if the build folder changes.
RUN echo \\
H4sIANslcFgAA+08a3fbxo75uvwVqOxTSbFIidTDVhKnTW0n8d28ju2ku6fO5R2RQ4kVRXJnSMve\
OPe3LzAk9fIrvk3UuylxUnk0DwADYDADzKiD1A/c5oNvCi2E7W7rgdlpdbe3Tfxumj2zpepzeGC2\
W+1Oy7TaZvcBNpvYHbrflq0MUpkwAfCAxVHgXIQ39uPhcB3srBsGSv8ydSMu5DeiQQrudW7Wv9Xq\
rejf6nZ7D6D1jfhZgr+4/ve5x9IgkZADD8/sMecxbO1CZZQksR2L6PwCqCjzsjerPdrbt1++PT6x\
j97/8t/2u1fPTp6/PXq9Uv3h4Oj48O0bVXv4+tmLA1DVe3tFS0X7s6Xw14Vs/Ys0ZN9q9d/t/zsd\
c3X9W1a3XP/rgI0fmgM/bA6YHGnaUERpzFwX9CFUNl8c7lfoz9Hb9+8q2mTs+gL0GJrJJG6OognX\
UsnFcm89xdJ7VXoB4mzSoI2FBlWwZjAfCvqEOh4fHFU07eXb1we7mzViAZ48eQIV7owi+GfWXNeC\
EHQJzVSKZhA5LGgKNua6E01iP+AC0dD4StNYqta0jHJakAFdlzzRFfFTjTzdL8+OX9oHbz7sNnni\
4AoYXBAHwsmbdR2H/vzdu6Z8//eH3tRwvhGNO/Z/s2etnv+sTrtdrv91QPOhBg/hZORLwC19KNgE\
Rix0Ay7h+PDF4ZsTwG/gRWLKhCvBTyCJsCpKRrj2cITDpTQIxWECiMMPEx663KVeAw64rwCT8O5w\
H0zqRR33I2eMY0nsiSxQILmpn4yg4matOLCyNBLehvDKD9PzBiBpcLNjC6DhhizIWRbEZzYEsD4S\
iJWFF3mnjM0T5JtjNw57iQj0PWBjVsyU+FfDXIgRWU6DhjU1bcMPnSB1OTzJ0Bmjp4t1F7KZXMRc\
Xq2eMj+hWg1lA7Hvwi60HmvaWeS7Wsa3jRipsUYfhD2d1LVP6IV8r4YD6sofjf0goG+NnNv6Y+1z\
hnPC/FANZWLoNMAZoT0/xPLZbx/rhEUmInUUYuYkfhRCyKd2VnxMRIhowpJUIme6iawBNB/CMU8g\
jZWwMwQpygy1KmPu+N6FakBEkCEySEawgNmQzC60sgtL8ySiWOKTOLlAnwy1H5eHTZgc1x9fweYF\
bChz4cHCbGqZPBqwgKYBNZLvw3qrrnpnYke1j2sKcy7YT0qypKDaj5kIVCuA4DjdEH49+K/Dk+OT\
Zyfvj2sL7Z95IHk2diY4fs6ds7imxG5+RGbyUo4Q7UlEolahbpVlIhkKhRc1uu71n/n/mCXOiMtv\
lAf48vjfsnpdk/x/r9Uu4/91wLL+6RSkm8aOsa3H7e3OV7KH++d/OttWp9T/OuB2/YdRbKi2P0Tj\
rvyP2VqN/3rdtlWe/9YBru95GOsM8WDHmk4Uev6QNvrBvIxnDJefQ7drdreZZxi9XrvFBwxMWtTd\
7z0++t5hZf0vxtB6y+gb3a+wB9zf//eoqvT/a4C79U9fWaJTeKYPUnWiN8hrfDmN2/2/2eq2V/L/\
ltUt83/rgecimkDH9HbanU6n7zmdttUeWF6/3+U722aXse5Ox3L7nttvtzvwGuOtYx6DuQ2t1iP1\
DyxUoUZoHsErJiT8JwsxkH4SYPnnoeD+/+r4EUo3Ep7h8qfaPkv4IzhmSQPaLfgbCxGD2YPWziP6\
tw1baBEt7Tgd/M6d5BH89u7Zyd7Lj7CHMeSQq7DTQyuFCcdAFylN8lRDZqb+IOBZJqEwVU3DkB9i\
wc/8CCO1lbFTDAkx3B9cwCiKxn44hChEfEhFqwj+P6mPe2FVDLLNsFoBhwWBoal0SZYxQXoTJOsg\
Cw5LJS/oqkyICLH7BeSIJBSIGtgrATfiMqwmGvGA7AuMB7GzHwJTXLp0F+OLKJzwMDG0w4S4CrHJ\
DyTNOYiYCxhF45cRo+CfAwtkBDETCUReHrlj4M2EC4E/EExcNLAw5trvEvVIeR3hRk4+HeRP3QMh\
fV/KFLmlrtm9z6NmEw8Io3RgoJSXncSKy6D0TTMb39zJRL8icSQVRlPKUqA6VZ6IJD8TO8zFPhmL\
QS7yhjYd+c6IBicXse8osS5OlJ8nJFrwkIuGmpqv8lFTSvfMjMaFOGCJF4mJdsYCmqOSmpOkCmHI\
uctdkgfKc8ouchwkaBzKPFRood6mxOA9boCMMukThVzGyFcyUpg1ym4wB1XrkmXlE3zBJ8SlEaCo\
GkVliDZ0xrMMV47I5xLtROWqtJnVL5gEsUbZjjOONv4u4AyNL4wS3phzpJw7uChNJ4mQMxQWgyGO\
CMlw1HxlsXwQgeBuA097gPziTLF+qHJALi5YNyNPjEsDjiM0XkRVEbj6JjHE4yHowqsYmq7rGvGv\
zKKJWuGh9FG3TI4NMYBLaHdhawn060ADU9EqzKQBZgctU3JB2R1Z26o3wDLB5QHPKvS6pi0dZW/m\
YXBzW37UbbVbzOv3DMPrm52+2aejbq/TocndhlnD2dyK/eefQTd7/cYObGV/sOJv7IzBrCfIUZSi\
ygbKY3kcfQLaY3E3Cxtq6dCVy6LNS0f4cTLv5UxcePIE2EDaeRdD8EAZmE22aXvorGvUjLpTNXUN\
NH1OpHBTJK2pHwQ5O1IZnQdTrpaNIyIpIVv5aN5zBNglMbC/WsjST9Bi8oToT7XKxqeC6OfmAp1K\
XXk3bWvOBRnc/TjYujcHOY2COswhipVhGXEqR7WHipKdcWvnTTXlS+r1+SgeuihI0nK71220+7Cl\
/rbuqecNyjz7Ic9nxlSWk2xIqvx2NuvCkf1QjMrG2LkntlX/Gvawi54NoOjePuNiQd0btML8sBB2\
Y0n35EYn44lHZfQGr7GPWpRDHnKh2CpEvoLmS0bmbKvqJZ0UnSuw+xR+u1Zdjbmx3Q43mltDu3vw\
VQz5lCoflwQYxVk2XE2F1pb6ptSkk8iBri3VnPWb5rzImpp2UWFT34/kbS+TywVpFzSLfrfSJXXM\
x5IOcOfkM2WRUZHvxOWCy0uNRCMhM5XFqKscF6q4hduZDJ/TdhfFPKwlRsgmuENVp9W66uZdLqrS\
M6YCV+vCGSAnUz0NT8PKfIHPu1IHWwxutvSrQxQ7gjO3tuokPF/IZHVBLxTnK1QpgMwhE/x1yi6o\
XhFdYUVKdPRlWWwzeTjxFS+mGMQNXolxyeMWM4YnUDGNfgV+/DEvPNmFxWc4i/K+RjFiK7lWNchO\
RHd7Cd2jKOnNtXG7freuQ3GnDq6OMmQ6+KHW/HttZhz1wu5q1fpmE6menppkD6enVnUJw73kszjn\
K7Sz2gFyPob66cNafkcoZ/SRdjaFav1uTPkN0B9FpM6Tp0Y9Fhh01KDI3CkzPK1J4dAe45+f1mfI\
4zSRmZSWVEyKneIZaLU2Wzc54dXGRKQhmjyvYZEHwRK3fBHXwhdVnLvQe7mkG53ozCWtLqR7+5+7\
nMo1fqgQztLUF4vqXNDpd+n019npNcz7HgtIUBWHhXjGz2I/mWJEpMSA51JUW9tq4hEpCs4qFPaE\
efSSrxYMARnFL1G4hPCYZ1Hjo+sCveR36YxSDOhWYjx+pg+i8yLM687xzSNmxUd1LqirsfTcm6JJ\
BwyPNG9VxL8oXMEn0RmdaUKZwKOjwZ4aumhgKz0WXyHe0e2a5X5Nr/2D472jw3cn9H4RvUh2yHJ/\
qq024kLSQbOMrtEyaAVFU2mY2v+D5Pjd+T/KpkRpoqOVynsm/nK46/5n27JW8n9m22qV+b91wFLQ\
rIIG9R5wfqBR7+owrL2xLQ+ae8xxdkwMmnuMWayzEjTfPDoLmm9uV0FzW7lN+rNNXrPw/fSyz6bn\
f5+byo5lc+PT68M3L35Vr48/47eVl8bXRBeEI3MCFV1XWHbzYb+8P3y1/7kIFaq6zkM2CLguR5Qp\
qc7rXV+qBh/9BgsCSoJV8xAFW4v1k4yvq3SC3EvmlbTMdquE+iM9XEEX/TwSDodfEXnkjC21NR7R\
hmgaOw386BfvlFQmx0/u6XOuvf/tG+2v+RTkX7j/t6xeef+zDrhR/1/n6l/Bv3D/j19K/78OKO//\
/9pwy/q3JQYu6qriD/qBu95/W2ZnZf1vd8v33+uBL17/O/2d7XbLMQzH4z2r2y7Wf3a+m/fOznPz\
7+r81un16ei2VRToDIfnGObYXho6dOfoTSJXvXGOh+pBbkd90rtderULMmbT8AzkhaS7P7DtouSM\
RIQB8ZBr8B9ZFCd5InEY5SLoFyuSc/XLFc8ZEQknkiOQfjiChOFHEA0tEFEauqeLCFJEgH+G+Mdl\
GBViZM4DDE7tgIkhtz2XbvbwP448pWFWOF1NKU/GiT/hQB/DCQwnVLAFBrt4jrOHPFGt+d/Ic9kF\
oAkGENPn6WqGO6ZkGRQrEuQoTTDIDNU7aDz7JcwZgxvYfkL5dW7HI1csRtILKL5kJOVGHtF8mLRR\
SWdM7P5jE8vqlzkVrHHOlObszVyFFbgE6pAIW47+oSnNhjb2G3GcrFJyZfPV4ZuDN2/pN0WzUZXN\
GYXv/qc2/5Zwjf+3jLbR+rPP/2Z5/l8L3Kj/9e3/rU6vfXX/L/M/a4Ev3v/5gLfdHdz/WbvdG2zf\
b//f6Xfaav9XhR7t/7Md5tadojDBytKuM6vV1FU/lwlUzjev6wC7cH7B5ePs7ZQi6LAEnj4lBl3u\
SYN+c2o/2zt4+1zTN/KL95fPPhzYxwdv9p8fvjoAU4O8A2jg+d/XNnXt+u/8Wf7fsjC4zPI/5fvf\
tcCN+v9T/L9lYQfl/ztl/mctUPr/0v+7TZkIP7angsUxF1+dxh3+3zTN7ZXzX3sb3UC5/tcAGz+o\
/7MG3bxRMoM2AE3bgGMyiOyeLQrVm6YAvChw1cN2mVAiQT0m/uALejw9iM4huxbLeyGKJFKv64Gp\
9AouQF/CIB3OXxpMp1PjbDbeiMSwmfjOmCfNHdwRNG32ZiDhk5hYqc6r6Gua+IGsapoyXlzolY1P\
6qXHgOHyZxNe22zVPxs4s4qmqYfL2OfZ0YsPhuD00ODTpXcJ3u4/m3/Xm/DZmLBYvQhhYkhP1+hZ\
8y6c5JQN+qV7VRGqZo2GE0SSa9njkvfEieHE9Av8hmqlFynY+BuWG1T7UaNHIFr+dLRgJEsqLTMC\
W/kra2To02XSiC7pFRiig8+alobYIikNhjKpgWKoAQ8LtPT2hp+jKzetbUXQiVyOxDZ/Mqg6+6V5\
LgyDM2eUPZUhHiPhq3nPpyMmqnJ1isXssiHZtBRNovV9+cYSSiihhBJKKKGEEkoooYQSSiihhBJK\
KKGEEkoooYQSSiihhBJKKKGEEkoooYQSSiihhH9H+D9i0BbqAHgAAA==\
| base64 -d | tar xzC /tmp
# Import patch files for ruby and gems
RUN cp -r /tmp/build/patches /home/rvm/patches/
ENV BASH_ENV /etc/rubybashrc
# install rubies and fix permissions on
RUN bash -c " \
export CFLAGS='-s -O3 -fno-fast-math -fPIC' && \
echo 'about to install patches for ruby 2.4.0 from:' && \
ls -r ~/patches && \
for v in 2.4.0 ; do \
rvm install \$v --patch \$(echo ~/patches/ruby-\$v/* | tr ' ' ','); \
done && \
rvm cleanup all && \
find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
# Install rake-compiler and typical gems in all Rubies
# do not generate documentation for gems
RUN echo "gem: --no-ri --no-rdoc" >> ~/.gemrc && \
bash -c " \
rvm all do gem install bundler rake-compiler hoe mini_portile rubygems-tasks && \
rvm 2.4.0 do gem install mini_portile2 && \
find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
RUN bash -c "gem env"
RUN bash -c "gem list rake-compiler"
# Install rake-compiler's cross rubies in global dir instead of /root
RUN sudo mkdir -p /usr/local/rake-compiler && \
sudo chown rvm.rvm /usr/local/rake-compiler && \
ln -s /usr/local/rake-compiler ~/.rake-compiler
# Patch rake-compiler to avoid build of ruby extensions
RUN cd /usr/local/rvm/gems/ruby-2.4.0/gems/rake-compiler-0.9.5 && git apply /home/rvm/patches/rake-compiler-0.9.5/*.diff ; \
true
RUN bash -c "rvm use 2.4.0 --default && \
export MAKE=\"make -j`nproc`\" CFLAGS='-s -O1 -fno-omit-frame-pointer -fno-fast-math' && \
rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-linux-gnu && \
rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-linux-gnu && \
rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-linux-gnu && \
rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-linux-gnu && \
rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-w64-mingw32 && \
rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-linux-gnu && \
rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
RUN bash -c "rvm use 2.4.0 --default && \
export MAKE=\"make -j`nproc`\" CFLAGS='-m32 -s -O1 -fno-omit-frame-pointer -fno-fast-math' LDFLAGS='-m32' && \
rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-linux-gnu && \
rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-linux-gnu && \
rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-linux-gnu && \
rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-linux-gnu && \
rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-linux-gnu && \
rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
RUN bash -c " \
rvm alias create 2.4 2.4.0 "
USER root
# Fix paths in rake-compiler/config.yml and add rvm and mingw-tools to the global bashrc
RUN sed -i -- "s:/root/.rake-compiler:/usr/local/rake-compiler:g" /usr/local/rake-compiler/config.yml && \
echo "source /etc/profile.d/rvm.sh" >> /etc/bash.bashrc && \
echo "export PATH=\$PATH:/opt/mingw/mingw32/bin" >> /etc/bash.bashrc && \
echo "export PATH=\$PATH:/opt/mingw/mingw64/bin" >> /etc/bash.bashrc
# Install wrappers for strip commands as a workaround for "Protocol error" in boot2docker.
RUN cp /tmp/build/strip_wrapper /root/
RUN sudo chmod +rx /root/strip_wrapper
RUN mv /usr/bin/i686-w64-mingw32-strip /usr/bin/i686-w64-mingw32-strip.bin && \
mv /usr/bin/x86_64-w64-mingw32-strip /usr/bin/x86_64-w64-mingw32-strip.bin && \
ln /root/strip_wrapper /usr/bin/i686-w64-mingw32-strip && \
ln /root/strip_wrapper /usr/bin/x86_64-w64-mingw32-strip
FROM larskanis/rake-compiler-dock:0.6.2
RUN find / -name rbconfig.rb | while read f ; do sed -i 's/0x0501/0x0600/' $f ; done
RUN find / -name win32.h | while read f ; do sed -i 's/gettimeofday/rb_gettimeofday/' $f ; done
@ -184,26 +6,6 @@ RUN sed -i 's/defined.__MINGW64__.$/1/' /usr/local/rake-compiler/ruby/i686-w64-m
RUN find / -name libwinpthread.dll.a | xargs rm
RUN find / -name libwinpthread-1.dll | xargs rm
RUN find / -name *msvcrt-ruby*.dll.a | while read f ; do n=`echo $f | sed s/.dll//` ; mv $f $n ; done
RUN find /usr/local/rake-compiler/ruby -name libruby.so | xargs rm
RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do ar t $f | xargs ar d $f ; done
RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do mv $f `echo $f | sed s/-static//` ; done
# Install SIGINT forwarder
RUN cp /tmp/build/sigfw.c /root/
RUN gcc $HOME/sigfw.c -o /usr/local/bin/sigfw
# Install user mapper
RUN cp /tmp/build/runas /usr/local/bin/
# Install sudoers configuration
RUN cp /tmp/build/sudoers /etc/sudoers.d/rake-compiler-dock
# Fixup Ruby 2.4 'static' compilation issue.
RUN echo '!<arch>' > /usr/local/rake-compiler/ruby/x86_64-linux-gnu/ruby-2.4.0/lib/libruby.a
RUN echo '!<arch>' > /usr/local/rake-compiler/ruby/i686-linux-gnu/ruby-2.4.0/lib/libruby.a
ENV RUBY_CC_VERSION 2.4.0:2.3.0:2.2.2:2.1.5:2.0.0
RUN apt-get install -y g++-multilib
CMD bash

@ -1,7 +0,0 @@
#!/bin/sh
# Run this to produce the snipplet of data to insert in the Dockerfile.
echo 'RUN echo \\'
tar cz build | base64 | sed 's/$/\\/'
echo '| base64 -d | tar xzC /tmp'

@ -1,105 +0,0 @@
From 41f834449fc4323b2f995e8715aa5842d9fd9334 Mon Sep 17 00:00:00 2001
From: Lars Kanis <lars@greiz-reinsdorf.de>
Date: Sat, 30 Jan 2016 08:08:07 +0100
Subject: [PATCH] Change the fake mechanism to be compatible with bundler.
The previous fake mechanism worked by hooking onto the
"require 'rbconfig'" call.
This is problematic because bundler internally requires rbconfig, but doesn't
work corretly in a faked environment.
It then fails to load gems that are also part of the standard library, like
json and rdoc.
This results in issues like https://github.com/rake-compiler/rake-compiler-dock/issues/8
The fake mechanism is now changed to hook onto the "require 'mkrb'" call,
which is typically part of the extconf file, and it is where the faked platform
values are actually needed.
That way it is loaded after bundler/setup, so that the library paths are
set according to the Gemfile.lock, to the native Linux libraries, before
the fake environment is active.
Please note, that the build directory of a given gem needs to be cleared,
in order to get updated fake files. So do a "rm tmp pkg -rf".
---
lib/rake/extensiontask.rb | 35 ++++++++++++++---------------------
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/lib/rake/extensiontask.rb b/lib/rake/extensiontask.rb
index 030af96..f914919 100644
--- a/lib/rake/extensiontask.rb
+++ b/lib/rake/extensiontask.rb
@@ -169,8 +169,8 @@ Java extension should be preferred.
# now add the extconf script
cmd << abs_extconf.relative_path_from(abs_tmp_path)
- # rbconfig.rb will be present if we are cross compiling
- if t.prerequisites.include?("#{tmp_path}/rbconfig.rb") then
+ # fake.rb will be present if we are cross compiling
+ if t.prerequisites.include?("#{tmp_path}/fake.rb") then
options.push(*cross_config_options(platf))
end
@@ -365,39 +365,30 @@ Java extension should be preferred.
# define compilation tasks for cross platform!
define_compile_tasks(for_platform, ruby_ver)
- # chain fake.rb, rbconfig.rb and mkmf.rb to Makefile generation
+ # chain fake.rb and mkmf.rb to Makefile generation
file "#{tmp_path}/Makefile" => ["#{tmp_path}/fake.rb",
- "#{tmp_path}/rbconfig.rb",
"#{tmp_path}/mkmf.rb"]
- # copy the file from the cross-ruby location
- file "#{tmp_path}/rbconfig.rb" => [rbconfig_file] do |t|
+ # copy the rbconfig from the cross-ruby location and
+ # genearte fake.rb for different ruby versions
+ file "#{tmp_path}/fake.rb" => [rbconfig_file] do |t|
File.open(t.name, 'w') do |f|
- f.write "require 'fake.rb'\n\n"
+ f.write fake_rb(for_platform, ruby_ver)
f.write File.read(t.prerequisites.first)
end
end
# copy mkmf from cross-ruby location
file "#{tmp_path}/mkmf.rb" => [mkmf_file] do |t|
- cp t.prerequisites.first, t.name
- if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
- File.open(t.name, 'r+t') do |f|
- content = f.read
+ File.open(t.name, 'w') do |f|
+ content = File.read(t.prerequisites.first)
+ content.sub!(/^(require ')rbconfig(')$/, '\\1fake\\2')
+ if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
content.sub!(/^( break )\*(defaults)$/, '\\1\\2.first')
content.sub!(/^( return )\*(defaults)$/, '\\1\\2.first')
content.sub!(/^( mfile\.)print( configuration\(srcprefix\))$/, '\\1puts\\2')
- f.rewind
- f.write content
- f.truncate(f.tell)
end
- end
- end
-
- # genearte fake.rb for different ruby versions
- file "#{tmp_path}/fake.rb" do |t|
- File.open(t.name, 'w') do |f|
- f.write fake_rb(for_platform, ruby_ver)
+ f.write content
end
end
@@ -495,8 +486,10 @@ Java extension should be preferred.
# "cannot load such file -- win32/resolv" when it is required later on.
# See also: https://github.com/tjschuck/rake-compiler-dev-box/issues/5
require 'resolv'
+ require 'rbconfig'
class Object
+ remove_const :RbConfig
remove_const :RUBY_PLATFORM
remove_const :RUBY_VERSION
remove_const :RUBY_DESCRIPTION if defined?(RUBY_DESCRIPTION)
--
2.5.0.windows.1

@ -1,14 +0,0 @@
diff --git a/tasks/bin/cross-ruby.rake b/tasks/bin/cross-ruby.rake
index 6acc816..6aa2a49 100644
--- a/tasks/bin/cross-ruby.rake
+++ b/tasks/bin/cross-ruby.rake
@@ -135,8 +135,7 @@ file "#{USER_HOME}/builds/#{MINGW_HOST}/#{RUBY_CC_VERSION}/Makefile" => ["#{USER
"--build=#{RUBY_BUILD}",
'--enable-shared',
'--disable-install-doc',
- '--without-tk',
- '--without-tcl'
+ '--with-ext='
]
# Force Winsock2 for Ruby 1.8, 1.9 defaults to it

@ -1,2 +0,0 @@
diff --git a/configure b/configure
index 55157af..6630eba 100755

@ -1,13 +0,0 @@
diff --git a/configure b/configure
index 898730c..cfe6253 100755
--- a/configure
+++ b/configure
@@ -14695,7 +14695,7 @@ for ac_func in fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
setsid telldir seekdir fchmod cosh sinh tanh log2 round\
setuid setgid daemon select_large_fdset setenv unsetenv\
mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
- pread sendfile shutdown sigaltstack dl_iterate_phdr
+ pread shutdown sigaltstack dl_iterate_phdr
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"

@ -1,2 +0,0 @@
diff --git a/configure b/configure
index 55157af..6630eba 100755

@ -1,12 +0,0 @@
diff --git a/configure b/configure
index ebe3d8c..a336b73 100755
--- a/configure
+++ b/configure
@@ -18943,7 +18943,6 @@ do :
ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
if test "x$ac_cv_func_sendfile" = xyes; then :
cat >>confdefs.h <<_ACEOF
-#define HAVE_SENDFILE 1
_ACEOF
fi

@ -1,12 +0,0 @@
diff --git a/configure b/configure
index ebe3d8c..a336b73 100755
--- a/configure
+++ b/configure
@@ -18943,7 +18943,6 @@ do :
ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
if test "x$ac_cv_func_sendfile" = xyes; then :
cat >>confdefs.h <<_ACEOF
-#define HAVE_SENDFILE 1
_ACEOF
fi

@ -1,12 +0,0 @@
#!/bin/bash
groupadd -g "$GID" "$GROUP"
mkdir -p /tmp/home
useradd -g "$GID" -u "$UID" -G rvm,sudo -p "" -b /tmp/home -m "$USER"
HOME=$(bash <<< "echo ~$USER")
ln -s /usr/local/rake-compiler "$HOME"/.rake-compiler
sudo -u "$USER" --set-home \
BASH_ENV=/etc/rubybashrc \
-- "$@"

@ -1,43 +0,0 @@
/*
* This program handles SIGINT and forwards it to another process.
* It is intended to be run as PID 1.
*
* Docker starts processes with "docker run" as PID 1.
* On Linux, the default signal handler for PID 1 ignores any signals.
* Therefore Ctrl-C aka SIGINT is ignored per default.
*/
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
int pid = 0;
void
handle_sigint (int signum)
{
if(pid)
kill(pid, SIGINT);
}
int main(int argc, char *argv[]){
struct sigaction new_action;
int status = -1;
/* Set up the structure to specify the new action. */
new_action.sa_handler = handle_sigint;
sigemptyset (&new_action.sa_mask);
new_action.sa_flags = 0;
sigaction (SIGINT, &new_action, (void*)0);
pid = fork();
if(pid){
wait(&status);
return WEXITSTATUS(status);
}else{
status = execvp(argv[1], &argv[1]);
perror("exec");
return status;
}
}

@ -1,30 +0,0 @@
#!/usr/bin/env ruby
# Strip file on local folder instead of a Virtualbox shared folder
# to work around this bug: https://www.virtualbox.org/ticket/8463
require 'tempfile'
require 'fileutils'
strip = "#{File.basename($0)}.bin"
files = ARGV.reject{|f| f=~/^-/ }.map do |arg|
tmp = Tempfile.new 'strip'
tmp.close
FileUtils.cp arg, tmp.path
[tmp, arg]
end
options = ARGV.select{|f| f=~/^-/ } + files.map{|t,o| t.path }
unless system( strip, *options )
exit 127
end
code = $?.exitstatus
files.each do |tmp, orig|
FileUtils.rm orig
FileUtils.cp tmp.path, orig
end
exit code

@ -1 +0,0 @@
Defaults env_keep += "http_proxy https_proxy ftp_proxy RCD_HOST_RUBY_PLATFORM RCD_HOST_RUBY_VERSION RCD_IMAGE RUBY_CC_VERSION"

@ -1,4 +1,5 @@
build --client_env=CC=clang
build --copt -DGRPC_BAZEL_BUILD
build:asan --strip=never
build:asan --copt -fsanitize-coverage=edge

@ -47,7 +47,7 @@ EOF
MAKE="make -j8"
for v in 2.4.0 2.3.0 2.2.2 2.1.5 2.0.0-p645 ; do
for v in 2.5.0 2.4.0 2.3.0 2.2.2 2.1.6 2.0.0-p645 ; do
ccache -c
rake -f $CROSS_RUBY cross-ruby VERSION=$v HOST=x86_64-darwin11
done

@ -48,6 +48,7 @@ LICENSE_PREFIX = {
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
'.mm': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',

@ -7616,6 +7616,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
"test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",
@ -7697,6 +7698,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
"test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",

File diff suppressed because it is too large