Merge branch 'master' of github.com:grpc/grpc into nanopb_build_cleanup

pull/15595/head
David Garcia Quintas 7 years ago
commit 24f6a02b31
56 changed files (changed lines per file in parentheses):
  1. Makefile (19)
  2. bazel/cc_grpc_library.bzl (17)
  3. bazel/grpc_deps.bzl (8)
  4. examples/ruby/pubsub/.gitignore (15)
  5. examples/ruby/pubsub/Gemfile (4)
  6. examples/ruby/pubsub/google/protobuf/empty.rb (0)
  7. examples/ruby/pubsub/pubsub_demo.rb (0)
  8. examples/ruby/pubsub/tech/pubsub/proto/pubsub.rb (0)
  9. examples/ruby/pubsub/tech/pubsub/proto/pubsub_services.rb (0)
  10. grpc.gemspec (6)
  11. setup.py (41)
  12. src/compiler/csharp_generator.cc (12)
  13. src/core/ext/filters/client_channel/client_channel.cc (520)
  14. src/core/ext/transport/chttp2/server/chttp2_server.cc (2)
  15. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (16)
  16. src/core/ext/transport/chttp2/transport/hpack_encoder.h (7)
  17. src/core/lib/iomgr/tcp_client_custom.cc (6)
  18. src/csharp/Grpc.Examples/MathGrpc.cs (2)
  19. src/csharp/Grpc.HealthCheck/HealthGrpc.cs (2)
  20. src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs (2)
  21. src/csharp/Grpc.IntegrationTesting/EmptyService.cs (38)
  22. src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs (85)
  23. src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs (2)
  24. src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs (2)
  25. src/csharp/Grpc.IntegrationTesting/TestGrpc.cs (2)
  26. src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs (2)
  27. src/csharp/Grpc.Reflection/ReflectionGrpc.cs (2)
  28. src/csharp/generate_proto_csharp.sh (2)
  29. src/objective-c/BoringSSL.podspec (5)
  30. src/php/tests/unit_tests/CallCredentials2Test.php (1)
  31. src/php/tests/unit_tests/CallCredentialsTest.php (1)
  32. src/php/tests/unit_tests/CallTest.php (6)
  33. src/php/tests/unit_tests/EndToEndTest.php (5)
  34. src/php/tests/unit_tests/InterceptorTest.php (30)
  35. src/php/tests/unit_tests/SecureEndToEndTest.php (2)
  36. src/php/tests/unit_tests/ServerTest.php (5)
  37. src/proto/grpc/testing/empty_service.proto (23)
  38. src/python/grpcio/grpc/__init__.py (19)
  39. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi (13)
  40. src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx (5)
  41. templates/Makefile.template (19)
  42. templates/grpc.gemspec.template (6)
  43. templates/tools/dockerfile/apt_get_pyenv.include (5)
  44. templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template (8)
  45. test/build/openssl-npn.c (30)
  46. test/core/transport/chttp2/hpack_encoder_test.cc (23)
  47. test/cpp/end2end/round_robin_end2end_test.cc (226)
  48. tools/dockerfile/test/python_pyenv_x64/Dockerfile (13)
  49. tools/internal_ci/helper_scripts/prepare_build_macos_rc (3)
  50. tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh (11)
  51. tools/internal_ci/linux/grpc_msan_on_foundry.sh (13)
  52. tools/internal_ci/linux/grpc_ubsan_on_foundry.sh (11)
  53. tools/interop_matrix/client_matrix.py (10)
  54. tools/interop_matrix/create_matrix_images.py (33)
  55. tools/interop_matrix/patches/csharp_v1.0.1/git_repo.patch (81)
  56. tools/interop_matrix/run_interop_matrix_tests.py (4)

@ -503,7 +503,6 @@ endif
ifeq ($(HAS_PKG_CONFIG),true)
OPENSSL_ALPN_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.0.2 openssl
OPENSSL_NPN_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.0.1 openssl
ZLIB_CHECK_CMD = $(PKG_CONFIG) --exists zlib
PROTOBUF_CHECK_CMD = $(PKG_CONFIG) --atleast-version=3.5.0 protobuf
CARES_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.11.0 libcares
@ -516,7 +515,6 @@ OPENSSL_LIBS = ssl crypto
endif
OPENSSL_ALPN_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/openssl-alpn.c $(addprefix -l, $(OPENSSL_LIBS)) $(LDFLAGS)
OPENSSL_NPN_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/openssl-npn.c $(addprefix -l, $(OPENSSL_LIBS)) $(LDFLAGS)
BORINGSSL_COMPILE_CHECK_CMD = $(CC) $(CPPFLAGS) -Ithird_party/boringssl/include -fvisibility=hidden -DOPENSSL_NO_ASM -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(CFLAGS) -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-unknown-pragmas -Wno-implicit-function-declaration -Wno-unused-variable -Wno-sign-compare -Wno-implicit-fallthrough $(NO_W_EXTRA_SEMI) -o $(TMPOUT) test/build/boringssl.c $(LDFLAGS)
ZLIB_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/zlib.c -lz $(LDFLAGS)
PROTOBUF_CHECK_CMD = $(CXX) $(CPPFLAGS) $(CXXFLAGS) -o $(TMPOUT) test/build/protobuf.cc -lprotobuf $(LDFLAGS)
@ -544,13 +542,7 @@ HAS_SYSTEM_PROTOBUF_VERIFY = $(shell $(PROTOBUF_CHECK_CMD) 2> /dev/null && echo
ifndef REQUIRE_CUSTOM_LIBRARIES_$(CONFIG)
HAS_SYSTEM_OPENSSL_ALPN ?= $(shell $(OPENSSL_ALPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_SYSTEM_OPENSSL_ALPN),true)
HAS_SYSTEM_OPENSSL_NPN = true
CACHE_MK += HAS_SYSTEM_OPENSSL_ALPN = true,
else
HAS_SYSTEM_OPENSSL_NPN ?= $(shell $(OPENSSL_NPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
endif
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
CACHE_MK += HAS_SYSTEM_OPENSSL_NPN = true,
endif
HAS_SYSTEM_ZLIB ?= $(shell $(ZLIB_CHECK_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_SYSTEM_ZLIB),true)
@ -567,7 +559,6 @@ endif
else
# override system libraries if the config requires a custom compiled library
HAS_SYSTEM_OPENSSL_ALPN = false
HAS_SYSTEM_OPENSSL_NPN = false
HAS_SYSTEM_ZLIB = false
HAS_SYSTEM_PROTOBUF = false
HAS_SYSTEM_CARES = false
@ -714,12 +705,7 @@ ifneq ($(HAS_EMBEDDED_OPENSSL_ALPN),false)
EMBED_OPENSSL ?= $(HAS_EMBEDDED_OPENSSL_ALPN)
NO_SECURE ?= false
else # HAS_EMBEDDED_OPENSSL_ALPN=false
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
EMBED_OPENSSL ?= false
NO_SECURE ?= false
else
NO_SECURE ?= true
endif # HAS_SYSTEM_OPENSSL_NPN=true
endif # HAS_EMBEDDED_OPENSSL_ALPN
endif # HAS_SYSTEM_OPENSSL_ALPN
@ -753,10 +739,10 @@ LDFLAGS := $(LDFLAGS_OPENSSL_PKG_CONFIG) $(LDFLAGS)
else # HAS_PKG_CONFIG=false
LIBS_SECURE = $(OPENSSL_LIBS)
endif # HAS_PKG_CONFIG
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
ifeq ($(DISABLE_ALPN),true)
CPPFLAGS += -DTSI_OPENSSL_ALPN_SUPPORT=0
LIBS_SECURE = $(OPENSSL_LIBS)
endif # HAS_SYSTEM_OPENSSL_NPN
endif # DISABLE_ALPN
PC_LIBS_SECURE = $(addprefix -l, $(LIBS_SECURE))
endif # EMBED_OPENSSL
endif # NO_SECURE
@ -1349,7 +1335,6 @@ uri_fuzzer_test_one_entry: $(BINDIR)/$(CONFIG)/uri_fuzzer_test_one_entry
run_dep_checks:
$(OPENSSL_ALPN_CHECK_CMD) || true
$(OPENSSL_NPN_CHECK_CMD) || true
$(ZLIB_CHECK_CMD) || true
$(PERFTOOLS_CHECK_CMD) || true
$(PROTOBUF_CHECK_CMD) || true

@ -43,12 +43,7 @@ def cc_grpc_library(name, srcs, deps, proto_only, well_known_protos, generate_mo
)
if not proto_only:
if use_external:
# when this file is used by non-grpc projects
plugin = "//external:grpc_cpp_plugin"
else:
plugin = "//:grpc_cpp_plugin"
plugin = "@com_github_grpc_grpc//:grpc_cpp_plugin"
generate_cc(
name = codegen_grpc_target,
srcs = [proto_target],
@ -57,14 +52,8 @@ def cc_grpc_library(name, srcs, deps, proto_only, well_known_protos, generate_mo
generate_mocks = generate_mocks,
**kwargs
)
if use_external:
# when this file is used by non-grpc projects
grpc_deps = ["//external:grpc++_codegen_proto",
"//external:protobuf"]
else:
grpc_deps = ["//:grpc++_codegen_proto", "//external:protobuf"]
grpc_deps = ["@com_github_grpc_grpc//:grpc++_codegen_proto",
"//external:protobuf"]
native.cc_library(
name = name,
srcs = [":" + codegen_grpc_target, ":" + codegen_target],

@ -144,12 +144,12 @@ def grpc_deps():
if "com_github_bazelbuild_bazeltoolchains" not in native.existing_rules():
native.http_archive(
name = "com_github_bazelbuild_bazeltoolchains",
strip_prefix = "bazel-toolchains-09c850dbb8e785ded3d23a7003e9a0168fe1fb2f",
strip_prefix = "bazel-toolchains-4653c01284d8a4a536f8f9bb47b7d10f94c549e7",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/09c850dbb8e785ded3d23a7003e9a0168fe1fb2f.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/09c850dbb8e785ded3d23a7003e9a0168fe1fb2f.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/4653c01284d8a4a536f8f9bb47b7d10f94c549e7.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/4653c01284d8a4a536f8f9bb47b7d10f94c549e7.tar.gz",
],
sha256 = "08e521cf2d0998e3d27a16c2e2542ebf4d3857b3ddadcfd145d128140754d7bd",
sha256 = "1c4a532b396c698e6467a1548554571cb85fa091e472b05e398ebc836c315d77",
)
# TODO: move some dependencies from "grpc_deps" here?

@ -0,0 +1,15 @@
/.bundle/
/.yardoc
/Gemfile.lock
/_yardoc/
/coverage/
/doc/
/pkg/
/spec/reports/
/tmp/
*.bundle
*.so
*.o
*.a
mkmf.log
vendor

@ -0,0 +1,4 @@
source 'https://rubygems.org/'
gem 'grpc', '~> 1.0'
gem 'googleauth', '>= 0.5.1', '< 0.7'

@ -20,7 +20,9 @@ Gem::Specification.new do |s|
s.files += Dir.glob('src/ruby/bin/**/*')
s.files += Dir.glob('src/ruby/ext/**/*')
s.files += Dir.glob('src/ruby/lib/**/*')
s.files += Dir.glob('src/ruby/pb/**/*')
s.files += Dir.glob('src/ruby/pb/**/*').reject do |f|
f.match(%r{^src/ruby/pb/test})
end
s.files += Dir.glob('include/grpc/**/*')
s.test_files = Dir.glob('src/ruby/spec/**/*')
s.bindir = 'src/ruby/bin'
@ -28,7 +30,6 @@ Gem::Specification.new do |s|
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '>= 0.5.1', '< 0.7'
s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
@ -41,6 +42,7 @@ Gem::Specification.new do |s|
s.add_development_dependency 'rspec', '~> 3.6'
s.add_development_dependency 'rubocop', '~> 0.49.1'
s.add_development_dependency 'signet', '~> 0.7.0'
s.add_development_dependency 'googleauth', '>= 0.5.1', '< 0.7'
s.extensions = %w(src/ruby/ext/grpc/extconf.rb)

@ -35,7 +35,7 @@ egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PY3 = sys.version_info.major == 3
PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
CORE_INCLUDE = ('include', '.',)
BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
SSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
NANOPB_INCLUDE = (os.path.join('third_party', 'nanopb'),)
CARES_INCLUDE = (
@ -85,6 +85,24 @@ CLASSIFIERS = [
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Export this variable to use the system installation of openssl. You need to
# have the header files installed (in /usr/include/openssl) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_OPENSSL = os.environ.get('GRPC_PYTHON_BUILD_SYSTEM_OPENSSL',
False)
# Export this variable to use the system installation of zlib. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_ZLIB = os.environ.get('GRPC_PYTHON_BUILD_SYSTEM_ZLIB',
False)
# Export this variable to use the system installation of cares. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_CARES = os.environ.get('GRPC_PYTHON_BUILD_SYSTEM_CARES',
False)
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
@ -149,8 +167,21 @@ CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
if BUILD_WITH_SYSTEM_OPENSSL:
CORE_C_FILES = filter(lambda x: 'third_party/boringssl' not in x, CORE_C_FILES)
CORE_C_FILES = filter(lambda x: 'src/boringssl' not in x, CORE_C_FILES)
SSL_INCLUDE = (os.path.join('/usr', 'include', 'openssl'),)
if BUILD_WITH_SYSTEM_ZLIB:
CORE_C_FILES = filter(lambda x: 'third_party/zlib' not in x, CORE_C_FILES)
ZLIB_INCLUDE = (os.path.join('/usr', 'include'),)
if BUILD_WITH_SYSTEM_CARES:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
CARES_INCLUDE = (os.path.join('/usr', 'include'),)
EXTENSION_INCLUDE_DIRECTORIES = (
(PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE +
(PYTHON_STEM,) + CORE_INCLUDE + SSL_INCLUDE + ZLIB_INCLUDE +
NANOPB_INCLUDE + CARES_INCLUDE + ADDRESS_SORTING_INCLUDE)
EXTENSION_LIBRARIES = ()
@ -160,6 +191,12 @@ if not "win32" in sys.platform:
EXTENSION_LIBRARIES += ('m',)
if "win32" in sys.platform:
EXTENSION_LIBRARIES += ('advapi32', 'ws2_32',)
if BUILD_WITH_SYSTEM_OPENSSL:
EXTENSION_LIBRARIES += ('ssl', 'crypto',)
if BUILD_WITH_SYSTEM_ZLIB:
EXTENSION_LIBRARIES += ('z',)
if BUILD_WITH_SYSTEM_CARES:
EXTENSION_LIBRARIES += ('cares',)
DEFINE_MACROS = (
('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600),
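The new GRPC_PYTHON_BUILD_SYSTEM_* switches above are plain environment-variable checks. A minimal Python sketch of driving a build with them (the build_ext invocation and env values are assumptions for illustration, not part of this diff):

# Hypothetical illustration: select the system libraries before building,
# mirroring the os.environ.get() checks added in setup.py above.
import os, subprocess, sys

os.environ["GRPC_PYTHON_BUILD_SYSTEM_OPENSSL"] = "1"  # headers from /usr/include/openssl
os.environ["GRPC_PYTHON_BUILD_SYSTEM_ZLIB"] = "1"     # headers from /usr/include
os.environ["GRPC_PYTHON_BUILD_SYSTEM_CARES"] = "1"    # headers from /usr/include

# setup.py treats any non-empty value as "enabled", since it only calls
# os.environ.get(name, False); the child process inherits os.environ.
subprocess.check_call([sys.executable, "setup.py", "build_ext", "--inplace"])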

@ -590,19 +590,16 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor* service) {
out->Print("{\n");
out->Indent();
out->Print("return grpc::ServerServiceDefinition.CreateBuilder()\n");
out->Print("return grpc::ServerServiceDefinition.CreateBuilder()");
out->Indent();
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor* method = service->method(i);
out->Print(".AddMethod($methodfield$, serviceImpl.$methodname$)",
out->Print("\n.AddMethod($methodfield$, serviceImpl.$methodname$)",
"methodfield", GetMethodFieldName(method), "methodname",
method->name());
if (i == service->method_count() - 1) {
out->Print(".Build();");
}
out->Print("\n");
}
out->Print(".Build();\n");
out->Outdent();
out->Outdent();
@ -676,7 +673,8 @@ grpc::string GetServices(const FileDescriptor* file, bool generate_client,
out.PrintRaw(leading_comments.c_str());
}
out.Print("#pragma warning disable 1591\n");
out.Print("#pragma warning disable 0414, 1591\n");
out.Print("#region Designer generated code\n");
out.Print("\n");
out.Print("using grpc = global::Grpc.Core;\n");

@ -891,6 +891,7 @@ typedef struct client_channel_call_data {
grpc_closure pick_cancel_closure;
grpc_polling_entity* pollent;
bool pollent_added_to_interested_parties;
// Batches are added to this list when received from above.
// They are removed when we are done handling the batch (i.e., when
@ -949,7 +950,6 @@ static void retry_commit(grpc_call_element* elem,
static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
static void on_complete(void* arg, grpc_error* error);
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
static void start_pick_locked(void* arg, grpc_error* ignored);
//
@ -2684,59 +2684,133 @@ static void pick_done(void* arg, grpc_error* error) {
}
}
static void maybe_add_call_to_channel_interested_parties_locked(
grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (!calld->pollent_added_to_interested_parties) {
calld->pollent_added_to_interested_parties = true;
grpc_polling_entity_add_to_pollset_set(calld->pollent,
chand->interested_parties);
}
}
static void maybe_del_call_from_channel_interested_parties_locked(
grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->pollent_added_to_interested_parties) {
calld->pollent_added_to_interested_parties = false;
grpc_polling_entity_del_from_pollset_set(calld->pollent,
chand->interested_parties);
}
}
// Invoked when a pick is completed to leave the client_channel combiner
// and continue processing in the call combiner.
// If needed, removes the call's polling entity from chand->interested_parties.
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = static_cast<call_data*>(elem->call_data);
maybe_del_call_from_channel_interested_parties_locked(elem);
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
}
// A wrapper around pick_done_locked() that is used in cases where
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_polling_entity_del_from_pollset_set(calld->pollent,
chand->interested_parties);
pick_done_locked(elem, error);
}
namespace grpc_core {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
if (GPR_LIKELY(error != GRPC_ERROR_NONE && chand->lb_policy != nullptr)) {
// Performs subchannel pick via LB policy.
class LbPicker {
public:
// Starts a pick on chand->lb_policy.
static void StartLocked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy.get());
}
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
// If this is a retry, use the send_initial_metadata payload that
// we've cached; otherwise, use the pending batch. The
// send_initial_metadata batch will be the first pending batch in the
// list, as set by get_batch_index() above.
calld->pick.initial_metadata =
calld->seen_send_initial_metadata
? &calld->send_initial_metadata
: calld->pending_batches[0]
.batch->payload->send_initial_metadata.send_initial_metadata;
calld->pick.initial_metadata_flags =
calld->seen_send_initial_metadata
? calld->send_initial_metadata_flags
: calld->pending_batches[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->pick_closure, &LbPicker::DoneLocked, elem,
grpc_combiner_scheduler(chand->combiner));
calld->pick.on_complete = &calld->pick_closure;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
if (GPR_LIKELY(pick_done)) {
// Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
pick_done_locked(elem, GRPC_ERROR_NONE);
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
} else {
// Pick will be returned asynchronously.
// Add the polling entity from call_data to the channel_data's
// interested_parties, so that the I/O of the LB policy can be done
// under it. It will be removed in pick_done_locked().
maybe_add_call_to_channel_interested_parties_locked(elem);
// Request notification on call cancellation.
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
&LbPicker::CancelLocked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand,
calld);
private:
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes pick_done_locked().
static void DoneLocked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
pick_done_locked(elem, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE && chand->lb_policy != nullptr)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling pick from LB policy %p", chand,
calld, chand->lb_policy.get());
}
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
};
} // namespace grpc_core
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
@ -2766,6 +2840,24 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
grpc_deadline_state_reset(elem, calld->deadline);
}
}
// If the service config set wait_for_ready and the application
// did not explicitly set it, use the value from the service config.
uint32_t* send_initial_metadata_flags =
&calld->pending_batches[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
if (GPR_UNLIKELY(
calld->method_params->wait_for_ready() !=
ClientChannelMethodParams::WAIT_FOR_READY_UNSET &&
!(*send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET))) {
if (calld->method_params->wait_for_ready() ==
ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
*send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
*send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
}
}
// If no retry policy, disable retries.
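A worked example of the wait_for_ready merge above, as an illustrative Python sketch (the two flag constants stand in for the GRPC_INITIAL_METADATA_* bits; their numeric values here are assumptions):

WAIT_FOR_READY                = 0x10  # stand-in for GRPC_INITIAL_METADATA_WAIT_FOR_READY
WAIT_FOR_READY_EXPLICITLY_SET = 0x20  # stand-in for ..._WAIT_FOR_READY_EXPLICITLY_SET

def merge(flags, service_config_value):
    # Apply the service-config value only when the application did not set
    # wait_for_ready explicitly; None models WAIT_FOR_READY_UNSET.
    if (service_config_value is not None
            and not (flags & WAIT_FOR_READY_EXPLICITLY_SET)):
        if service_config_value:
            flags |= WAIT_FOR_READY
        else:
            flags &= ~WAIT_FOR_READY
    return flags

print(hex(merge(0x00, True)))   # 0x10: service config turns wait_for_ready on
print(hex(merge(0x20, True)))   # 0x20: explicit application choice wins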
@ -2776,215 +2868,164 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
}
}
// Starts a pick on chand->lb_policy.
// Returns true if pick is completed synchronously.
static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
// Invoked once resolver results are available.
static void process_service_config_and_start_lb_pick_locked(
grpc_call_element* elem) {
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
calld, chand->lb_policy.get());
}
// Only get service config data on the first attempt.
if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
apply_service_config_to_call_locked(elem);
}
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
//
// The send_initial_metadata batch will be the first one in the list,
// as set by get_batch_index() above.
calld->pick.initial_metadata =
calld->seen_send_initial_metadata
? &calld->send_initial_metadata
: calld->pending_batches[0]
.batch->payload->send_initial_metadata.send_initial_metadata;
uint32_t send_initial_metadata_flags =
calld->seen_send_initial_metadata
? calld->send_initial_metadata_flags
: calld->pending_batches[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->method_params != nullptr &&
calld->method_params->wait_for_ready() !=
ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
if (GPR_UNLIKELY(!wait_for_ready_set_from_api &&
wait_for_ready_set_from_service_config)) {
if (calld->method_params->wait_for_ready() ==
ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
calld->pick.initial_metadata_flags = send_initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
calld->pick.on_complete = &calld->pick_closure;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
if (GPR_LIKELY(pick_done)) {
// Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
pick_callback_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
// Start LB pick.
grpc_core::LbPicker::StartLocked(elem);
}
typedef struct {
grpc_call_element* elem;
bool finished;
grpc_closure closure;
grpc_closure cancel_closure;
} pick_after_resolver_result_args;
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args =
static_cast<pick_after_resolver_result_args*>(arg);
if (GPR_LIKELY(args->finished)) {
gpr_free(args);
return;
}
// If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
// until after this call has been destroyed. We mark the operation as
// finished, so that when pick_after_resolver_result_done_locked()
// is called, it will be a no-op. We also immediately invoke
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
grpc_call_element* elem = args->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
chand, calld);
}
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to call async_pick_done_locked() here -- we are
// essentially calling it here instead of calling it in
// pick_after_resolver_result_done_locked().
async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
static void pick_after_resolver_result_done_locked(void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args =
static_cast<pick_after_resolver_result_args*>(arg);
if (GPR_UNLIKELY(args->finished)) {
/* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "call cancelled before resolver result");
}
gpr_free(args);
return;
}
args->finished = true;
grpc_call_element* elem = args->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
namespace grpc_core {
// Handles waiting for a resolver result.
// Used only for the first call on an idle channel.
class ResolverResultWaiter {
public:
explicit ResolverResultWaiter(grpc_call_element* elem) : elem_(elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring pick pending resolver result",
chand, calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
} else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
// Shutting down.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
calld);
// Add closure to be run when a resolver result is available.
GRPC_CLOSURE_INIT(&done_closure_, &ResolverResultWaiter::DoneLocked, this,
grpc_combiner_scheduler(chand->combiner));
AddToWaitingList();
// Set cancellation closure, so that we abort if the call is cancelled.
GRPC_CLOSURE_INIT(&cancel_closure_, &ResolverResultWaiter::CancelLocked,
this, grpc_combiner_scheduler(chand->combiner));
grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
&cancel_closure_);
}
private:
// Adds closure_ to chand->waiting_for_resolver_result_closures.
void AddToWaitingList() {
channel_data* chand = static_cast<channel_data*>(elem_->channel_data);
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&done_closure_, GRPC_ERROR_NONE);
}
// Invoked when a resolver result is available.
static void DoneLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
// If CancelLocked() has already run, delete ourselves without doing
// anything. Note that the call stack may have already been destroyed,
// so it's not safe to access anything in elem_.
if (GPR_UNLIKELY(self->finished_)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "call cancelled before resolver result");
}
Delete(self);
return;
}
async_pick_done_locked(
elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else if (GPR_UNLIKELY(chand->lb_policy == nullptr)) {
// Transient resolver failure.
// If call has wait_for_ready=true, try again; otherwise, fail.
uint32_t send_initial_metadata_flags =
calld->seen_send_initial_metadata
? calld->send_initial_metadata_flags
: calld->pending_batches[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
// Otherwise, process the resolver result.
grpc_call_element* elem = self->elem_;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=true; trying again",
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
pick_after_resolver_result_start_locked(elem);
pick_done_locked(elem, GRPC_ERROR_REF(error));
} else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
// Shutting down.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
calld);
}
pick_done_locked(elem,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else if (GPR_UNLIKELY(chand->lb_policy == nullptr)) {
// Transient resolver failure.
// If call has wait_for_ready=true, try again; otherwise, fail.
uint32_t send_initial_metadata_flags =
calld->seen_send_initial_metadata
? calld->send_initial_metadata_flags
: calld->pending_batches[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=true; trying again",
chand, calld);
}
// Re-add ourselves to the waiting list.
self->AddToWaitingList();
// Return early so that we don't set finished_ to true below.
return;
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=false; failing",
chand, calld);
}
pick_done_locked(
elem,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=false; failing",
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing LB pick",
chand, calld);
}
async_pick_done_locked(
elem,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
process_service_config_and_start_lb_pick_locked(elem);
}
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
self->finished_ = true;
}
// Invoked when the call is cancelled.
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
// If DoneLocked() has already run, delete ourselves without doing anything.
if (GPR_LIKELY(self->finished_)) {
Delete(self);
return;
}
if (GPR_LIKELY(pick_callback_start_locked(elem))) {
// Even if the LB policy returns a result synchronously, we have
// already added our polling entity to chand->interested_parties
// in order to wait for the resolver result, so we need to
// remove it here. Therefore, we call async_pick_done_locked()
// instead of pick_done_locked().
async_pick_done_locked(elem, GRPC_ERROR_NONE);
// If we are being cancelled, immediately invoke pick_done_locked()
// to propagate the error back to the caller.
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
grpc_call_element* elem = self->elem_;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling call waiting for name "
"resolution",
chand, calld);
}
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to call pick_done_locked() here -- we are essentially
// calling it here instead of calling it in DoneLocked().
pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
self->finished_ = true;
}
}
static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
pick_after_resolver_result_args* args =
static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE);
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
GRPC_CLOSURE_INIT(&args->cancel_closure,
pick_after_resolver_result_cancel_locked, args,
grpc_combiner_scheduler(chand->combiner)));
}
grpc_call_element* elem_;
grpc_closure done_closure_;
grpc_closure cancel_closure_;
bool finished_ = false;
};
} // namespace grpc_core
static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
@ -2993,31 +3034,24 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
GPR_ASSERT(calld->subchannel_call == nullptr);
if (GPR_LIKELY(chand->lb_policy != nullptr)) {
// We already have an LB policy, so ask it for a pick.
if (GPR_LIKELY(pick_callback_start_locked(elem))) {
// Pick completed synchronously.
pick_done_locked(elem, GRPC_ERROR_NONE);
return;
}
// We already have resolver results, so process the service config
// and start an LB pick.
process_service_config_and_start_lb_pick_locked(elem);
} else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
pick_done_locked(elem,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else {
// We do not yet have an LB policy, so wait for a resolver result.
if (GPR_UNLIKELY(chand->resolver == nullptr)) {
pick_done_locked(elem,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
return;
}
if (GPR_UNLIKELY(!chand->started_resolving)) {
start_resolving_locked(chand);
}
pick_after_resolver_result_start_locked(elem);
// Create a new waiter, which will delete itself when done.
grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
// Add the polling entity from call_data to the channel_data's
// interested_parties, so that the I/O of the resolver can be done
// under it. It will be removed in pick_done_locked().
maybe_add_call_to_channel_interested_parties_locked(elem);
}
// We need to wait for either a resolver result or for an async result
// from the LB policy. Add the polling entity from call_data to the
// channel_data's interested_parties, so that the I/O of the LB policy
// and resolver can be done under it. The polling entity will be
// removed in async_pick_done_locked().
grpc_polling_entity_add_to_pollset_set(calld->pollent,
chand->interested_parties);
}
//
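For readers following the ResolverResultWaiter refactor, a loose Python sketch (not the actual C++ code) of its self-deleting done/cancel pattern: whichever of the two callbacks runs second observes finished_ == True and only frees the object. Both callbacks run under the client_channel combiner, so no extra locking is modeled here.

class ResolverResultWaiterSketch:
    def __init__(self):
        self.finished = False

    def done(self, error=None):
        if self.finished:          # cancel already ran; just clean up
            return self._delete()
        # ... process the resolver result and start the LB pick ...
        self.finished = True

    def cancel(self, error):
        if self.finished:          # done already ran; just clean up
            return self._delete()
        if error is not None:
            pass                   # ... propagate "Pick cancelled" to the caller ...
        self.finished = True

    def _delete(self):
        pass                       # placeholder for grpc_core::Delete(self)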

@ -179,8 +179,8 @@ static void on_accept(void* arg, grpc_endpoint* tcp,
grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create();
grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs,
handshake_mgr);
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
gpr_mu_unlock(&state->mu);
server_connection_state* connection_state =
static_cast<server_connection_state*>(
gpr_zalloc(sizeof(*connection_state)));

@ -41,14 +41,18 @@
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
#define HASH_FRAGMENT_1(x) ((x)&255)
#define HASH_FRAGMENT_2(x) ((x >> 8) & 255)
#define HASH_FRAGMENT_3(x) ((x >> 16) & 255)
#define HASH_FRAGMENT_4(x) ((x >> 24) & 255)
#define HASH_FRAGMENT_MASK (GRPC_CHTTP2_HPACKC_NUM_VALUES - 1)
#define HASH_FRAGMENT_1(x) ((x)&HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_2(x) \
(((x) >> GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS) & HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_3(x) \
(((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 2)) & HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_4(x) \
(((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 3)) & HASH_FRAGMENT_MASK)
/* if the probability of this item being seen again is < 1/x then don't add
it to the table */
#define ONE_ON_ADD_PROBABILITY 128
#define ONE_ON_ADD_PROBABILITY (GRPC_CHTTP2_HPACKC_NUM_VALUES >> 1)
/* don't consider adding anything bigger than this to the hpack table */
#define MAX_DECODER_SPACE_USAGE 512
@ -135,7 +139,7 @@ static void inc_filter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
} else {
int i;
*sum = 0;
for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_FILTERS; i++) {
for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
elems[i] /= 2;
(*sum) += elems[i];
}

@ -28,8 +28,9 @@
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#define GRPC_CHTTP2_HPACKC_NUM_FILTERS 256
#define GRPC_CHTTP2_HPACKC_NUM_VALUES 256
// This should be <= 8. We use 6 to save space.
#define GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS 6
#define GRPC_CHTTP2_HPACKC_NUM_VALUES (1 << GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS)
/* initial table size, per spec */
#define GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE 4096
/* maximum table size we'll actually use */
@ -58,7 +59,7 @@ typedef struct {
a new literal should be added to the compression table or not.
They track a single integer that counts how often a particular value has
been seen. When that count reaches max (255), all values are halved. */
uint8_t filter_elems[GRPC_CHTTP2_HPACKC_NUM_FILTERS];
uint8_t filter_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
/* entry tables for keys & elems: these tables track values that have been
seen and *may* be in the decompressor table */
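To make the new hashing scheme concrete, a small Python sketch of the arithmetic implied by the macros above (values taken from the diff: GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS = 6, so NUM_VALUES = 64 and the mask is 63; ONE_ON_ADD_PROBABILITY becomes NUM_VALUES >> 1 = 32):

NUM_VALUES_BITS = 6
NUM_VALUES = 1 << NUM_VALUES_BITS   # 64 table slots
MASK = NUM_VALUES - 1               # 0x3f

def hash_fragments(h):
    # Four independent 6-bit indices taken from consecutive bit ranges of h,
    # mirroring HASH_FRAGMENT_1..4 in hpack_encoder.cc.
    return tuple((h >> (NUM_VALUES_BITS * i)) & MASK for i in range(4))

print(hash_fragments(0x12345678))   # -> (56, 25, 5, 13)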

@ -140,12 +140,12 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
socket, connect->addr_name);
}
grpc_custom_socket_vtable->connect(
socket, (const grpc_sockaddr*)resolved_addr->addr, resolved_addr->len,
custom_connect_callback);
GRPC_CLOSURE_INIT(&connect->on_alarm, on_alarm, socket,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
grpc_custom_socket_vtable->connect(
socket, (const grpc_sockaddr*)resolved_addr->addr, resolved_addr->len,
custom_connect_callback);
}
grpc_tcp_client_vtable custom_tcp_client_vtable = {tcp_connect};

@ -17,7 +17,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -20,7 +20,7 @@
// The canonical version of this proto can be found at
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
//
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -19,7 +19,7 @@
//
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -0,0 +1,38 @@
// <auto-generated>
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: src/proto/grpc/testing/empty_service.proto
// </auto-generated>
#pragma warning disable 1591, 0612, 3021
#region Designer generated code
using pb = global::Google.Protobuf;
using pbc = global::Google.Protobuf.Collections;
using pbr = global::Google.Protobuf.Reflection;
using scg = global::System.Collections.Generic;
namespace Grpc.Testing {
/// <summary>Holder for reflection information generated from src/proto/grpc/testing/empty_service.proto</summary>
public static partial class EmptyServiceReflection {
#region Descriptor
/// <summary>File descriptor for src/proto/grpc/testing/empty_service.proto</summary>
public static pbr::FileDescriptor Descriptor {
get { return descriptor; }
}
private static pbr::FileDescriptor descriptor;
static EmptyServiceReflection() {
byte[] descriptorData = global::System.Convert.FromBase64String(
string.Concat(
"CipzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL2VtcHR5X3NlcnZpY2UucHJvdG8S",
"DGdycGMudGVzdGluZzIOCgxFbXB0eVNlcnZpY2ViBnByb3RvMw=="));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { },
new pbr::GeneratedClrTypeInfo(null, null));
}
#endregion
}
}
#endregion Designer generated code

@ -0,0 +1,85 @@
// <auto-generated>
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: src/proto/grpc/testing/empty_service.proto
// </auto-generated>
// Original file comments:
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;
namespace Grpc.Testing {
/// <summary>
/// A service that has zero methods.
/// See https://github.com/grpc/grpc/issues/15574
/// </summary>
public static partial class EmptyService
{
static readonly string __ServiceName = "grpc.testing.EmptyService";
/// <summary>Service descriptor</summary>
public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor
{
get { return global::Grpc.Testing.EmptyServiceReflection.Descriptor.Services[0]; }
}
/// <summary>Base class for server-side implementations of EmptyService</summary>
public abstract partial class EmptyServiceBase
{
}
/// <summary>Client for EmptyService</summary>
public partial class EmptyServiceClient : grpc::ClientBase<EmptyServiceClient>
{
/// <summary>Creates a new client for EmptyService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
public EmptyServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for EmptyService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
public EmptyServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
protected EmptyServiceClient() : base()
{
}
/// <summary>Protected constructor to allow creation of configured clients.</summary>
/// <param name="configuration">The client configuration.</param>
protected EmptyServiceClient(ClientBaseConfiguration configuration) : base(configuration)
{
}
/// <summary>Creates a new instance of client from given <c>ClientBaseConfiguration</c>.</summary>
protected override EmptyServiceClient NewInstance(ClientBaseConfiguration configuration)
{
return new EmptyServiceClient(configuration);
}
}
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
public static grpc::ServerServiceDefinition BindService(EmptyServiceBase serviceImpl)
{
return grpc::ServerServiceDefinition.CreateBuilder().Build();
}
}
}
#endregion

@ -23,7 +23,7 @@
// Currently, 'Gauge' (i.e a metric that represents the measured value of
// something at an instant of time) is the only metric type supported by the
// service.
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -19,7 +19,7 @@
//
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -20,7 +20,7 @@
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
//
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -19,7 +19,7 @@
//
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -19,7 +19,7 @@
//
// Service exported by server reflection
//
#pragma warning disable 1591
#pragma warning disable 0414, 1591
#region Designer generated code
using grpc = global::Grpc.Core;

@ -42,4 +42,4 @@ $PROTOC --plugin=$PLUGIN --csharp_out=$TESTING_DIR/CoreStats --grpc_out=$TESTING
# don't match the package names. Setting -I to the correct value src/proto
# breaks the code generation.
$PROTOC --plugin=$PLUGIN --csharp_out=$TESTING_DIR --grpc_out=$TESTING_DIR \
-I . src/proto/grpc/testing/{control,echo_messages,empty,messages,metrics,payloads,benchmark_service,report_qps_scenario_service,worker_service,stats,test}.proto
-I . src/proto/grpc/testing/{control,echo_messages,empty,empty_service,messages,metrics,payloads,benchmark_service,report_qps_scenario_service,worker_service,stats,test}.proto

@ -31,7 +31,7 @@
Pod::Spec.new do |s|
s.name = 'BoringSSL'
version = '10.0.4'
version = '10.0.5'
s.version = version
s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
# Adapted from the homepage:
@ -61,8 +61,7 @@ Pod::Spec.new do |s|
Currently BoringSSL is the SSL library in Chrome/Chromium, Android (but it’s not part of the
NDK) and a number of other apps/programs.
DESC
s.homepage = 'https://boringssl.googlesource.com/boringssl/'
s.documentation_url = 'https://commondatastorage.googleapis.com/chromium-boringssl-docs/headers.html'
s.homepage = 'https://github.com/google/boringssl'
s.license = { :type => 'Mixed', :file => 'LICENSE' }
# "The name and email addresses of the library maintainers, not the Podspec maintainer."
s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite'

@ -35,6 +35,7 @@ class CallCredentials2Test extends PHPUnit_Framework_TestCase
$this->channel = new Grpc\Channel(
'localhost:'.$this->port,
[
'force_new' => true,
'grpc.ssl_target_name_override' => $this->host_override,
'grpc.default_authority' => $this->host_override,
'credentials' => $credentials,

@ -41,6 +41,7 @@ class CallCredentialsTest extends PHPUnit_Framework_TestCase
$this->channel = new Grpc\Channel(
'localhost:'.$this->port,
[
'force_new' => true,
'grpc.ssl_target_name_override' => $this->host_override,
'grpc.default_authority' => $this->host_override,
'credentials' => $this->credentials,

@ -24,12 +24,14 @@ class CallTest extends PHPUnit_Framework_TestCase
public static function setUpBeforeClass()
{
self::$server = new Grpc\Server([]);
self::$port = self::$server->addHttp2Port('0.0.0.0:0');
self::$port = self::$server->addHttp2Port('0.0.0.0:53000');
}
public function setUp()
{
$this->channel = new Grpc\Channel('localhost:'.self::$port, []);
$this->channel = new Grpc\Channel('localhost:'.self::$port, [
'force_new' => true,
]);
$this->call = new Grpc\Call($this->channel,
'/foo',
Grpc\Timeval::infFuture());

@ -22,13 +22,16 @@ class EndToEndTest extends PHPUnit_Framework_TestCase
{
$this->server = new Grpc\Server([]);
$this->port = $this->server->addHttp2Port('0.0.0.0:0');
$this->channel = new Grpc\Channel('localhost:'.$this->port, []);
$this->channel = new Grpc\Channel('localhost:'.$this->port, [
"force_new" => true,
]);
$this->server->start();
}
public function tearDown()
{
$this->channel->close();
unset($this->server);
}
public function testSimpleRequestBody()

@ -206,13 +206,16 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
{
$this->server = new Grpc\Server([]);
$this->port = $this->server->addHttp2Port('0.0.0.0:0');
$this->channel = new Grpc\Channel('localhost:'.$this->port, ['credentials' => Grpc\ChannelCredentials::createInsecure()]);
$this->channel = new Grpc\Channel('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()]);
$this->server->start();
}
public function tearDown()
{
$this->channel->close();
unset($this->server);
}
@ -222,6 +225,7 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
$channel_matadata_interceptor = new ChangeMetadataInterceptor();
$intercept_channel = Grpc\Interceptor::intercept($this->channel, $channel_matadata_interceptor);
$client = new InterceptorClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
], $intercept_channel);
$req = new SimpleRequest($req_text);
@ -250,6 +254,7 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
$intercept_channel1 = Grpc\Interceptor::intercept($this->channel, $channel_matadata_interceptor);
$intercept_channel2 = Grpc\Interceptor::intercept($intercept_channel1, $channel_matadata_intercepto2);
$client = new InterceptorClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
], $intercept_channel2);
@ -275,6 +280,7 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
$intercept_channel3 = Grpc\Interceptor::intercept($this->channel,
[$channel_matadata_intercepto2, $channel_matadata_interceptor]);
$client = new InterceptorClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
], $intercept_channel3);
@ -304,6 +310,7 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
$intercept_channel = Grpc\Interceptor::intercept($this->channel,
$change_request_interceptor);
$client = new InterceptorClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
], $intercept_channel);
@ -354,6 +361,7 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
$intercept_channel = Grpc\Interceptor::intercept($this->channel,
$channel_request_interceptor);
$client = new InterceptorClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
], $intercept_channel);
@ -374,7 +382,10 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
{
$channel = new Grpc\Channel(
'localhost:0',
['credentials' => Grpc\ChannelCredentials::createInsecure()]
[
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()
]
);
$interceptor_channel = Grpc\Interceptor::intercept($channel, new Grpc\Interceptor());
$state = $interceptor_channel->getConnectivityState();
@ -386,7 +397,10 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
{
$channel = new Grpc\Channel(
'localhost:0',
['credentials' => Grpc\ChannelCredentials::createInsecure()]
[
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()
]
);
$interceptor_channel = Grpc\Interceptor::intercept($channel, new Grpc\Interceptor());
$now = Grpc\Timeval::now();
@ -402,7 +416,10 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
{
$channel = new Grpc\Channel(
'localhost:0',
['credentials' => Grpc\ChannelCredentials::createInsecure()]
[
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()
]
);
$interceptor_channel = Grpc\Interceptor::intercept($channel, new Grpc\Interceptor());
$this->assertNotNull($interceptor_channel);
@ -413,7 +430,10 @@ class InterceptorTest extends PHPUnit_Framework_TestCase
{
$channel = new Grpc\Channel(
'localhost:8888',
['credentials' => Grpc\ChannelCredentials::createInsecure()]
[
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()
]
);
$interceptor_channel = Grpc\Interceptor::intercept($channel, new Grpc\Interceptor());
$target = $interceptor_channel->getTarget();

@ -34,6 +34,7 @@ class SecureEndToEndTest extends PHPUnit_Framework_TestCase
$this->channel = new Grpc\Channel(
'localhost:'.$this->port,
[
'force_new' => true,
'grpc.ssl_target_name_override' => $this->host_override,
'grpc.default_authority' => $this->host_override,
'credentials' => $credentials,
@ -44,6 +45,7 @@ class SecureEndToEndTest extends PHPUnit_Framework_TestCase
public function tearDown()
{
$this->channel->close();
unset($this->server);
}
public function testSimpleRequestBody()

@ -55,7 +55,10 @@ class ServerTest extends PHPUnit_Framework_TestCase
$port = $this->server->addHttp2Port('0.0.0.0:0');
$this->server->start();
$channel = new Grpc\Channel('localhost:'.$port,
['credentials' => Grpc\ChannelCredentials::createInsecure()]);
[
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure()
]);
$deadline = Grpc\Timeval::infFuture();
$call = new Grpc\Call($channel, 'dummy_method', $deadline);

@ -0,0 +1,23 @@
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package grpc.testing;
// A service that has zero methods.
// See https://github.com/grpc/grpc/issues/15574
service EmptyService {
}

@ -1250,19 +1250,20 @@ class Server(six.with_metaclass(abc.ABCMeta)):
"""Stops this Server.
This method immediately stop service of new RPCs in all cases.
If a grace period is specified, this method returns immediately
and all RPCs active at the end of the grace period are aborted.
If a grace period is not specified, then all existing RPCs are
teriminated immediately and the this method blocks until the last
RPC handler terminates.
If a grace period is not specified (by passing None for `grace`),
all existing RPCs are aborted immediately and this method
blocks until the last RPC handler terminates.
This method is idempotent and may be called at any time.
Passing a smaller grace value in subsequent call will have
the effect of stopping the Server sooner. Passing a larger
grace value in subsequent call *will not* have the effect of
stopping the server later (i.e. the most restrictive grace
value is used).
Passing a smaller grace value in a subsequent call will have
the effect of stopping the Server sooner (passing None will
have the effect of stopping the server immediately). Passing
a larger grace value in a subsequent call *will not* have the
effect of stopping the server later (i.e. the most restrictive
grace value is used).
Args:
grace: A duration of time in seconds or None.
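Illustrative sketch of the grace semantics described in the revised docstring above (not part of this change; it assumes a server built with the standard grpc.server() helper and an insecure port):

    import grpc
    from concurrent import futures

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    server.add_insecure_port('[::]:50051')
    server.start()

    # Grace of 30s: new RPCs are rejected immediately, in-flight RPCs get
    # up to 30 seconds before they are aborted. stop() returns right away
    # with an event that is set once shutdown has completed.
    shutdown_event = server.stop(30)

    # A later call with a smaller grace shortens the deadline (None aborts
    # remaining RPCs immediately); a larger grace would not extend it.
    server.stop(None)
    shutdown_event.wait()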

@ -17,6 +17,17 @@ cimport cpython
import grpc
import threading
def _spawn_callback_in_thread(cb_func, args):
threading.Thread(target=cb_func, args=args).start()
async_callback_func = _spawn_callback_in_thread
def set_async_callback_func(callback_func):
global async_callback_func
async_callback_func = callback_func
def _spawn_callback_async(callback, args):
async_callback_func(callback, args)
cdef class CallCredentials:
@ -40,7 +51,7 @@ cdef int _get_metadata(
else:
cb(user_data, NULL, 0, status, error_details)
args = context.service_url, context.method_name, callback,
threading.Thread(target=<object>state, args=args).start()
_spawn_callback_async(<object>state, args)
return 0 # Asynchronous return

@ -418,6 +418,11 @@ def init_grpc_gevent():
g_event = gevent.event.Event()
g_pool = gevent.pool.Group()
def cb_func(cb, args):
_spawn_greenlet(cb, *args)
set_async_callback_func(cb_func)
gevent_resolver_vtable.resolve = socket_resolve
gevent_resolver_vtable.resolve_async = socket_resolve_async
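Taken together, the credentials.pyx.pxi and grpc_gevent.pyx hunks above introduce a pluggable spawner for metadata-plugin callbacks: the default spawns each callback on a fresh thread, and init_grpc_gevent() swaps in a greenlet-based spawner. A minimal pure-Python sketch of that indirection (the real code lives in the Cython files shown above; the gevent override is shown only as a comment):

    import threading

    def _spawn_callback_in_thread(cb_func, args):
        # Default: run the plugin callback on its own thread.
        threading.Thread(target=cb_func, args=args).start()

    async_callback_func = _spawn_callback_in_thread

    def set_async_callback_func(callback_func):
        # Allow an event-loop integration to replace the spawner.
        global async_callback_func
        async_callback_func = callback_func

    def _spawn_callback_async(callback, args):
        async_callback_func(callback, args)

    # A gevent integration would register something like:
    #   set_async_callback_func(lambda cb, args: gevent.spawn(cb, *args))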

@ -416,7 +416,6 @@
ifeq ($(HAS_PKG_CONFIG),true)
OPENSSL_ALPN_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.0.2 openssl
OPENSSL_NPN_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.0.1 openssl
ZLIB_CHECK_CMD = $(PKG_CONFIG) --exists zlib
PROTOBUF_CHECK_CMD = $(PKG_CONFIG) --atleast-version=3.5.0 protobuf
CARES_CHECK_CMD = $(PKG_CONFIG) --atleast-version=1.11.0 libcares
@ -429,7 +428,6 @@
endif
OPENSSL_ALPN_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/openssl-alpn.c $(addprefix -l, $(OPENSSL_LIBS)) $(LDFLAGS)
OPENSSL_NPN_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/openssl-npn.c $(addprefix -l, $(OPENSSL_LIBS)) $(LDFLAGS)
BORINGSSL_COMPILE_CHECK_CMD = $(CC) $(CPPFLAGS) ${defaults.boringssl.CPPFLAGS} $(CFLAGS) ${defaults.boringssl.CFLAGS} -o $(TMPOUT) test/build/boringssl.c $(LDFLAGS)
ZLIB_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/zlib.c -lz $(LDFLAGS)
PROTOBUF_CHECK_CMD = $(CXX) $(CPPFLAGS) $(CXXFLAGS) -o $(TMPOUT) test/build/protobuf.cc -lprotobuf $(LDFLAGS)
@ -457,13 +455,7 @@
ifndef REQUIRE_CUSTOM_LIBRARIES_$(CONFIG)
HAS_SYSTEM_OPENSSL_ALPN ?= $(shell $(OPENSSL_ALPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_SYSTEM_OPENSSL_ALPN),true)
HAS_SYSTEM_OPENSSL_NPN = true
CACHE_MK += HAS_SYSTEM_OPENSSL_ALPN = true,
else
HAS_SYSTEM_OPENSSL_NPN ?= $(shell $(OPENSSL_NPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
endif
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
CACHE_MK += HAS_SYSTEM_OPENSSL_NPN = true,
endif
HAS_SYSTEM_ZLIB ?= $(shell $(ZLIB_CHECK_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_SYSTEM_ZLIB),true)
@ -480,7 +472,6 @@
else
# override system libraries if the config requires a custom compiled library
HAS_SYSTEM_OPENSSL_ALPN = false
HAS_SYSTEM_OPENSSL_NPN = false
HAS_SYSTEM_ZLIB = false
HAS_SYSTEM_PROTOBUF = false
HAS_SYSTEM_CARES = false
@ -627,12 +618,7 @@
EMBED_OPENSSL ?= $(HAS_EMBEDDED_OPENSSL_ALPN)
NO_SECURE ?= false
else # HAS_EMBEDDED_OPENSSL_ALPN=false
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
EMBED_OPENSSL ?= false
NO_SECURE ?= false
else
NO_SECURE ?= true
endif # HAS_SYSTEM_OPENSSL_NPN=true
endif # HAS_EMBEDDED_OPENSSL_ALPN
endif # HAS_SYSTEM_OPENSSL_ALPN
@ -666,10 +652,10 @@
else # HAS_PKG_CONFIG=false
LIBS_SECURE = $(OPENSSL_LIBS)
endif # HAS_PKG_CONFIG
ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
ifeq ($(DISABLE_ALPN),true)
CPPFLAGS += -DTSI_OPENSSL_ALPN_SUPPORT=0
LIBS_SECURE = $(OPENSSL_LIBS)
endif # HAS_SYSTEM_OPENSSL_NPN
endif # DISABLE_ALPN
PC_LIBS_SECURE = $(addprefix -l, $(LIBS_SECURE))
endif # EMBED_OPENSSL
endif # NO_SECURE
@ -890,7 +876,6 @@
run_dep_checks:
$(OPENSSL_ALPN_CHECK_CMD) || true
$(OPENSSL_NPN_CHECK_CMD) || true
$(ZLIB_CHECK_CMD) || true
$(PERFTOOLS_CHECK_CMD) || true
$(PROTOBUF_CHECK_CMD) || true

@ -22,7 +22,9 @@
s.files += Dir.glob('src/ruby/bin/**/*')
s.files += Dir.glob('src/ruby/ext/**/*')
s.files += Dir.glob('src/ruby/lib/**/*')
s.files += Dir.glob('src/ruby/pb/**/*')
s.files += Dir.glob('src/ruby/pb/**/*').reject do |f|
f.match(%r{^src/ruby/pb/test})
end
s.files += Dir.glob('include/grpc/**/*')
s.test_files = Dir.glob('src/ruby/spec/**/*')
s.bindir = 'src/ruby/bin'
@ -30,7 +32,6 @@
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '>= 0.5.1', '< 0.7'
s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
@ -43,6 +44,7 @@
s.add_development_dependency 'rspec', '~> 3.6'
s.add_development_dependency 'rubocop', '~> 0.49.1'
s.add_development_dependency 'signet', '~> 0.7.0'
s.add_development_dependency 'googleauth', '>= 0.5.1', '< 0.7'
s.extensions = %w(src/ruby/ext/grpc/extconf.rb)

@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y ${'\\'}
mercurial ${'\\'}
zlib1g-dev && apt-get clean
# Install Pyenv and dev Python versions 3.5 and 3.6
# Install Pyenv and dev Python versions 3.{5,6,7}
RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
ENV PATH /root/.pyenv/bin:$PATH
RUN eval "$(pyenv init -)"
@ -18,5 +18,6 @@ RUN eval "$(pyenv virtualenv-init -)"
RUN pyenv update
RUN pyenv install 3.5-dev
RUN pyenv install 3.6-dev
RUN pyenv install 3.7-dev
RUN pyenv install pypy-5.3.1
RUN pyenv local 3.5-dev 3.6-dev pypy-5.3.1
RUN pyenv local 3.5-dev 3.6-dev 3.7-dev pypy-5.3.1

@ -14,15 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian:jessie
FROM debian:stretch
<%include file="../../apt_get_basic.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../apt_get_pyenv.include"/>
# Install pip and virtualenv for Python 3.4
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.4
RUN python3.4 -m pip install virtualenv
# Install pip and virtualenv for Python 3.5
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5
RUN python3.5 -m pip install virtualenv
<%include file="../../run_tests_addons.include"/>
# Define the default command.

@ -1,30 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* This is just a compilation test, to see if we have a version of OpenSSL with
NPN support installed. It's not meant to be run, and all of the values and
function calls there are non-sensical. The code is only meant to test the
presence of symbols, and we're expecting a compilation failure otherwise. */
#include <stdlib.h>
#include <openssl/ssl.h>
int main() {
SSL_get0_next_proto_negotiated(NULL, NULL, NULL);
return OPENSSL_NPN_UNSUPPORTED;
}

@ -160,6 +160,8 @@ static void encode_int_to_str(int i, char* p) {
}
static void test_decode_table_overflow() {
// Decrease the default table size to make decode table overflow easier.
grpc_chttp2_hpack_compressor_set_max_table_size(&g_compressor, 1024);
int i;
char key[3], value[3];
char* expect;
@ -170,27 +172,20 @@ static void test_decode_table_overflow() {
false,
};
for (i = 0; i < 114; i++) {
for (i = 0; i < 29; i++) {
encode_int_to_str(i, key);
encode_int_to_str(i + 1, value);
if (i + 61 >= 127) {
if (i == 0) {
// 3fe107 corresponds to the table size update.
gpr_asprintf(&expect,
"000009 0104 deadbeef ff%02x 40 02%02x%02x 02%02x%02x",
i + 61 - 127, key[0], key[1], value[0], value[1]);
} else if (i > 0) {
"00000a 0104 deadbeef 3fe107 40 02%02x%02x 02%02x%02x",
key[0], key[1], value[0], value[1]);
verify(params, expect, 1, key, value);
} else {
gpr_asprintf(&expect,
"000008 0104 deadbeef %02x 40 02%02x%02x 02%02x%02x",
0x80 + 61 + i, key[0], key[1], value[0], value[1]);
} else {
gpr_asprintf(&expect, "000007 0104 deadbeef 40 02%02x%02x 02%02x%02x",
key[0], key[1], value[0], value[1]);
}
if (i > 0) {
verify(params, expect, 2, "aa", "ba", key, value);
} else {
verify(params, expect, 1, key, value);
}
gpr_free(expect);
}

@ -1,226 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <memory>
#include <mutex>
#include <thread>
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
#include <gtest/gtest.h>
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using std::chrono::system_clock;
namespace grpc {
namespace testing {
namespace {
// Subclass of TestServiceImpl that increments a request counter for
// every call to the Echo RPC.
class MyTestServiceImpl : public TestServiceImpl {
public:
MyTestServiceImpl() : request_count_(0) {}
Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
{
std::unique_lock<std::mutex> lock(mu_);
++request_count_;
}
return TestServiceImpl::Echo(context, request, response);
}
int request_count() {
std::unique_lock<std::mutex> lock(mu_);
return request_count_;
}
private:
std::mutex mu_;
int request_count_;
};
class RoundRobinEnd2endTest : public ::testing::Test {
protected:
RoundRobinEnd2endTest() : server_host_("localhost") {}
void StartServers(size_t num_servers,
std::vector<int> ports = std::vector<int>()) {
for (size_t i = 0; i < num_servers; ++i) {
int port = 0;
if (ports.size() == num_servers) port = ports[i];
servers_.emplace_back(new ServerData(server_host_, port));
}
}
void TearDown() override {
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown();
}
}
void ResetStub(bool round_robin) {
ChannelArguments args;
if (round_robin) args.SetLoadBalancingPolicyName("round_robin");
std::ostringstream uri;
uri << "ipv4:///";
for (size_t i = 0; i < servers_.size() - 1; ++i) {
uri << "127.0.0.1:" << servers_[i]->port_ << ",";
}
uri << "127.0.0.1:" << servers_[servers_.size() - 1]->port_;
channel_ =
CreateCustomChannel(uri.str(), InsecureChannelCredentials(), args);
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
void SendRpc(int num_rpcs, bool expect_ok = true) {
EchoRequest request;
EchoResponse response;
request.set_message("Live long and prosper.");
for (int i = 0; i < num_rpcs; i++) {
ClientContext context;
Status status = stub_->Echo(&context, request, &response);
if (expect_ok) {
EXPECT_TRUE(status.ok());
EXPECT_EQ(response.message(), request.message());
} else {
EXPECT_FALSE(status.ok());
}
}
}
struct ServerData {
int port_;
std::unique_ptr<Server> server_;
MyTestServiceImpl service_;
explicit ServerData(const grpc::string& server_host, int port = 0) {
port_ = port > 0 ? port : grpc_pick_unused_port_or_die();
gpr_log(GPR_INFO, "starting server on port %d", port_);
std::ostringstream server_address;
server_address << server_host << ":" << port_;
ServerBuilder builder;
builder.AddListeningPort(server_address.str(),
InsecureServerCredentials());
builder.RegisterService(&service_);
server_ = builder.BuildAndStart();
gpr_log(GPR_INFO, "server startup complete");
}
void Shutdown() { server_->Shutdown(); }
};
const grpc::string server_host_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::vector<std::unique_ptr<ServerData>> servers_;
};
TEST_F(RoundRobinEnd2endTest, PickFirst) {
// Start servers and send one RPC per server.
const int kNumServers = 3;
StartServers(kNumServers);
ResetStub(false /* round_robin */);
SendRpc(kNumServers);
// All requests should have gone to a single server.
bool found = false;
for (size_t i = 0; i < servers_.size(); ++i) {
const int request_count = servers_[i]->service_.request_count();
if (request_count == kNumServers) {
found = true;
} else {
EXPECT_EQ(0, request_count);
}
}
EXPECT_TRUE(found);
// Check LB policy name for the channel.
EXPECT_EQ("pick_first", channel_->GetLoadBalancingPolicyName());
}
TEST_F(RoundRobinEnd2endTest, RoundRobin) {
// Start servers and send one RPC per server.
const int kNumServers = 3;
StartServers(kNumServers);
ResetStub(true /* round_robin */);
// Send one RPC per backend and make sure they are used in order.
// Note: This relies on the fact that the subchannels are reported in
// state READY in the order in which the addresses are specified,
// which is only true because the backends are all local.
for (size_t i = 0; i < servers_.size(); ++i) {
SendRpc(1);
EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
}
// Check LB policy name for the channel.
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
}
TEST_F(RoundRobinEnd2endTest, RoundRobinReconnect) {
// Start servers and send one RPC per server.
const int kNumServers = 1;
std::vector<int> ports;
ports.push_back(grpc_pick_unused_port_or_die());
StartServers(kNumServers, ports);
ResetStub(true /* round_robin */);
// Send one RPC per backend and make sure they are used in order.
// Note: This relies on the fact that the subchannels are reported in
// state READY in the order in which the addresses are specified,
// which is only true because the backends are all local.
for (size_t i = 0; i < servers_.size(); ++i) {
SendRpc(1);
EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
}
// Check LB policy name for the channel.
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
// Kill all servers
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown();
}
// Client request should fail.
SendRpc(1, false);
// Bring servers back up on the same port (we aren't recreating the channel).
StartServers(kNumServers, ports);
// Client request should succeed.
SendRpc(1);
}
} // namespace
} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
grpc_test_init(argc, argv);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian:jessie
FROM debian:stretch
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
@ -80,7 +80,7 @@ RUN apt-get update && apt-get install -y \
mercurial \
zlib1g-dev && apt-get clean
# Install Pyenv and dev Python versions 3.5 and 3.6
# Install Pyenv and dev Python versions 3.{5,6,7}
RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
ENV PATH /root/.pyenv/bin:$PATH
RUN eval "$(pyenv init -)"
@ -88,12 +88,13 @@ RUN eval "$(pyenv virtualenv-init -)"
RUN pyenv update
RUN pyenv install 3.5-dev
RUN pyenv install 3.6-dev
RUN pyenv install 3.7-dev
RUN pyenv install pypy-5.3.1
RUN pyenv local 3.5-dev 3.6-dev pypy-5.3.1
RUN pyenv local 3.5-dev 3.6-dev 3.7-dev pypy-5.3.1
# Install pip and virtualenv for Python 3.4
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.4
RUN python3.4 -m pip install virtualenv
# Install pip and virtualenv for Python 3.5
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5
RUN python3.5 -m pip install virtualenv
RUN mkdir /var/local/jenkins

@ -34,7 +34,8 @@ sudo systemsetup -setusingnetworktime on
date
# Add GCP credentials for BQ access
pip install google-api-python-client --user python
# pin google-api-python-client to avoid https://github.com/grpc/grpc/issues/15600
pip install google-api-python-client==1.6.7 --user python
export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json
# If this is a PR using RUN_TESTS_FLAGS var, then add flags to filter tests

@ -22,8 +22,8 @@ mkdir -p ${KOKORO_KEYSTORE_DIR}
cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
temp_dir=$(mktemp -d)
ln -f "${KOKORO_GFILE_DIR}/bazel-release-0.12.0" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-release-0.12.0"
ln -f "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5"
export PATH="${temp_dir}:${PATH}"
# This should show ${temp_dir}/bazel
which bazel
@ -49,9 +49,12 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--strategy=Closure=remote \
--genrule_strategy=remote \
--experimental_strict_action_env=true \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/cloud-marketplace/google/rbe-debian8@sha256:1ede2a929b44d629ec5abe86eee6d7ffea1d5a4d247489a8867d46cfde3e38bd" }' \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.3.0/bazel_0.10.0:toolchain \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:59bf0e191a6b5cc1ab62c2224c810681d1326bad5a27b1d36c9f40113e79da7f" }' \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/default:toolchain \
--define GRPC_PORT_ISOLATED_RUNTIME=1 \
--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 \
--extra_toolchains=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/cpp:cc-toolchain-clang-x86_64-default \
--extra_execution_platforms=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0:rbe_ubuntu1604 \
$1 \
-- //test/... || FAILED="true"

@ -23,8 +23,8 @@ mkdir -p ${KOKORO_KEYSTORE_DIR}
cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
temp_dir=$(mktemp -d)
ln -f "${KOKORO_GFILE_DIR}/bazel-release-0.12.0" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-release-0.12.0"
ln -f "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5"
export PATH="${temp_dir}:${PATH}"
# This should show ${temp_dir}/bazel
which bazel
@ -50,7 +50,7 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--strategy=Closure=remote \
--genrule_strategy=remote \
--experimental_strict_action_env=true \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-msan@sha256:8f381d55c0456fb65821c90ada902c2584977e03a1eaca8fba8ce77e644c775b" }' \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:59bf0e191a6b5cc1ab62c2224c810681d1326bad5a27b1d36c9f40113e79da7f" }' \
--define GRPC_PORT_ISOLATED_RUNTIME=1 \
--copt=-gmlt \
--strip=never \
@ -59,8 +59,11 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--linkopt=-fsanitize=memory \
--copt=-fsanitize-memory-track-origins \
--action_env=LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH \
--host_crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.3.0/bazel_0.10.0:toolchain \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/experimental/debian8_clang/0.3.0/bazel_0.10.0/msan:msan_experimental_toolchain \
--host_crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/default:toolchain \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/msan:toolchain \
--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 \
--extra_toolchains=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/cpp:cc-toolchain-clang-x86_64-default \
--extra_execution_platforms=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0:rbe_ubuntu1604 \
-- //test/... || FAILED="true"
# Sleep to let ResultStore finish writing results before querying

@ -23,8 +23,8 @@ mkdir -p ${KOKORO_KEYSTORE_DIR}
cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
temp_dir=$(mktemp -d)
ln -f "${KOKORO_GFILE_DIR}/bazel-release-0.12.0" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-release-0.12.0"
ln -f "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5" ${temp_dir}/bazel
chmod 755 "${KOKORO_GFILE_DIR}/bazel-rc-0.14.0rc5"
export PATH="${temp_dir}:${PATH}"
# This should show ${temp_dir}/bazel
which bazel
@ -50,13 +50,16 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--strategy=Closure=remote \
--genrule_strategy=remote \
--experimental_strict_action_env=true \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/cloud-marketplace/google/rbe-debian8@sha256:1ede2a929b44d629ec5abe86eee6d7ffea1d5a4d247489a8867d46cfde3e38bd" }' \
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:59bf0e191a6b5cc1ab62c2224c810681d1326bad5a27b1d36c9f40113e79da7f" }' \
--define GRPC_PORT_ISOLATED_RUNTIME=1 \
--copt=-gmlt \
--strip=never \
--copt=-fsanitize=undefined \
--linkopt=-fsanitize=undefined \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/experimental/debian8_clang/0.3.0/bazel_0.12.0/ubsan:toolchain \
--crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/experimental/ubuntu16_04_clang/1.0/bazel_0.13.0/ubsan:toolchain \
--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 \
--extra_toolchains=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0/bazel_0.13.0/cpp:cc-toolchain-clang-x86_64-default \
--extra_execution_platforms=@com_github_bazelbuild_bazeltoolchains//configs/ubuntu16_04_clang/1.0:rbe_ubuntu1604 \
-- //test/... || FAILED="true"
# Sleep to let ResultStore finish writing results before querying

@ -326,7 +326,14 @@ LANG_RELEASE_MATRIX = {
},
],
'csharp': [
#{'v1.0.1': None},
{
'v1.0.1': {
'patch': [
'tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile',
'tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile',
]
}
},
{
'v1.1.4': None
},
@ -377,6 +384,7 @@ TESTCASES_VERSION_MATRIX = {
'node_v1.4.2': 'node__v1.1.4',
'node_v1.6.6': 'node__v1.1.4',
'ruby_v1.0.1': 'ruby__v1.0.1',
'csharp_v1.0.1': 'csharp__v1.1.4',
'csharp_v1.1.4': 'csharp__v1.1.4',
'csharp_v1.2.5': 'csharp__v1.1.4',
'python_v1.0.x': 'python__v1.0.x',

@ -97,6 +97,12 @@ argp.add_argument(
'reusing the repo can cause git checkout error if you switch '
'between releases.')
argp.add_argument(
'--upload_images',
action='store_true',
help='If set, images will be uploaded to container registry after building.'
)
args = argp.parse_args()
@ -166,8 +172,10 @@ def build_all_images_for_lang(lang):
"""Build all docker images for a language across releases and runtimes."""
if not args.git_checkout:
if args.release != 'master':
print('WARNING: --release is set but will be ignored\n')
releases = ['master']
print(
'Cannot use --release without also enabling --git_checkout.\n')
sys.exit(1)
releases = [args.release]
else:
if args.release == 'all':
releases = client_matrix.get_release_tags(lang)
@ -268,6 +276,13 @@ def maybe_apply_patches_on_git_tag(stack_base, lang, release):
sys.exit(1)
subprocess.check_output(
['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
# TODO(jtattermusch): this really would need simplification and refactoring
# - "git add" and "git commit" can easily be done in a single command
# - it looks like the only reason for the existence of the "files_to_patch"
# entry is to perform "git add" - which is clumsy and fragile.
# - we only allow a single patch with name "git_repo.patch". A better design
# would be to allow multiple patches that can have more descriptive names.
for repo_relative_path in files_to_patch:
subprocess.check_output(
['git', 'add', repo_relative_path],
@ -334,8 +349,12 @@ languages = args.language if args.language != ['all'] else _LANGUAGES
for lang in languages:
docker_images = build_all_images_for_lang(lang)
for image in docker_images:
jobset.message('START', 'Uploading %s' % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(':') != -1
subprocess.call(['gcloud', 'docker', '--', 'push', image])
if args.upload_images:
jobset.message('START', 'Uploading %s' % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(':') != -1
subprocess.call(['gcloud', 'docker', '--', 'push', image])
else:
# Uploading (and overwriting images) by default can easily break things.
print('Not uploading image %s, run with --upload_images to upload.'
% image)

@ -0,0 +1,81 @@
diff --git a/third_party/boringssl b/third_party/boringssl
index c880e42ba1..70ef9596bb 160000
--- a/third_party/boringssl
+++ b/third_party/boringssl
@@ -1 +1 @@
-Subproject commit c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9
+Subproject commit 70ef9596bbcc11353b9bb8d4e91478694dd21439
diff --git a/third_party/gflags b/third_party/gflags
index 05b155ff59..30dbc81fb5 160000
--- a/third_party/gflags
+++ b/third_party/gflags
@@ -1 +1 @@
-Subproject commit 05b155ff59114735ec8cd089f669c4c3d8f59029
+Subproject commit 30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e
diff --git a/third_party/googletest b/third_party/googletest
index c99458533a..ec44c6c167 160000
--- a/third_party/googletest
+++ b/third_party/googletest
@@ -1 +1 @@
-Subproject commit c99458533a9b4c743ed51537e25989ea55944908
+Subproject commit ec44c6c1675c25b9827aacd08c02433cccde7780
diff --git a/third_party/protobuf b/third_party/protobuf
index 1a58673508..b5fbb742af 160000
--- a/third_party/protobuf
+++ b/third_party/protobuf
@@ -1 +1 @@
-Subproject commit 1a586735085e817b1f52e53feec92ce418049f69
+Subproject commit b5fbb742af122b565925987e65c08957739976a7
diff --git a/third_party/zlib b/third_party/zlib
index 5089329162..cacf7f1d4e 160000
--- a/third_party/zlib
+++ b/third_party/zlib
@@ -1 +1 @@
-Subproject commit 50893291621658f355bc5b4d450a8d06a563053d
+Subproject commit cacf7f1d4e3d44d871b605da3b647f07d718623f
diff --git a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
index da1d2c645e..f405994293 100644
--- a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
@@ -67,11 +67,10 @@ RUN apt-get update && apt-get install -y time && apt-get clean
# C# dependencies
# Update to a newer version of mono
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
+RUN echo "deb http://download.mono-project.com/repo/debian wheezy/snapshots/4.6 main" | tee /etc/apt/sources.list.d/official.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
# Install dependencies
RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
diff --git a/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile
index 65f67d3650..26223753ed 100644
--- a/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile
@@ -82,11 +82,10 @@ RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
# C# dependencies
# Update to a newer version of mono
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
+RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
# Install dependencies
RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
@@ -99,7 +98,8 @@ RUN nuget update -self
# Install dotnet SDK based on https://www.microsoft.com/net/core#debian
RUN apt-get update && apt-get install -y curl libunwind8 gettext
-RUN curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?LinkID=809130
+# https://github.com/dotnet/core/blob/master/release-notes/download-archives/1.0.1-preview2-download.md
+RUN curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?LinkID=827530
RUN mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet
RUN ln -s /opt/dotnet/dotnet /usr/local/bin

@ -116,6 +116,10 @@ def find_all_images_for_lang(lang):
return {}
releases = [args.release]
# TODO(jtattermusch): why do we need to query the existing images/tags?
# From LANG_RUNTIME_MATRIX and LANG_RELEASE_MATRIX it should be obvious
# which tags we want to test - and it should be an error if they are
# missing.
# Images tuples keyed by runtime.
images = {}
for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
